source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp @ 92204

Last change on this file was revision 92204, checked in by vboxsync, 3 years ago:

VMM/PDMCritSect: Make it possible for known worker threads to enter critical sections in ring-0. bugref:10093 bugref:6695

1/* $Id: PDMAllCritSectRw.cpp 92204 2021-11-04 00:51:40Z vboxsync $ */
2/** @file
3 * PDM - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECTRW
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37#endif
38#if defined(IN_RING3) || defined(IN_RING0)
39# include <iprt/semaphore.h>
40# include <iprt/thread.h>
41#endif
42#ifdef IN_RING0
43# include <iprt/time.h>
44#endif
45#ifdef RT_ARCH_AMD64
46# include <iprt/x86.h>
47#endif
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53#if 0 /* unused */
54/** The number of loops to spin for shared access in ring-3. */
55#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
56/** The number of loops to spin for shared access in ring-0. */
57#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
58/** The number of loops to spin for shared access in the raw-mode context. */
59#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
60
61/** The number of loops to spin for exclusive access in ring-3. */
62#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
63/** The number of loops to spin for exclusive access in ring-0. */
64#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
65/** The number of loops to spin for exclusive access in the raw-mode context. */
66#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
67#endif
68
69/** Max number of write or write/read recursions. */
70#define PDM_CRITSECTRW_MAX_RECURSIONS _1M
71
72/** Skips some of the overly paranoid atomic reads and updates.
73 * Makes some assumptions about cache coherence, though not brave enough to
74 * skip the final atomic update. */
75#define PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
76
77/** For reading RTCRITSECTRWSTATE::s::u64State. */
78#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
79# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicUoReadU64(a_pu64State)
80#else
81# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicReadU64(a_pu64State)
82#endif
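
/*
 * Illustrative sketch (not part of the original file): RTCRITSECTRWSTATE::s::u64State
 * packs the active reader count, the waiting-reader count, the writer count and the
 * current direction, which is why the code below keeps extracting fields with the
 * RTCSRW_*_MASK / RTCSRW_*_SHIFT pairs from iprt/critsect.h.  A hypothetical decoder
 * using only those existing symbols:
 */
#if 0 /* illustrative sketch only */
static void pdmCritSectRwExampleDecodeState(uint64_t u64State) /* hypothetical helper */
{
    uint64_t const cReaders        = (u64State & RTCSRW_CNT_RD_MASK)      >> RTCSRW_CNT_RD_SHIFT;
    uint64_t const cWaitingReaders = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    uint64_t const cWriters        = (u64State & RTCSRW_CNT_WR_MASK)      >> RTCSRW_CNT_WR_SHIFT;
    bool const     fWriteDir       = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
    Log(("u64State=%#RX64: %RU64 readers (%RU64 waiting), %RU64 writers, direction=%s\n",
         u64State, cReaders, cWaitingReaders, cWriters, fWriteDir ? "write" : "read"));
}
#endif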
83
84
85/* Undefine the automatic VBOX_STRICT API mappings. */
86#undef PDMCritSectRwEnterExcl
87#undef PDMCritSectRwTryEnterExcl
88#undef PDMCritSectRwEnterShared
89#undef PDMCritSectRwTryEnterShared
90
91
92/*********************************************************************************************************************************
93* Global Variables *
94*********************************************************************************************************************************/
95#if defined(RTASM_HAVE_CMP_WRITE_U128) && defined(RT_ARCH_AMD64)
96static int32_t g_fCmpWriteSupported = -1;
97#endif
98
99
100/*********************************************************************************************************************************
101* Internal Functions *
102*********************************************************************************************************************************/
103#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
104static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
105#else
106DECLASM(int) pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
107DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
108#endif
109
110
111#ifdef RTASM_HAVE_CMP_WRITE_U128
112
113# ifdef RT_ARCH_AMD64
114/**
115 * Called once to initialize g_fCmpWriteSupported.
116 */
117DECL_NO_INLINE(static, bool) pdmCritSectRwIsCmpWriteU128SupportedSlow(void)
118{
119 bool const fCmpWriteSupported = RT_BOOL(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16);
120 ASMAtomicWriteS32(&g_fCmpWriteSupported, fCmpWriteSupported);
121 return fCmpWriteSupported;
122}
123# endif
124
125
126/**
127 * Indicates whether hardware actually supports 128-bit compare & write.
128 */
129DECL_FORCE_INLINE(bool) pdmCritSectRwIsCmpWriteU128Supported(void)
130{
131# ifdef RT_ARCH_AMD64
132 int32_t const fCmpWriteSupported = g_fCmpWriteSupported;
133 if (RT_LIKELY(fCmpWriteSupported >= 0))
134 return fCmpWriteSupported != 0;
135 return pdmCritSectRwIsCmpWriteU128SupportedSlow();
136# else
137 return true;
138# endif
139}
140
141#endif /* RTASM_HAVE_CMP_WRITE_U128 */
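
/*
 * Illustrative sketch (not part of the original file): the detection above gates a
 * fast path that claims the whole RTCRITSECTRWSTATE (state word + writer handle) in a
 * single 128-bit compare-and-write; pdmCritSectRwEnterExcl further down is the real
 * user.  In isolation the pattern looks roughly like this, with the 64-bit
 * compare-exchange loops used elsewhere in this file as the fallback:
 */
#if 0 /* illustrative sketch only */
if (pdmCritSectRwIsCmpWriteU128Supported())
{
    RTCRITSECTRWSTATE OldState, NewState;
    OldState.s.u64State      = u64State;                /* expected: idle, no writer */
    OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
    NewState.s.u64State      = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
    NewState.s.hNativeWriter = hNativeSelf;
    if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
        return VINF_SUCCESS;                            /* grabbed it in one shot */
}
/* else: fall back to the 64-bit ASMAtomicCmpXchgU64 path. */
#endif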
142
143/**
144 * Gets the ring-3 native thread handle of the calling thread.
145 *
146 * @returns native thread handle (ring-3).
147 * @param pVM The cross context VM structure.
148 * @param pThis The read/write critical section. This is only used in
149 * R0 and RC.
150 */
151DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
152{
153#ifdef IN_RING3
154 RT_NOREF(pVM, pThis);
155 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
156
157#elif defined(IN_RING0)
158 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
159 NIL_RTNATIVETHREAD);
160 RTNATIVETHREAD hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
161 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
162
163#else
164# error "invalid context"
165#endif
166 return hNativeSelf;
167}
168
169
170DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
171{
172 ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
173 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
174 return VERR_PDM_CRITSECTRW_IPE;
175}
176
177
178
179#ifdef IN_RING3
180/**
181 * Changes the lock validator sub-class of the read/write critical section.
182 *
183 * It is recommended to make sure that nobody is using this critical section
184 * while changing the value.
185 *
186 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
187 * lock validator isn't compiled in or either of the parameters is
188 * invalid.
189 * @param pThis Pointer to the read/write critical section.
190 * @param uSubClass The new sub-class value.
191 */
192VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
193{
194 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
195 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
196# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
197 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
198
199 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
200 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
201# else
202 NOREF(uSubClass);
203 return RTLOCKVAL_SUB_CLASS_INVALID;
204# endif
205}
206#endif /* IN_RING3 */
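
/*
 * Illustrative sketch (not part of the original file): a component that owns several
 * related read/write sections could give them a common sub-class so the lock
 * validator can reason about their ordering.  Assumes the section was initialized
 * elsewhere; RTLOCKVAL_SUB_CLASS_USER is the first value reserved for such use.
 */
#if 0 /* illustrative sketch only */
uint32_t const uOldSubClass = PDMR3CritSectRwSetSubClass(&pThis->CritSectRw, RTLOCKVAL_SUB_CLASS_USER + 1);
NOREF(uOldSubClass); /* RTLOCKVAL_SUB_CLASS_INVALID here simply means the validator isn't compiled in. */
#endif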
207
208
209/**
210 * Worker for pdmCritSectRwEnterShared returning with read-ownership of the CS.
211 */
212DECL_FORCE_INLINE(int) pdmCritSectRwEnterSharedGotIt(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
213 bool fNoVal, RTTHREAD hThreadSelf)
214{
215#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
216 if (!fNoVal)
217 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
218#else
219 RT_NOREF(pSrcPos, fNoVal, hThreadSelf);
220#endif
221
222 /* got it! */
223 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
224 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
225 return VINF_SUCCESS;
226}
227
228/**
229 * Worker for pdmCritSectRwEnterShared and pdmCritSectRwEnterSharedBailOut
230 * that decrements the wait count and maybe resets the semaphore.
231 */
232DECLINLINE(int) pdmCritSectRwEnterSharedGotItAfterWaiting(PVMCC pVM, PPDMCRITSECTRW pThis, uint64_t u64State,
233 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
234{
235 for (;;)
236 {
237 uint64_t const u64OldState = u64State;
238 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
239 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
240 AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
241 pdmCritSectRwCorrupted(pThis, "Invalid read count"));
242 cWait--;
243 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
244 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
245
246 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
247 {
248 if (cWait == 0)
249 {
250 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
251 {
252 int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
253 AssertRCReturn(rc, rc);
254 }
255 }
256 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
257 }
258
259 ASMNopPause();
260 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
261 ASMNopPause();
262
263 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
264 }
265 /* not reached */
266}
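
/*
 * Illustrative sketch (not part of the original file): the function above is one
 * instance of the compare-exchange retry idiom this file uses everywhere - read
 * u64State, compute the successor state, publish it with ASMAtomicCmpXchgU64, and
 * retry from a fresh read if another thread won the race.  Distilled:
 */
#if 0 /* illustrative sketch only */
for (;;)
{
    uint64_t const u64OldState = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    uint64_t       u64NewState = u64OldState;
    /* ... adjust the RTCSRW_CNT_RD_MASK / RTCSRW_CNT_WR_MASK fields or flip RTCSRW_DIR_MASK here ... */
    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64NewState, u64OldState))
        break;                  /* our update won */
    ASMNopPause();              /* lost the race; be polite, re-check sanity and retry */
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
}
#endif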
267
268
269#if defined(IN_RING0) || (defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT))
270/**
271 * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
272 * and returns @a rc.
273 *
274 * @note May return VINF_SUCCESS if we race the exclusive leave function and
275 * come out on the bottom.
276 *
277 * Ring-3 only calls this in a case where it is _not_ acceptable to take the
278 * lock, so even if we get the lock we'll have to leave. In the ring-0
279 * contexts, we can safely return VINF_SUCCESS in case of a race.
280 */
281DECL_NO_INLINE(static, int) pdmCritSectRwEnterSharedBailOut(PVMCC pVM, PPDMCRITSECTRW pThis, int rc,
282 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
283{
284#ifdef IN_RING0
285 uint64_t const tsStart = RTTimeNanoTS();
286 uint64_t cNsElapsed = 0;
287#endif
288 for (;;)
289 {
290 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
291 uint64_t u64OldState = u64State;
292
293 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
294 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
295 cWait--;
296
297 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
298 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
299
300 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
301 {
302 c--;
303 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
304 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
305 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
306 return rc;
307 }
308 else
309 {
310 /*
311 * The direction changed, so we can actually get the lock now.
312 *
313 * This means that we _have_ to wait on the semaphore to be signalled
314 * so we can properly reset it. Otherwise the stuff gets out of whack,
315 * because signalling and resetting will race one another. An
316 * exception would be if we're not the last reader waiting and don't
317 * need to worry about the resetting.
318 *
319 * An option would be to do the resetting in PDMCritSectRwEnterExcl,
320 * but that would still leave a racing PDMCritSectRwEnterShared
321 * spinning hard for a little bit, which isn't great...
322 */
323 if (cWait == 0)
324 {
325# ifdef IN_RING0
326 /* Do timeout processing first to avoid redoing the above. */
327 uint32_t cMsWait;
328 if (cNsElapsed <= RT_NS_10SEC)
329 cMsWait = 32;
330 else
331 {
332 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
333 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
334 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
335 {
336 LogFunc(("%p: giving up\n", pThis));
337 return rc;
338 }
339 cMsWait = 2;
340 }
341
342 int rcWait = SUPSemEventMultiWait(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, cMsWait);
343 Log11Func(("%p: rc=%Rrc %'RU64 ns (hNativeWriter=%p u64State=%#RX64)\n", pThis, rcWait,
344 RTTimeNanoTS() - tsStart, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
345# else
346 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
347 int rcWait = SUPSemEventMultiWaitNoResume(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, RT_MS_5SEC);
348 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
349# endif
350 if (rcWait == VINF_SUCCESS)
351 {
352# ifdef IN_RING0
353 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
354# else
355 /* ring-3: Cannot return VINF_SUCCESS. */
356 Assert(RT_FAILURE_NP(rc));
357 int rc2 = pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
358 if (RT_SUCCESS(rc2))
359 rc2 = pdmCritSectRwLeaveSharedWorker(pVM, pThis, fNoVal);
360 return rc;
361# endif
362 }
363 AssertMsgReturn(rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED,
364 ("%p: rcWait=%Rrc rc=%Rrc", pThis, rcWait, rc),
365 RT_FAILURE_NP(rcWait) ? rcWait : -rcWait);
366 }
367 else
368 {
369 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
370 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
371 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
372 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
373 }
374
375# ifdef IN_RING0
376 /* Calculate the elapsed time here to avoid redoing state work. */
377 cNsElapsed = RTTimeNanoTS() - tsStart;
378# endif
379 }
380
381 ASMNopPause();
382 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
383 ASMNopPause();
384 }
385}
386#endif /* IN_RING0 || (IN_RING3 && PDMCRITSECTRW_STRICT) */
387
388
389/**
390 * Worker for pdmCritSectRwEnterShared that handles waiting for a contended CS.
391 * Caller has already added us to the read and read-wait counters.
392 */
393static int pdmCritSectRwEnterSharedContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis,
394 int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
395{
396 PSUPDRVSESSION const pSession = pVM->pSession;
397 SUPSEMEVENTMULTI const hEventMulti = (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead;
398# ifdef IN_RING0
399 uint64_t const tsStart = RTTimeNanoTS();
400 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
401 uint64_t cNsMaxTotal = cNsMaxTotalDef;
402 uint32_t cMsMaxOne = RT_MS_5SEC;
403 bool fNonInterruptible = false;
404# endif
405
406 for (uint32_t iLoop = 0; ; iLoop++)
407 {
408 /*
409 * Wait for the direction to switch.
410 */
411 int rc;
412# ifdef IN_RING3
413# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
414 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
415 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
416 if (RT_FAILURE(rc))
417 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
418# else
419 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
420# endif
421# endif
422
423 for (;;)
424 {
425 /*
426 * We always wait with a timeout so we can re-check the structure sanity
427 * and not get stuck waiting on a corrupt or deleted section.
428 */
429# ifdef IN_RING3
430 rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, RT_MS_5SEC);
431# else
432 rc = !fNonInterruptible
433 ? SUPSemEventMultiWaitNoResume(pSession, hEventMulti, cMsMaxOne)
434 : SUPSemEventMultiWait(pSession, hEventMulti, cMsMaxOne);
435 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p u64State=%#RX64)\n", pThis, rc,
436 RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
437# endif
438 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
439 { /* likely */ }
440 else
441 {
442# ifdef IN_RING3
443 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
444# endif
445 return VERR_SEM_DESTROYED;
446 }
447 if (RT_LIKELY(rc == VINF_SUCCESS))
448 break;
449
450 /*
451 * Timeout and interrupted waits need careful handling in ring-0
452 * because we're cooperating with ring-3 on this critical section
453 * and thus need to make absolutely sure we won't get stuck here.
454 *
455 * The r0 interrupted case means something is pending (termination,
456 * signal, APC, debugger, whatever), so we must try our best to
457 * return to the caller and to ring-3 so it can be dealt with.
458 */
459 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
460 {
461# ifdef IN_RING0
462 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
463 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
464 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
465 ("rcTerm=%Rrc\n", rcTerm));
466 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
467 cNsMaxTotal = RT_NS_1MIN;
468
469 if (rc == VERR_TIMEOUT)
470 {
471 /* Try to get out of here with a non-VINF_SUCCESS status if
472 the thread is terminating or if the timeout has been exceeded. */
473 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrTimeout);
474 if ( rcTerm == VINF_THREAD_IS_TERMINATING
475 || cNsElapsed > cNsMaxTotal)
476 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
477 pSrcPos, fNoVal, hThreadSelf);
478 }
479 else
480 {
481 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
482 we will try non-interruptible sleep for a while to help resolve the issue
483 w/o guru'ing. */
484 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrInterrupted);
485 if ( rcTerm != VINF_THREAD_IS_TERMINATING
486 && rcBusy == VINF_SUCCESS
487 && pVCpu != NULL
488 && cNsElapsed <= cNsMaxTotal)
489 {
490 if (!fNonInterruptible)
491 {
492 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits);
493 fNonInterruptible = true;
494 cMsMaxOne = 32;
495 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
496 if (cNsLeft > RT_NS_10SEC)
497 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
498 }
499 }
500 else
501 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
502 pSrcPos, fNoVal, hThreadSelf);
503 }
504# else /* IN_RING3 */
505 RT_NOREF(pVM, pVCpu, rcBusy);
506# endif /* IN_RING3 */
507 }
508 /*
509 * Any other return code is fatal.
510 */
511 else
512 {
513# ifdef IN_RING3
514 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
515# endif
516 AssertMsgFailed(("rc=%Rrc\n", rc));
517 return RT_FAILURE_NP(rc) ? rc : -rc;
518 }
519 }
520
521# ifdef IN_RING3
522 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
523# endif
524
525 /*
526 * Check the direction.
527 */
528 Assert(pThis->s.Core.fNeedReset);
529 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
530 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
531 {
532 /*
533 * Decrement the wait count and maybe reset the semaphore (if we're last).
534 */
535 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
536 }
537
538 AssertMsg(iLoop < 1,
539 ("%p: %u u64State=%#RX64 hNativeWriter=%p\n", pThis, iLoop, u64State, pThis->s.Core.u.s.hNativeWriter));
540 RTThreadYield();
541 }
542
543 /* not reached */
544}
545
546
547/**
548 * Worker that enters a read/write critical section with shared access.
549 *
550 * @returns VBox status code.
551 * @param pVM The cross context VM structure.
552 * @param pThis Pointer to the read/write critical section.
553 * @param rcBusy The busy return code for ring-0 and ring-3.
554 * @param fTryOnly Only try enter it, don't wait.
555 * @param pSrcPos The source position. (Can be NULL.)
556 * @param fNoVal No validation records.
557 */
558#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
559static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
560 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
561#else
562DECLASM(int) pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
563 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal);
564DECLASM(int) StkBack_pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
565 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
566#endif
567{
568 /*
569 * Validate input.
570 */
571 AssertPtr(pThis);
572 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
573
574#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
575 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
576 if (!fTryOnly)
577 {
578 int rc9;
579 RTNATIVETHREAD hNativeWriter;
580 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
581 if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
582 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
583 else
584 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
585 if (RT_FAILURE(rc9))
586 return rc9;
587 }
588#else
589 RTTHREAD hThreadSelf = NIL_RTTHREAD;
590#endif
591
592 /*
593 * Work the state.
594 */
595 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
596 uint64_t u64OldState = u64State;
597 for (;;)
598 {
599 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
600 {
601 /* It flows in the right direction, try to follow it before it changes. */
602 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
603 c++;
604 Assert(c < RTCSRW_CNT_MASK / 4);
605 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
606 u64State &= ~RTCSRW_CNT_RD_MASK;
607 u64State |= c << RTCSRW_CNT_RD_SHIFT;
608 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
609 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
610 }
611 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
612 {
613 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
614 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
615 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
616 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
617 {
618 Assert(!pThis->s.Core.fNeedReset);
619 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
620 }
621 }
622 else
623 {
624 /* Is the writer perhaps doing a read recursion? */
625 RTNATIVETHREAD hNativeWriter;
626 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
627 if (hNativeWriter != NIL_RTNATIVETHREAD)
628 {
629 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
630 if (hNativeSelf == hNativeWriter)
631 {
632#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
633 if (!fNoVal)
634 {
635 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
636 if (RT_FAILURE(rc9))
637 return rc9;
638 }
639#endif
640 uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
641 Assert(cReads < _16K);
642 AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
643 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
644 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
645 return VINF_SUCCESS; /* don't break! */
646 }
647 }
648
649 /*
650 * If we're only trying, return already.
651 */
652 if (fTryOnly)
653 {
654 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
655 return VERR_SEM_BUSY;
656 }
657
658#if defined(IN_RING3) || defined(IN_RING0)
659 /*
660 * Add ourselves to the queue and wait for the direction to change.
661 */
662 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
663 c++;
664 Assert(c < RTCSRW_CNT_MASK / 2);
665 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
666
667 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
668 cWait++;
669 Assert(cWait <= c);
670 Assert(cWait < RTCSRW_CNT_MASK / 2);
671 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
672
673 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
674 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
675
676 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
677 {
678 /*
679 * In ring-3 it's straightforward, just optimize the RTThreadSelf() call.
680 */
681# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
682 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
683# elif defined(IN_RING3)
684 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, RTThreadSelf());
685# else /* IN_RING0 */
686 /*
687 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
688 * account when waiting on contended locks.
689 */
690 PVMCPUCC pVCpu = VMMGetCpu(pVM);
691 if (pVCpu)
692 {
693 VMMR0EMTBLOCKCTX Ctx;
694 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
695 if (rc == VINF_SUCCESS)
696 {
697 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
698
699 rc = pdmCritSectRwEnterSharedContended(pVM, pVCpu, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
700
701 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
702 }
703 else
704 {
705 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
706 rc = pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
707 }
708 return rc;
709 }
710
711 /* Non-EMT. */
712 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
713 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
714# endif /* IN_RING0 */
715 }
716
717#else /* !IN_RING3 && !IN_RING0 */
718 /*
719 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
720 * back to ring-3 and do it there or return rcBusy.
721 */
722# error "Unused code."
723 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
724 if (rcBusy == VINF_SUCCESS)
725 {
726 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
727 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
728 * back to ring-3. Goes for both kinds of crit sects. */
729 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
730 }
731 return rcBusy;
732#endif /* !IN_RING3 && !IN_RING0 */
733 }
734
735 ASMNopPause();
736 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
737 { /* likely */ }
738 else
739 return VERR_SEM_DESTROYED;
740 ASMNopPause();
741
742 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
743 u64OldState = u64State;
744 }
745 /* not reached */
746}
747
748
749/**
750 * Enter a critical section with shared (read) access.
751 *
752 * @returns VBox status code.
753 * @retval VINF_SUCCESS on success.
754 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
755 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
756 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
757 * during the operation.
758 *
759 * @param pVM The cross context VM structure.
760 * @param pThis Pointer to the read/write critical section.
761 * @param rcBusy The status code to return when we're in RC or R0 and the
762 * section is busy. Pass VINF_SUCCESS to acquire the
763 * critical section thru a ring-3 call if necessary.
764 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
765 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
766 * RTCritSectRwEnterShared.
767 */
768VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
769{
770#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
771 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
772#else
773 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
774 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
775#endif
776}
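
/*
 * Illustrative sketch (not part of the original file): typical caller-side use of the
 * shared enter/leave pair.  MYDEVSTATE-style fields, the CritSectRw member and the
 * choice of VINF_IOM_R3_MMIO_READ as rcBusy are hypothetical; the point is that a
 * non-success rcBusy lets ring-0 hand the access back for a ring-3 retry instead of
 * blocking in a context where it must not.
 */
#if 0 /* illustrative sketch only */
int rc = PDMCritSectRwEnterShared(pVM, &pThis->CritSectRw, VINF_IOM_R3_MMIO_READ /* rcBusy */);
if (rc == VINF_SUCCESS)
{
    *puValue = pThis->uSomeSharedValue;     /* hypothetical field, read under the shared lock */
    PDMCritSectRwLeaveShared(pVM, &pThis->CritSectRw);
}
/* else: rc is rcBusy (retry in ring-3) or VERR_SEM_DESTROYED. */
return rc;
#endif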
777
778
779/**
780 * Enter a critical section with shared (read) access.
781 *
782 * @returns VBox status code.
783 * @retval VINF_SUCCESS on success.
784 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
785 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
786 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
787 * during the operation.
788 *
789 * @param pVM The cross context VM structure.
790 * @param pThis Pointer to the read/write critical section.
791 * @param rcBusy The status code to return when we're in RC or R0 and the
792 * section is busy. Pass VINF_SUCCESS to acquire the
793 * critical section thru a ring-3 call if necessary.
794 * @param uId Where we're entering the section.
795 * @param SRC_POS The source position.
796 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
797 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
798 * RTCritSectRwEnterSharedDebug.
799 */
800VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
801{
802 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
803#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
804 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
805#else
806 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
807 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
808#endif
809}
810
811
812/**
813 * Try enter a critical section with shared (read) access.
814 *
815 * @returns VBox status code.
816 * @retval VINF_SUCCESS on success.
817 * @retval VERR_SEM_BUSY if the critsect was owned.
818 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
819 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
820 * during the operation.
821 *
822 * @param pVM The cross context VM structure.
823 * @param pThis Pointer to the read/write critical section.
824 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
825 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
826 * RTCritSectRwTryEnterShared.
827 */
828VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
829{
830#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
831 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
832#else
833 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
834 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
835#endif
836}
837
838
839/**
840 * Try enter a critical section with shared (read) access.
841 *
842 * @returns VBox status code.
843 * @retval VINF_SUCCESS on success.
844 * @retval VERR_SEM_BUSY if the critsect was owned.
845 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
846 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
847 * during the operation.
848 *
849 * @param pVM The cross context VM structure.
850 * @param pThis Pointer to the read/write critical section.
851 * @param uId Where we're entering the section.
852 * @param SRC_POS The source position.
853 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
854 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
855 * RTCritSectRwTryEnterSharedDebug.
856 */
857VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
858{
859 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
860#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
861 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
862#else
863 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
864 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
865#endif
866}
867
868
869#ifdef IN_RING3
870/**
871 * Enters a PDM read/write critical section with shared (read) access.
872 *
873 * @returns VINF_SUCCESS if entered successfully.
874 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
875 * during the operation.
876 *
877 * @param pVM The cross context VM structure.
878 * @param pThis Pointer to the read/write critical section.
879 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
880 */
881VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
882{
883 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3);
884}
885#endif
886
887
888/**
889 * Leave a critical section held with shared access.
890 *
891 * @returns VBox status code.
892 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
893 * during the operation.
894 * @param pVM The cross context VM structure.
895 * @param pThis Pointer to the read/write critical section.
896 * @param fNoVal No validation records (i.e. queued release).
897 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
898 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
899 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
900 */
901#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
902static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
903#else
904DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
905#endif
906{
907 /*
908 * Validate handle.
909 */
910 AssertPtr(pThis);
911 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
912
913#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
914 NOREF(fNoVal);
915#endif
916
917 /*
918 * Check the direction and take action accordingly.
919 */
920#ifdef IN_RING0
921 PVMCPUCC pVCpu = NULL;
922#endif
923 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
924 uint64_t u64OldState = u64State;
925 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
926 {
927#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
928 if (fNoVal)
929 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
930 else
931 {
932 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
933 if (RT_FAILURE(rc9))
934 return rc9;
935 }
936#endif
937 for (;;)
938 {
939 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
940 AssertReturn(c > 0, VERR_NOT_OWNER);
941 c--;
942
943 if ( c > 0
944 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
945 {
946 /* Don't change the direction. */
947 u64State &= ~RTCSRW_CNT_RD_MASK;
948 u64State |= c << RTCSRW_CNT_RD_SHIFT;
949 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
950 break;
951 }
952 else
953 {
954#if defined(IN_RING3) || defined(IN_RING0)
955# ifdef IN_RING0
956 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
957 if (!pVCpu)
958 pVCpu = VMMGetCpu(pVM);
959 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
960 || VMMRZCallRing3IsEnabled(pVCpu)
961 || RTSemEventIsSignalSafe()
962 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
963 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
964 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
965 )
966# endif
967 {
968 /* Reverse the direction and signal the writer threads. */
969 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
970 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
971 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
972 {
973 int rc;
974# ifdef IN_RING0
975 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
976 if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
977 {
978 VMMR0EMTBLOCKCTX Ctx;
979 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
980 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
981
982 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
983
984 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
985 }
986 else
987# endif
988 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
989 AssertRC(rc);
990 return rc;
991 }
992 }
993#endif /* IN_RING3 || IN_RING0 */
994#ifndef IN_RING3
995# ifdef IN_RING0
996 else
997# endif
998 {
999 /* Queue the exit request (ring-3). */
1000# ifndef IN_RING0
1001 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1002# endif
1003 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
1004 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
1005 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
1006 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1007 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
1008 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1009 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
1010 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & PAGE_OFFSET_MASK)
1011 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
1012 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
1013 pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
1014 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1015 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1016 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1017 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
1018 break;
1019 }
1020#endif
1021 }
1022
1023 ASMNopPause();
1024 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1025 { }
1026 else
1027 return VERR_SEM_DESTROYED;
1028 ASMNopPause();
1029
1030 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1031 u64OldState = u64State;
1032 }
1033 }
1034 else
1035 {
1036 /*
1037 * Write direction. Check that it's the owner calling and that it has reads to undo.
1038 */
1039 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1040 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1041
1042 RTNATIVETHREAD hNativeWriter;
1043 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1044 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1045 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
1046#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1047 if (!fNoVal)
1048 {
1049 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
1050 if (RT_FAILURE(rc))
1051 return rc;
1052 }
1053#endif
1054 uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
1055 AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
1056 }
1057
1058 return VINF_SUCCESS;
1059}
1060
1061
1062/**
1063 * Leave a critical section held with shared access.
1064 *
1065 * @returns VBox status code.
1066 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1067 * during the operation.
1068 * @param pVM The cross context VM structure.
1069 * @param pThis Pointer to the read/write critical section.
1070 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
1071 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
1072 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
1073 */
1074VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
1075{
1076 return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
1077}
1078
1079
1080#if defined(IN_RING3) || defined(IN_RING0)
1081/**
1082 * PDMCritSectBothFF interface.
1083 *
1084 * @param pVM The cross context VM structure.
1085 * @param pThis Pointer to the read/write critical section.
1086 */
1087void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1088{
1089 pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
1090}
1091#endif
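
/*
 * Illustrative sketch (not part of the original file): when the ring-0 leave worker
 * above cannot signal hEvtWrite safely, it parks the section in
 * pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[] and raises VMCPU_FF_PDM_CRITSECT and
 * VMCPU_FF_TO_R3.  The ring-3 force-flag handler is then expected to drain that
 * queue roughly like this (a sketch, not the actual PDMCritSectBothFF code):
 */
#if 0 /* illustrative sketch only */
uint32_t const cQueued = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves;
for (uint32_t i = 0; i < cQueued; i++)
    pdmCritSectRwLeaveSharedQueued(pVM, pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i]);
pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves = 0;
#endif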
1092
1093
1094/**
1095 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
1096 *
1097 * @returns @a rc unless corrupted.
1098 * @param pThis Pointer to the read/write critical section.
1099 * @param rc The status to return.
1100 */
1101DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
1102{
1103 /*
1104 * Decrement the counts and return the error.
1105 */
1106 for (;;)
1107 {
1108 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1109 uint64_t const u64OldState = u64State;
1110 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1111 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
1112 c--;
1113 u64State &= ~RTCSRW_CNT_WR_MASK;
1114 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1115 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1116 return rc;
1117
1118 ASMNopPause();
1119 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1120 ASMNopPause();
1121 }
1122}
1123
1124
1125/**
1126 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
1127 * gotten exclusive ownership of the critical section.
1128 */
1129DECL_FORCE_INLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
1130 bool fNoVal, RTTHREAD hThreadSelf)
1131{
1132 RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
1133 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1134
1135#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1136 pThis->s.Core.cWriteRecursions = 1;
1137#else
1138 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
1139#endif
1140 Assert(pThis->s.Core.cWriterReads == 0);
1141
1142#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1143 if (!fNoVal)
1144 {
1145 if (hThreadSelf == NIL_RTTHREAD)
1146 hThreadSelf = RTThreadSelfAutoAdopt();
1147 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
1148 }
1149#endif
1150 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1151 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
1152 return VINF_SUCCESS;
1153}
1154
1155
1156#if defined(IN_RING3) || defined(IN_RING0)
1157/**
1158 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
1159 * contended.
1160 */
1161static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
1162 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
1163{
1164 RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);
1165
1166 PSUPDRVSESSION const pSession = pVM->pSession;
1167 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pThis->s.Core.hEvtWrite;
1168# ifdef IN_RING0
1169 uint64_t const tsStart = RTTimeNanoTS();
1170 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
1171 uint64_t cNsMaxTotal = cNsMaxTotalDef;
1172 uint32_t cMsMaxOne = RT_MS_5SEC;
1173 bool fNonInterruptible = false;
1174# endif
1175
1176 for (uint32_t iLoop = 0; ; iLoop++)
1177 {
1178 /*
1179 * Wait for our turn.
1180 */
1181 int rc;
1182# ifdef IN_RING3
1183# ifdef PDMCRITSECTRW_STRICT
1184 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
1185 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
1186 if (RT_SUCCESS(rc))
1187 { /* likely */ }
1188 else
1189 return pdmCritSectRwEnterExclBailOut(pThis, rc);
1190# else
1191 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
1192# endif
1193# endif
1194
1195 for (;;)
1196 {
1197 /*
1198 * We always wait with a timeout so we can re-check the structure sanity
1199 * and not get stuck waiting on a corrupt or deleted section.
1200 */
1201# ifdef IN_RING3
1202 rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
1203# else
1204 rc = !fNonInterruptible
1205 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
1206 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
1207 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p)\n",
1208 pThis, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter));
1209# endif
1210 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1211 { /* likely */ }
1212 else
1213 {
1214# ifdef IN_RING3
1215 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1216# endif
1217 return VERR_SEM_DESTROYED;
1218 }
1219 if (RT_LIKELY(rc == VINF_SUCCESS))
1220 break;
1221
1222 /*
1223 * Timeout and interrupted waits need careful handling in ring-0
1224 * because we're cooperating with ring-3 on this critical section
1225 * and thus need to make absolutely sure we won't get stuck here.
1226 *
1227 * The r0 interrupted case means something is pending (termination,
1228 * signal, APC, debugger, whatever), so we must try our best to
1229 * return to the caller and to ring-3 so it can be dealt with.
1230 */
1231 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
1232 {
1233# ifdef IN_RING0
1234 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
1235 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
1236 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
1237 ("rcTerm=%Rrc\n", rcTerm));
1238 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
1239 cNsMaxTotal = RT_NS_1MIN;
1240
1241 if (rc == VERR_TIMEOUT)
1242 {
1243 /* Try to get out of here with a non-VINF_SUCCESS status if
1244 the thread is terminating or if the timeout has been exceeded. */
1245 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrTimeout);
1246 if ( rcTerm == VINF_THREAD_IS_TERMINATING
1247 || cNsElapsed > cNsMaxTotal)
1248 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1249 }
1250 else
1251 {
1252 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
1253 we will try non-interruptible sleep for a while to help resolve the issue
1254 w/o guru'ing. */
1255 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrInterrupted);
1256 if ( rcTerm != VINF_THREAD_IS_TERMINATING
1257 && rcBusy == VINF_SUCCESS
1258 && pVCpu != NULL
1259 && cNsElapsed <= cNsMaxTotal)
1260 {
1261 if (!fNonInterruptible)
1262 {
1263 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits);
1264 fNonInterruptible = true;
1265 cMsMaxOne = 32;
1266 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
1267 if (cNsLeft > RT_NS_10SEC)
1268 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
1269 }
1270 }
1271 else
1272 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1273 }
1274# else /* IN_RING3 */
1275 RT_NOREF(pVM, pVCpu, rcBusy);
1276# endif /* IN_RING3 */
1277 }
1278 /*
1279 * Any other return code is fatal.
1280 */
1281 else
1282 {
1283# ifdef IN_RING3
1284 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1285# endif
1286 AssertMsgFailed(("rc=%Rrc\n", rc));
1287 return RT_FAILURE_NP(rc) ? rc : -rc;
1288 }
1289 }
1290
1291# ifdef IN_RING3
1292 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1293# endif
1294
1295 /*
1296 * Try take exclusive write ownership.
1297 */
1298 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1299 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1300 {
1301 bool fDone;
1302 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1303 if (fDone)
1304 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1305 }
1306 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
1307 }
1308}
1309#endif /* IN_RING3 || IN_RING0 */
1310
1311
1312/**
1313 * Worker that enters a read/write critical section with exclusive access.
1314 *
1315 * @returns VBox status code.
1316 * @param pVM The cross context VM structure.
1317 * @param pThis Pointer to the read/write critical section.
1318 * @param rcBusy The busy return code for ring-0 and ring-3.
1319 * @param fTryOnly Only try enter it, don't wait.
1320 * @param pSrcPos The source position. (Can be NULL.)
1321 * @param fNoVal No validation records.
1322 */
1323#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
1324static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1325 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1326#else
1327DECLASM(int) pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1328 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal);
1329DECLASM(int) StkBack_pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1330 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1331#endif
1332{
1333 /*
1334 * Validate input.
1335 */
1336 AssertPtr(pThis);
1337 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1338
1339 RTTHREAD hThreadSelf = NIL_RTTHREAD;
1340#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1341 if (!fTryOnly)
1342 {
1343 hThreadSelf = RTThreadSelfAutoAdopt();
1344 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
1345 if (RT_FAILURE(rc9))
1346 return rc9;
1347 }
1348#endif
1349
1350 /*
1351 * Check if we're already the owner and just recursing.
1352 */
1353 RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1354 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1355 RTNATIVETHREAD hNativeWriter;
1356 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1357 if (hNativeSelf == hNativeWriter)
1358 {
1359 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1360#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1361 if (!fNoVal)
1362 {
1363 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
1364 if (RT_FAILURE(rc9))
1365 return rc9;
1366 }
1367#endif
1368 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1369#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1370 uint32_t const cDepth = ++pThis->s.Core.cWriteRecursions;
1371#else
1372 uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
1373#endif
1374 AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
1375 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
1376 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
1377 return VINF_SUCCESS;
1378 }
1379
1380 /*
1381 * First we try to grab an idle critical section using 128-bit atomics.
1382 */
1383 /** @todo This could be moved up before the recursion check. */
1384 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1385#ifdef RTASM_HAVE_CMP_WRITE_U128
1386 if ( (u64State & ~RTCSRW_DIR_MASK) == 0
1387 && pdmCritSectRwIsCmpWriteU128Supported())
1388 {
1389 RTCRITSECTRWSTATE OldState;
1390 OldState.s.u64State = u64State;
1391 OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1392 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1393
1394 RTCRITSECTRWSTATE NewState;
1395 NewState.s.u64State = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1396 NewState.s.hNativeWriter = hNativeSelf;
1397
1398 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1399 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1400
1401 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1402 }
1403#endif
1404
1405 /*
1406 * Do it step by step. Update the state to reflect our desire.
1407 */
1408 uint64_t u64OldState = u64State;
1409
1410 for (;;)
1411 {
1412 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1413 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
1414 {
1415 /* It flows in the right direction, try to follow it before it changes. */
1416 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1417 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1418 c++;
1419 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1420 u64State &= ~RTCSRW_CNT_WR_MASK;
1421 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1422 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1423 break;
1424 }
1425 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
1426 {
1427 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
1428 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1429 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1430 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1431 break;
1432 }
1433 else if (fTryOnly)
1434 {
1435 /* Wrong direction and we're not supposed to wait, just return. */
1436 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1437 return VERR_SEM_BUSY;
1438 }
1439 else
1440 {
1441 /* Add ourselves to the write count and break out to do the wait. */
1442 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1443 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1444 c++;
1445 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1446 u64State &= ~RTCSRW_CNT_WR_MASK;
1447 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1448 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1449 break;
1450 }
1451
1452 ASMNopPause();
1453
1454 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1455 { /* likely */ }
1456 else
1457 return VERR_SEM_DESTROYED;
1458
1459 ASMNopPause();
1460 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1461 u64OldState = u64State;
1462 }
1463
1464 /*
1465 * If we're in write mode now try to grab the ownership. Play fair if there
1466 * are threads already waiting.
1467 */
1468 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1469 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
1470 || fTryOnly);
1471 if (fDone)
1472 {
1473 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1474 if (fDone)
1475 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1476 }
1477
1478 /*
1479 * Okay, we have contention and will have to wait unless we're just trying.
1480 */
1481 if (fTryOnly)
1482 {
1483 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); /** @todo different statistics for this */
1484 return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
1485 }
1486
1487 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1488
1489 /*
1490 * Ring-3 is pretty straightforward.
1491 */
1492#if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
1493 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
1494#elif defined(IN_RING3)
1495 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());
1496
1497#elif defined(IN_RING0)
1498 /*
1499 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
1500 * account when waiting on contended locks.
1501 */
1502 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1503 if (pVCpu)
1504 {
1505 VMMR0EMTBLOCKCTX Ctx;
1506 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
1507 if (rc == VINF_SUCCESS)
1508 {
1509 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1510
1511 rc = pdmR3R0CritSectRwEnterExclContended(pVM, pVCpu, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1512
1513 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1514 }
1515 else
1516 {
1517 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
1518 rc = pdmCritSectRwEnterExclBailOut(pThis, rc);
1519 }
1520 return rc;
1521 }
1522
1523 /* Non-EMT. */
1524 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1525 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1526
1527#else
1528# error "Unused."
1529 /*
1530 * Raw-mode: Call host and take it there if rcBusy is VINF_SUCCESS.
1531 */
1532 rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
1533 if (rcBusy == VINF_SUCCESS)
1534 {
1535 Assert(!fTryOnly);
1536 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1537        /** @todo Should actually do this via VMMR0.cpp instead of going all the way
1538         *        back to ring-3. Goes for both kinds of crit sects. */
1539 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
1540 }
1541 return rcBusy;
1542#endif
1543}
1544
1545
1546/**
1547 * Tries to enter a critical section with exclusive (write) access.
1548 *
1549 * @returns VBox status code.
1550 * @retval VINF_SUCCESS on success.
1551 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1552 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1553 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1554 * during the operation.
1555 *
1556 * @param pVM The cross context VM structure.
1557 * @param pThis Pointer to the read/write critical section.
1558 * @param rcBusy The status code to return when we're in RC or R0 and the
1559 * section is busy. Pass VINF_SUCCESS to acquire the
1560 * critical section thru a ring-3 call if necessary.
1561 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1562 * PDMCritSectRwTryEnterExclDebug,
1563 * PDMCritSectEnterDebug, PDMCritSectEnter,
1564 * RTCritSectRwEnterExcl.
1565 */
1566VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
1567{
1568#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1569    return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1570#else
1571 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1572    return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1573#endif
1574}
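
/*
 * A minimal usage sketch, assuming a hypothetical MYSHAREDSTATE type with an
 * embedded CritSectRw member: the typical enter/leave pairing for exclusive
 * access.  Passing VINF_SUCCESS as rcBusy instead would fall back to a ring-3
 * call when contended in R0/RC (see the doc comment above).
 */
#if 0 /* illustrative only */
static int exampleUpdateExclusive(PVMCC pVM, MYSHAREDSTATE *pShared)
{
    int rc = PDMCritSectRwEnterExcl(pVM, &pShared->CritSectRw, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        pShared->cUpdates++;    /* data protected by the section */
        rc = PDMCritSectRwLeaveExcl(pVM, &pShared->CritSectRw);
    }
    /* else: VERR_SEM_BUSY (rcBusy) when contended in R0/RC, or VERR_SEM_DESTROYED. */
    return rc;
}
#endif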
1575
1576
1577/**
1578 * Tries to enter a critical section with exclusive (write) access.
1579 *
1580 * @returns VBox status code.
1581 * @retval VINF_SUCCESS on success.
1582 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1583 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1584 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1585 * during the operation.
1586 *
1587 * @param pVM The cross context VM structure.
1588 * @param pThis Pointer to the read/write critical section.
1589 * @param rcBusy The status code to return when we're in RC or R0 and the
1590 * section is busy. Pass VINF_SUCCESS to acquire the
1591 * critical section thru a ring-3 call if necessary.
1592 * @param uId Where we're entering the section.
1593 * @param SRC_POS The source position.
1594 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1595 * PDMCritSectRwTryEnterExclDebug,
1596 * PDMCritSectEnterDebug, PDMCritSectEnter,
1597 * RTCritSectRwEnterExclDebug.
1598 */
1599VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1600{
1601 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1602#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1603    return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1604#else
1605 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1606    return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1607#endif
1608}
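
/*
 * A sketch of the debug variant, which only differs by feeding the lock
 * validator a source position; using 0 for uId and RT_SRC_POS for the
 * position parameters is an illustrative choice, not the only valid one.
 */
#if 0 /* illustrative only */
    rc = PDMCritSectRwEnterExclDebug(pVM, &pShared->CritSectRw, VERR_SEM_BUSY,
                                     0 /*uId*/, RT_SRC_POS);
#endif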
1609
1610
1611/**
1612 * Tries to enter a critical section with exclusive (write) access.
1613 *
1614 * @retval VINF_SUCCESS on success.
1615 * @retval VERR_SEM_BUSY if the critsect was owned.
1616 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1617 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1618 * during the operation.
1619 *
1620 * @param pVM The cross context VM structure.
1621 * @param pThis Pointer to the read/write critical section.
1622 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1623 * PDMCritSectRwEnterExclDebug,
1624 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1625 * RTCritSectRwTryEnterExcl.
1626 */
1627VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1628{
1629#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1630    return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1631#else
1632 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1633    return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1634#endif
1635}
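
/*
 * A non-blocking sketch: try to take the write lock and defer the work when
 * the section is busy.  exampleDeferWork and the pShared layout are
 * hypothetical.
 */
#if 0 /* illustrative only */
    int rc = PDMCritSectRwTryEnterExcl(pVM, &pShared->CritSectRw);
    if (RT_SUCCESS(rc))
    {
        pShared->cUpdates++;
        PDMCritSectRwLeaveExcl(pVM, &pShared->CritSectRw);
    }
    else if (rc == VERR_SEM_BUSY)
        exampleDeferWork(pShared);  /* do not block; retry later */
#endif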
1636
1637
1638/**
1639 * Tries to enter a critical section with exclusive (write) access.
1640 *
1641 * @retval VINF_SUCCESS on success.
1642 * @retval VERR_SEM_BUSY if the critsect was owned.
1643 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1644 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1645 * during the operation.
1646 *
1647 * @param pVM The cross context VM structure.
1648 * @param pThis Pointer to the read/write critical section.
1649 * @param uId Where we're entering the section.
1650 * @param SRC_POS The source position.
1651 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1652 * PDMCritSectRwEnterExclDebug,
1653 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1654 * RTCritSectRwTryEnterExclDebug.
1655 */
1656VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1657{
1658 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1659#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1660    return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1661#else
1662 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1663    return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1664#endif
1665}
1666
1667
1668#ifdef IN_RING3
1669/**
1670 * Enters a PDM read/write critical section with exclusive (write) access.
1671 *
1672 * @returns VINF_SUCCESS if entered successfully.
1673 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1674 * during the operation.
1675 *
1676 * @param pVM The cross context VM structure.
1677 * @param pThis Pointer to the read/write critical section.
1678 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1679 */
1680VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1681{
1682    return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
1683}
1684#endif /* IN_RING3 */
1685
1686
1687/**
1688 * Leaves a critical section held exclusively.
1689 *
1690 * @returns VBox status code.
1691 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1692 * during the operation.
1693 * @param pVM The cross context VM structure.
1694 * @param pThis Pointer to the read/write critical section.
1695 * @param fNoVal No validation records (i.e. queued release).
1696 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1697 */
1698#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
1699static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1700#else
1701DECLASM(int) pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
1702DECLASM(int) StkBack_pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1703#endif
1704{
1705 /*
1706 * Validate handle.
1707 */
1708 AssertPtr(pThis);
1709 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1710
1711#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1712 NOREF(fNoVal);
1713#endif
1714
1715 /*
1716 * Check ownership.
1717 */
1718 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1719 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1720
1721 RTNATIVETHREAD hNativeWriter;
1722 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1723 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1724
1725
1726 /*
1727 * Unwind one recursion. Not the last?
1728 */
1729 if (pThis->s.Core.cWriteRecursions != 1)
1730 {
1731#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1732 if (fNoVal)
1733 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1734 else
1735 {
1736 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1737 if (RT_FAILURE(rc9))
1738 return rc9;
1739 }
1740#endif
1741#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1742 uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
1743#else
1744 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1745#endif
1746 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1747 return VINF_SUCCESS;
1748 }
1749
1750
1751 /*
1752 * Final recursion.
1753 */
1754 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1755#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1756 if (fNoVal)
1757 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1758 else
1759 {
1760 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1761 if (RT_FAILURE(rc9))
1762 return rc9;
1763 }
1764#endif
1765
1766
1767#ifdef RTASM_HAVE_CMP_WRITE_U128
1768 /*
1769 * See if we can get out w/o any signalling as this is a common case.
1770 */
1771 if (pdmCritSectRwIsCmpWriteU128Supported())
1772 {
1773 RTCRITSECTRWSTATE OldState;
1774 OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1775 if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
1776 {
1777 OldState.s.hNativeWriter = hNativeSelf;
1778 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1779
1780 RTCRITSECTRWSTATE NewState;
1781 NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
1782 NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1783
1784# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1785 pThis->s.Core.cWriteRecursions = 0;
1786# else
1787 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1788# endif
1789 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1790
1791 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1792 return VINF_SUCCESS;
1793
1794            /* Bail out: the state changed (e.g. a waiter arrived); restore the recursion count and take the slow path below. */
1795 pThis->s.Core.cWriteRecursions = 1;
1796 }
1797 }
1798#endif /* RTASM_HAVE_CMP_WRITE_U128 */
1799
1800
1801#if defined(IN_RING3) || defined(IN_RING0)
1802 /*
1803     * Ring-3: Straightforward, just update the state and signal waiters if necessary.
1804 * Ring-0: Try leave for real, depends on host and context.
1805 */
1806# ifdef IN_RING0
1807 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
1808 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1809 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
1810 || VMMRZCallRing3IsEnabled(pVCpu)
1811 || RTSemEventIsSignalSafe()
1812 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
1813 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
1814            && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
1815 )
1816# endif
1817 {
1818# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1819 pThis->s.Core.cWriteRecursions = 0;
1820# else
1821 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1822# endif
1823 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1824 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1825
1826 for (;;)
1827 {
1828 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1829 uint64_t u64OldState = u64State;
1830
1831 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1832 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1833 c--;
1834
1835 if ( c > 0
1836 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1837 {
1838 /*
1839 * Don't change the direction, wake up the next writer if any.
1840 */
1841 u64State &= ~RTCSRW_CNT_WR_MASK;
1842 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1843 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1844 {
1845 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1846 int rc;
1847 if (c == 0)
1848 rc = VINF_SUCCESS;
1849# ifdef IN_RING0
1850 else if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
1851 {
1852 VMMR0EMTBLOCKCTX Ctx;
1853 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1854 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1855
1856 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1857
1858 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1859 }
1860# endif
1861 else
1862 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1863 AssertRC(rc);
1864 return rc;
1865 }
1866 }
1867 else
1868 {
1869 /*
1870 * Reverse the direction and signal the reader threads.
1871 */
1872 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1873 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1874 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1875 {
1876 Assert(!pThis->s.Core.fNeedReset);
1877 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1878 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1879
1880 int rc;
1881# ifdef IN_RING0
1882 if (!RTSemEventMultiIsSignalSafe() && pVCpu != NULL)
1883 {
1884 VMMR0EMTBLOCKCTX Ctx;
1885 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1886 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1887
1888 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1889
1890 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1891 }
1892 else
1893# endif
1894 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1895 AssertRC(rc);
1896 return rc;
1897 }
1898 }
1899
1900 ASMNopPause();
1901 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1902 { /*likely*/ }
1903 else
1904 return VERR_SEM_DESTROYED;
1905 ASMNopPause();
1906 }
1907 /* not reached! */
1908 }
1909#endif /* IN_RING3 || IN_RING0 */
1910
1911
1912#ifndef IN_RING3
1913 /*
1914 * Queue the requested exit for ring-3 execution.
1915 */
1916# ifndef IN_RING0
1917 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1918# endif
1919 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1920    LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
1921 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1922 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1923 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1924 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1925 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
1926 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & PAGE_OFFSET_MASK)
1927 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
1928 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1929 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1930 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1931 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1932 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1933 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1934 return VINF_SUCCESS;
1935#endif
1936}
1937
1938
1939/**
1940 * Leaves a critical section held exclusively.
1941 *
1942 * @returns VBox status code.
1943 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1944 * during the operation.
1945 * @param pVM The cross context VM structure.
1946 * @param pThis Pointer to the read/write critical section.
1947 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1948 */
1949VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1950{
1951 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
1952}
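
/*
 * A recursion sketch (pShared is hypothetical): exclusive enters may recurse
 * up to PDM_CRITSECTRW_MAX_RECURSIONS and each enter must be paired with a
 * leave; only the final leave clears the writer and wakes any waiters.
 */
#if 0 /* illustrative only */
    PDMCritSectRwEnterExcl(pVM, &pShared->CritSectRw, VERR_SEM_BUSY);
    PDMCritSectRwEnterExcl(pVM, &pShared->CritSectRw, VERR_SEM_BUSY);   /* recursion */
    Assert(PDMCritSectRwGetWriteRecursion(&pShared->CritSectRw) == 2);
    PDMCritSectRwLeaveExcl(pVM, &pShared->CritSectRw);                  /* still owned */
    PDMCritSectRwLeaveExcl(pVM, &pShared->CritSectRw);                  /* released here */
#endif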
1953
1954
1955#if defined(IN_RING3) || defined(IN_RING0)
1956/**
1957 * PDMCritSectBothFF interface.
1958 *
1959 * @param pVM The cross context VM structure.
1960 * @param pThis Pointer to the read/write critical section.
1961 */
1962void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1963{
1964 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
1965}
1966#endif
1967
1968
1969/**
1970 * Checks the caller is the exclusive (write) owner of the critical section.
1971 *
1972 * @retval true if owner.
1973 * @retval false if not owner.
1974 * @param pVM The cross context VM structure.
1975 * @param pThis Pointer to the read/write critical section.
1976 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1977 * RTCritSectRwIsWriteOwner.
1978 */
1979VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
1980{
1981 /*
1982 * Validate handle.
1983 */
1984 AssertPtr(pThis);
1985 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1986
1987 /*
1988 * Check ownership.
1989 */
1990 RTNATIVETHREAD hNativeWriter;
1991 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1992 if (hNativeWriter == NIL_RTNATIVETHREAD)
1993 return false;
1994 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1995}
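
/*
 * A typical assertion sketch (pShared is hypothetical): verify that the
 * caller holds the write lock before touching the data it protects.
 */
#if 0 /* illustrative only */
    Assert(PDMCritSectRwIsWriteOwner(pVM, &pShared->CritSectRw));
    pShared->cUpdates++;
#endif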
1996
1997
1998/**
1999 * Checks if the caller is one of the read owners of the critical section.
2000 *
2001 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
2002 * enabled. Meaning, the answer is not trustworthy unless
2003 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
2004 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
2005 * creating the semaphore. And finally, if you used a locking class,
2006 * don't disable deadlock detection by setting cMsMinDeadlock to
2007 * RT_INDEFINITE_WAIT.
2008 *
2009 * In short, only use this for assertions.
2010 *
2011 * @returns @c true if reader, @c false if not.
2012 * @param pVM The cross context VM structure.
2013 * @param pThis Pointer to the read/write critical section.
2014 * @param fWannaHear What you'd like to hear when lock validation is not
2015 * available. (For avoiding asserting all over the place.)
2016 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
2017 */
2018VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
2019{
2020 /*
2021 * Validate handle.
2022 */
2023 AssertPtr(pThis);
2024 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
2025
2026 /*
2027 * Inspect the state.
2028 */
2029 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2030 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
2031 {
2032 /*
2033 * It's in write mode, so we can only be a reader if we're also the
2034 * current writer.
2035 */
2036 RTNATIVETHREAD hWriter;
2037 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hWriter);
2038 if (hWriter == NIL_RTNATIVETHREAD)
2039 return false;
2040 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
2041 }
2042
2043 /*
2044 * Read mode. If there are no current readers, then we cannot be a reader.
2045 */
2046 if (!(u64State & RTCSRW_CNT_RD_MASK))
2047 return false;
2048
2049#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
2050 /*
2051 * Ask the lock validator.
2052 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
2053 */
2054 NOREF(fWannaHear);
2055 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
2056#else
2057 /*
2058     * Ok, we don't know, just tell the caller what they want to hear.
2059 */
2060 return fWannaHear;
2061#endif
2062}
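
/*
 * An assertion-only sketch, per the note above; passing true for fWannaHear
 * keeps it quiet in builds without lock validation.  pShared is hypothetical.
 */
#if 0 /* illustrative only */
    Assert(PDMCritSectRwIsReadOwner(pVM, &pShared->CritSectRw, true /*fWannaHear*/));
#endif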
2063
2064
2065/**
2066 * Gets the write recursion count.
2067 *
2068 * @returns The write recursion count (0 if bad critsect).
2069 * @param pThis Pointer to the read/write critical section.
2070 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
2071 * RTCritSectRwGetWriteRecursion.
2072 */
2073VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
2074{
2075 /*
2076 * Validate handle.
2077 */
2078 AssertPtr(pThis);
2079 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2080
2081 /*
2082 * Return the requested data.
2083 */
2084 return pThis->s.Core.cWriteRecursions;
2085}
2086
2087
2088/**
2089 * Gets the read recursion count of the current writer.
2090 *
2091 * @returns The read recursion count (0 if bad critsect).
2092 * @param pThis Pointer to the read/write critical section.
2093 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
2094 * RTCritSectRwGetWriterReadRecursion.
2095 */
2096VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
2097{
2098 /*
2099 * Validate handle.
2100 */
2101 AssertPtr(pThis);
2102 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2103
2104 /*
2105 * Return the requested data.
2106 */
2107 return pThis->s.Core.cWriterReads;
2108}
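
/*
 * A sketch of writer read recursion (pShared is hypothetical, and the shared
 * enter/leave calls are assumed to mirror the exclusive API): the count only
 * grows when the current writer also enters the section for shared access,
 * and all such reads must be released before the final write leave.
 */
#if 0 /* illustrative only */
    PDMCritSectRwEnterExcl(pVM, &pShared->CritSectRw, VERR_SEM_BUSY);
    PDMCritSectRwEnterShared(pVM, &pShared->CritSectRw, VERR_SEM_BUSY); /* read within write */
    Assert(PDMCritSectRwGetWriterReadRecursion(&pShared->CritSectRw) == 1);
    PDMCritSectRwLeaveShared(pVM, &pShared->CritSectRw);    /* must precede the final write leave */
    PDMCritSectRwLeaveExcl(pVM, &pShared->CritSectRw);
#endif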
2109
2110
2111/**
2112 * Gets the current number of reads.
2113 *
2114 * This includes all read recursions, so it might be higher than the number of
2115 * read owners. It does not include reads done by the current writer.
2116 *
2117 * @returns The read count (0 if bad critsect).
2118 * @param pThis Pointer to the read/write critical section.
2119 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
2120 * RTCritSectRwGetReadCount.
2121 */
2122VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
2123{
2124 /*
2125 * Validate input.
2126 */
2127 AssertPtr(pThis);
2128 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2129
2130 /*
2131 * Return the requested data.
2132 */
2133 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2134 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
2135 return 0;
2136 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
2137}
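
/*
 * A diagnostics sketch (pShared and the log text are hypothetical): the read
 * count reports 0 while the section is in write mode.
 */
#if 0 /* illustrative only */
    LogRel(("example: %u concurrent readers\n", PDMCritSectRwGetReadCount(&pShared->CritSectRw)));
#endif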
2138
2139
2140/**
2141 * Checks if the read/write critical section is initialized or not.
2142 *
2143 * @retval true if initialized.
2144 * @retval false if not initialized.
2145 * @param pThis Pointer to the read/write critical section.
2146 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
2147 */
2148VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
2149{
2150 AssertPtr(pThis);
2151 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
2152}
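
/*
 * A guard sketch (pShared and the VERR_WRONG_ORDER choice are hypothetical):
 * refuse to use a section that was never initialized or has been deleted.
 */
#if 0 /* illustrative only */
    if (!PDMCritSectRwIsInitialized(&pShared->CritSectRw))
        return VERR_WRONG_ORDER;
#endif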
2153