VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp @ 93628

Last change on this file since 93628 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 83.9 KB
Line 
1/* $Id: PDMAllCritSectRw.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * PDM - Read/Write Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2009-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECTRW
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37#endif
38#if defined(IN_RING3) || defined(IN_RING0)
39# include <iprt/semaphore.h>
40# include <iprt/thread.h>
41#endif
42#ifdef IN_RING0
43# include <iprt/time.h>
44#endif
45#ifdef RT_ARCH_AMD64
46# include <iprt/x86.h>
47#endif
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53#if 0 /* unused */
54/** The number of loops to spin for shared access in ring-3. */
55#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
56/** The number of loops to spin for shared access in ring-0. */
57#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
58/** The number of loops to spin for shared access in the raw-mode context. */
59#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
60
61/** The number of loops to spin for exclusive access in ring-3. */
62#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
63/** The number of loops to spin for exclusive access in ring-0. */
64#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
65/** The number of loops to spin for exclusive access in the raw-mode context. */
66#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
67#endif
68
69/** Max number of write or write/read recursions. */
70#define PDM_CRITSECTRW_MAX_RECURSIONS _1M
71
72/** Skips some of the overly paranoid atomic reads and updates.
73 * Makes some assumptions about cache coherence, though not brave enough to
74 * drop the final atomic update. */
75#define PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
76
77/** For reading RTCRITSECTRWSTATE::s::u64State. */
78#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
79# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicUoReadU64(a_pu64State)
80#else
81# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicReadU64(a_pu64State)
82#endif
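
/* Illustrative sketch: the 64-bit state word read via PDMCRITSECTRW_READ_STATE
 * packs the reader count, the waiting-reader count, the writer count and the
 * current direction using the RTCSRW_* masks and shifts used by the workers
 * below. The helper is hypothetical and only meant to show the decoding. */
#if 0
static void pdmCritSectRwExampleDecodeState(uint64_t u64State)
{
    uint64_t const cReaders     = (u64State & RTCSRW_CNT_RD_MASK)      >> RTCSRW_CNT_RD_SHIFT;
    uint64_t const cWaitReaders = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    uint64_t const cWriters     = (u64State & RTCSRW_CNT_WR_MASK)      >> RTCSRW_CNT_WR_SHIFT;
    bool     const fWriteDir    = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
    Log(("u64State=%#RX64: %RU64 readers (%RU64 waiting), %RU64 writers, direction=%s\n",
         u64State, cReaders, cWaitReaders, cWriters, fWriteDir ? "write" : "read"));
}
#endif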
83
84
85/* Undefine the automatic VBOX_STRICT API mappings. */
86#undef PDMCritSectRwEnterExcl
87#undef PDMCritSectRwTryEnterExcl
88#undef PDMCritSectRwEnterShared
89#undef PDMCritSectRwTryEnterShared
90
91
92/*********************************************************************************************************************************
93* Global Variables *
94*********************************************************************************************************************************/
95#if defined(RTASM_HAVE_CMP_WRITE_U128) && defined(RT_ARCH_AMD64)
96static int32_t g_fCmpWriteSupported = -1;
97#endif
98
99
100/*********************************************************************************************************************************
101* Internal Functions *
102*********************************************************************************************************************************/
103static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
104
105
106#ifdef RTASM_HAVE_CMP_WRITE_U128
107
108# ifdef RT_ARCH_AMD64
109/**
110 * Called once to initialize g_fCmpWriteSupported.
111 */
112DECL_NO_INLINE(static, bool) pdmCritSectRwIsCmpWriteU128SupportedSlow(void)
113{
114 bool const fCmpWriteSupported = RT_BOOL(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16);
115 ASMAtomicWriteS32(&g_fCmpWriteSupported, fCmpWriteSupported);
116 return fCmpWriteSupported;
117}
118# endif
119
120
121/**
122 * Indicates whether hardware actually supports 128-bit compare & write.
123 */
124DECL_FORCE_INLINE(bool) pdmCritSectRwIsCmpWriteU128Supported(void)
125{
126# ifdef RT_ARCH_AMD64
127 int32_t const fCmpWriteSupported = g_fCmpWriteSupported;
128 if (RT_LIKELY(fCmpWriteSupported >= 0))
129 return fCmpWriteSupported != 0;
130 return pdmCritSectRwIsCmpWriteU128SupportedSlow();
131# else
132 return true;
133# endif
134}
135
136#endif /* RTASM_HAVE_CMP_WRITE_U128 */
137
138/**
139 * Gets the ring-3 native thread handle of the calling thread.
140 *
141 * @returns native thread handle (ring-3).
142 * @param pVM The cross context VM structure.
143 * @param pThis The read/write critical section. This is only used in
144 * R0 and RC.
145 */
146DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
147{
148#ifdef IN_RING3
149 RT_NOREF(pVM, pThis);
150 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
151
152#elif defined(IN_RING0)
153 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
154 NIL_RTNATIVETHREAD);
155 RTNATIVETHREAD hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
156 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
157
158#else
159# error "invalid context"
160#endif
161 return hNativeSelf;
162}
163
164
165DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
166{
167 ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
168 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
169 return VERR_PDM_CRITSECTRW_IPE;
170}
171
172
173
174#ifdef IN_RING3
175/**
176 * Changes the lock validator sub-class of the read/write critical section.
177 *
178 * It is recommended to make sure that nobody is using this critical section
179 * while changing the value.
180 *
181 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
182 * lock validator isn't compiled in or either of the parameters is
183 * invalid.
184 * @param pThis Pointer to the read/write critical section.
185 * @param uSubClass The new sub-class value.
186 */
187VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
188{
189 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
190 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
191# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
192 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
193
194 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
195 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
196# else
197 NOREF(uSubClass);
198 return RTLOCKVAL_SUB_CLASS_INVALID;
199# endif
200}
201#endif /* IN_RING3 */
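
/* Illustrative sketch: ring-3 code could give related sections distinct lock
 * validator sub-classes so the validator accepts taking them in a fixed order.
 * Assumes the RTLOCKVAL_SUB_CLASS_USER base from iprt/lockvalidator.h; the
 * helper and the two-section scenario are hypothetical. */
#if 0
static void pdmR3CritSectRwExampleSetSubClass(PPDMCRITSECTRW pCritSectOuter, PPDMCRITSECTRW pCritSectInner)
{
    PDMR3CritSectRwSetSubClass(pCritSectOuter, RTLOCKVAL_SUB_CLASS_USER);
    PDMR3CritSectRwSetSubClass(pCritSectInner, RTLOCKVAL_SUB_CLASS_USER + 1);
}
#endif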
202
203
204/**
205 * Worker for pdmCritSectRwEnterShared returning with read-ownership of the CS.
206 */
207DECL_FORCE_INLINE(int) pdmCritSectRwEnterSharedGotIt(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
208 bool fNoVal, RTTHREAD hThreadSelf)
209{
210#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
211 if (!fNoVal)
212 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
213#else
214 RT_NOREF(pSrcPos, fNoVal, hThreadSelf);
215#endif
216
217 /* got it! */
218 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
219 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
220 return VINF_SUCCESS;
221}
222
223/**
224 * Worker for pdmCritSectRwEnterShared and pdmCritSectRwEnterSharedBailOut
225 * that decrements the wait count and maybe resets the semaphore.
226 */
227DECLINLINE(int) pdmCritSectRwEnterSharedGotItAfterWaiting(PVMCC pVM, PPDMCRITSECTRW pThis, uint64_t u64State,
228 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
229{
230 for (;;)
231 {
232 uint64_t const u64OldState = u64State;
233 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
234 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
235 AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
236 pdmCritSectRwCorrupted(pThis, "Invalid read count"));
237 cWait--;
238 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
239 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
240
241 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
242 {
243 if (cWait == 0)
244 {
245 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
246 {
247 int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
248 AssertRCReturn(rc, rc);
249 }
250 }
251 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
252 }
253
254 ASMNopPause();
255 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
256 ASMNopPause();
257
258 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
259 }
260 /* not reached */
261}
262
263
264#if defined(IN_RING0) || (defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT))
265/**
266 * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
267 * and returns @a rc.
268 *
269 * @note May return VINF_SUCCESS if we race the exclusive leave function and
270 * come out on the bottom.
271 *
272 * Ring-3 only calls this in a case where it is _not_ acceptable to take the
273 * lock, so even if we get the lock we'll have to leave. In the ring-0
274 * context, we can safely return VINF_SUCCESS in case of a race.
275 */
276DECL_NO_INLINE(static, int) pdmCritSectRwEnterSharedBailOut(PVMCC pVM, PPDMCRITSECTRW pThis, int rc,
277 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
278{
279#ifdef IN_RING0
280 uint64_t const tsStart = RTTimeNanoTS();
281 uint64_t cNsElapsed = 0;
282#endif
283 for (;;)
284 {
285 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
286 uint64_t u64OldState = u64State;
287
288 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
289 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
290 cWait--;
291
292 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
293 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
294
295 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
296 {
297 c--;
298 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
299 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
300 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
301 return rc;
302 }
303 else
304 {
305 /*
306 * The direction changed, so we can actually get the lock now.
307 *
308 * This means that we _have_ to wait on the semaphore to be signalled
309 * so we can properly reset it. Otherwise the stuff gets out of whack,
310 * because signalling and resetting will race one another. An
311 * exception would be if we're not the last reader waiting and don't
312 * need to worry about the resetting.
313 *
314 * An option would be to do the resetting in PDMCritSectRwEnterExcl,
315 * but that would still leave a racing PDMCritSectRwEnterShared
316 * spinning hard for a little bit, which isn't great...
317 */
318 if (cWait == 0)
319 {
320# ifdef IN_RING0
321 /* Do timeout processing first to avoid redoing the above. */
322 uint32_t cMsWait;
323 if (cNsElapsed <= RT_NS_10SEC)
324 cMsWait = 32;
325 else
326 {
327 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
328 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
329 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
330 {
331 LogFunc(("%p: giving up\n", pThis));
332 return rc;
333 }
334 cMsWait = 2;
335 }
336
337 int rcWait = SUPSemEventMultiWait(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, cMsWait);
338 Log11Func(("%p: rc=%Rrc %'RU64 ns (hNativeWriter=%p u64State=%#RX64)\n", pThis, rcWait,
339 RTTimeNanoTS() - tsStart, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
340# else
341 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
342 int rcWait = SUPSemEventMultiWaitNoResume(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, RT_MS_5SEC);
343 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
344# endif
345 if (rcWait == VINF_SUCCESS)
346 {
347# ifdef IN_RING0
348 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
349# else
350 /* ring-3: Cannot return VINF_SUCCESS. */
351 Assert(RT_FAILURE_NP(rc));
352 int rc2 = pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
353 if (RT_SUCCESS(rc2))
354 rc2 = pdmCritSectRwLeaveSharedWorker(pVM, pThis, fNoVal);
355 return rc;
356# endif
357 }
358 AssertMsgReturn(rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED,
359 ("%p: rcWait=%Rrc rc=%Rrc", pThis, rcWait, rc),
360 RT_FAILURE_NP(rcWait) ? rcWait : -rcWait);
361 }
362 else
363 {
364 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
365 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
366 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
367 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
368 }
369
370# ifdef IN_RING0
371 /* Calculate the elapsed time here to avoid redoing state work. */
372 cNsElapsed = RTTimeNanoTS() - tsStart;
373# endif
374 }
375
376 ASMNopPause();
377 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
378 ASMNopPause();
379 }
380}
381#endif /* IN_RING0 || (IN_RING3 && PDMCRITSECTRW_STRICT) */
382
383
384/**
385 * Worker for pdmCritSectRwEnterShared that handles waiting for a contended CS.
386 * Caller has already added us to the read and read-wait counters.
387 */
388static int pdmCritSectRwEnterSharedContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis,
389 int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
390{
391 PSUPDRVSESSION const pSession = pVM->pSession;
392 SUPSEMEVENTMULTI const hEventMulti = (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead;
393# ifdef IN_RING0
394 uint64_t const tsStart = RTTimeNanoTS();
395 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
396 uint64_t cNsMaxTotal = cNsMaxTotalDef;
397 uint32_t cMsMaxOne = RT_MS_5SEC;
398 bool fNonInterruptible = false;
399# endif
400
401 for (uint32_t iLoop = 0; ; iLoop++)
402 {
403 /*
404 * Wait for the direction to switch.
405 */
406 int rc;
407# ifdef IN_RING3
408# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
409 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
410 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
411 if (RT_FAILURE(rc))
412 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
413# else
414 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
415# endif
416# endif
417
418 for (;;)
419 {
420 /*
421 * We always wait with a timeout so we can re-check the structure sanity
422 * and not get stuck waiting on a corrupt or deleted section.
423 */
424# ifdef IN_RING3
425 rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, RT_MS_5SEC);
426# else
427 rc = !fNonInterruptible
428 ? SUPSemEventMultiWaitNoResume(pSession, hEventMulti, cMsMaxOne)
429 : SUPSemEventMultiWait(pSession, hEventMulti, cMsMaxOne);
430 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p u64State=%#RX64)\n", pThis, rc,
431 RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
432# endif
433 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
434 { /* likely */ }
435 else
436 {
437# ifdef IN_RING3
438 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
439# endif
440 return VERR_SEM_DESTROYED;
441 }
442 if (RT_LIKELY(rc == VINF_SUCCESS))
443 break;
444
445 /*
446 * Timeout and interrupted waits need careful handling in ring-0
447 * because we're cooperating with ring-3 on this critical section
448 * and thus need to make absolutely sure we won't get stuck here.
449 *
450 * The r0 interrupted case means something is pending (termination,
451 * signal, APC, debugger, whatever), so we must try our best to
452 * return to the caller and to ring-3 so it can be dealt with.
453 */
454 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
455 {
456# ifdef IN_RING0
457 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
458 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
459 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
460 ("rcTerm=%Rrc\n", rcTerm));
461 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
462 cNsMaxTotal = RT_NS_1MIN;
463
464 if (rc == VERR_TIMEOUT)
465 {
466 /* Try to get out of here with a non-VINF_SUCCESS status if
467 the thread is terminating or if the timeout has been exceeded. */
468 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrTimeout);
469 if ( rcTerm == VINF_THREAD_IS_TERMINATING
470 || cNsElapsed > cNsMaxTotal)
471 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
472 pSrcPos, fNoVal, hThreadSelf);
473 }
474 else
475 {
476 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
477 we will try non-interruptible sleep for a while to help resolve the issue
478 w/o guru'ing. */
479 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrInterrupted);
480 if ( rcTerm != VINF_THREAD_IS_TERMINATING
481 && rcBusy == VINF_SUCCESS
482 && pVCpu != NULL
483 && cNsElapsed <= cNsMaxTotal)
484 {
485 if (!fNonInterruptible)
486 {
487 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits);
488 fNonInterruptible = true;
489 cMsMaxOne = 32;
490 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
491 if (cNsLeft > RT_NS_10SEC)
492 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
493 }
494 }
495 else
496 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
497 pSrcPos, fNoVal, hThreadSelf);
498 }
499# else /* IN_RING3 */
500 RT_NOREF(pVM, pVCpu, rcBusy);
501# endif /* IN_RING3 */
502 }
503 /*
504 * Any other return code is fatal.
505 */
506 else
507 {
508# ifdef IN_RING3
509 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
510# endif
511 AssertMsgFailed(("rc=%Rrc\n", rc));
512 return RT_FAILURE_NP(rc) ? rc : -rc;
513 }
514 }
515
516# ifdef IN_RING3
517 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
518# endif
519
520 /*
521 * Check the direction.
522 */
523 Assert(pThis->s.Core.fNeedReset);
524 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
525 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
526 {
527 /*
528 * Decrement the wait count and maybe reset the semaphore (if we're last).
529 */
530 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
531 }
532
533 AssertMsg(iLoop < 1,
534 ("%p: %u u64State=%#RX64 hNativeWriter=%p\n", pThis, iLoop, u64State, pThis->s.Core.u.s.hNativeWriter));
535 RTThreadYield();
536 }
537
538 /* not reached */
539}
540
541
542/**
543 * Worker that enters a read/write critical section with shared access.
544 *
545 * @returns VBox status code.
546 * @param pVM The cross context VM structure.
547 * @param pThis Pointer to the read/write critical section.
548 * @param rcBusy The busy return code for ring-0 and ring-3.
549 * @param fTryOnly Only try enter it, don't wait.
550 * @param pSrcPos The source position. (Can be NULL.)
551 * @param fNoVal No validation records.
552 */
553static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
554 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
555{
556 /*
557 * Validate input.
558 */
559 AssertPtr(pThis);
560 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
561
562#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
563 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
564 if (!fTryOnly)
565 {
566 int rc9;
567 RTNATIVETHREAD hNativeWriter;
568 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
569 if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
570 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
571 else
572 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
573 if (RT_FAILURE(rc9))
574 return rc9;
575 }
576#else
577 RTTHREAD hThreadSelf = NIL_RTTHREAD;
578#endif
579
580 /*
581 * Work the state.
582 */
583 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
584 uint64_t u64OldState = u64State;
585 for (;;)
586 {
587 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
588 {
589 /* It flows in the right direction, try to follow it before it changes. */
590 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
591 c++;
592 Assert(c < RTCSRW_CNT_MASK / 4);
593 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
594 u64State &= ~RTCSRW_CNT_RD_MASK;
595 u64State |= c << RTCSRW_CNT_RD_SHIFT;
596 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
597 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
598 }
599 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
600 {
601 /* Wrong direction, but we're alone here and can simply try switch the direction. */
602 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
603 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
604 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
605 {
606 Assert(!pThis->s.Core.fNeedReset);
607 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
608 }
609 }
610 else
611 {
612 /* Is the writer perhaps doing a read recursion? */
613 RTNATIVETHREAD hNativeWriter;
614 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
615 if (hNativeWriter != NIL_RTNATIVETHREAD)
616 {
617 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
618 if (hNativeSelf == hNativeWriter)
619 {
620#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
621 if (!fNoVal)
622 {
623 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
624 if (RT_FAILURE(rc9))
625 return rc9;
626 }
627#endif
628 uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
629 Assert(cReads < _16K);
630 AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
631 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
632 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
633 return VINF_SUCCESS; /* don't break! */
634 }
635 }
636
637 /*
638 * If we're only trying, return already.
639 */
640 if (fTryOnly)
641 {
642 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
643 return VERR_SEM_BUSY;
644 }
645
646#if defined(IN_RING3) || defined(IN_RING0)
647 /*
648 * Add ourselves to the queue and wait for the direction to change.
649 */
650 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
651 c++;
652 Assert(c < RTCSRW_CNT_MASK / 2);
653 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
654
655 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
656 cWait++;
657 Assert(cWait <= c);
658 Assert(cWait < RTCSRW_CNT_MASK / 2);
659 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
660
661 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
662 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
663
664 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
665 {
666 /*
667 * In ring-3 it's straightforward, just optimize the RTThreadSelf() call.
668 */
669# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
670 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
671# elif defined(IN_RING3)
672 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, RTThreadSelf());
673# else /* IN_RING0 */
674 /*
675 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
676 * account when waiting on contended locks.
677 */
678 PVMCPUCC pVCpu = VMMGetCpu(pVM);
679 if (pVCpu)
680 {
681 VMMR0EMTBLOCKCTX Ctx;
682 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
683 if (rc == VINF_SUCCESS)
684 {
685 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
686
687 rc = pdmCritSectRwEnterSharedContended(pVM, pVCpu, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
688
689 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
690 }
691 else
692 {
693 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
694 rc = pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
695 }
696 return rc;
697 }
698
699 /* Non-EMT. */
700 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
701 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
702# endif /* IN_RING0 */
703 }
704
705#else /* !IN_RING3 && !IN_RING0 */
706 /*
707 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
708 * back to ring-3 and do it there or return rcBusy.
709 */
710# error "Unused code."
711 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
712 if (rcBusy == VINF_SUCCESS)
713 {
714 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
715 /** @todo Should actually do this in via VMMR0.cpp instead of going all the way
716 * back to ring-3. Goes for both kind of crit sects. */
717 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
718 }
719 return rcBusy;
720#endif /* !IN_RING3 && !IN_RING0 */
721 }
722
723 ASMNopPause();
724 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
725 { /* likely */ }
726 else
727 return VERR_SEM_DESTROYED;
728 ASMNopPause();
729
730 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
731 u64OldState = u64State;
732 }
733 /* not reached */
734}
735
736
737/**
738 * Enter a critical section with shared (read) access.
739 *
740 * @returns VBox status code.
741 * @retval VINF_SUCCESS on success.
742 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
743 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
744 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
745 * during the operation.
746 *
747 * @param pVM The cross context VM structure.
748 * @param pThis Pointer to the read/write critical section.
749 * @param rcBusy The status code to return when we're in RC or R0 and the
750 * section is busy. Pass VINF_SUCCESS to acquire the
751 * critical section thru a ring-3 call if necessary.
752 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
753 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
754 * RTCritSectRwEnterShared.
755 */
756VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
757{
758#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
759 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
760#else
761 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
762 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
763#endif
764}
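
/* Illustrative sketch: the typical enter/leave pairing for shared (read)
 * access. The helper, its device-code context, and the choice of VERR_SEM_BUSY
 * as rcBusy are hypothetical. */
#if 0
static int pdmCritSectRwExampleReadSide(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    /* In ring-0/raw-mode a busy section may make this return rcBusy (here
       VERR_SEM_BUSY) instead of blocking; in ring-3 it blocks until shared
       access is granted. */
    int rc = PDMCritSectRwEnterShared(pVM, pCritSect, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        /* ... read the data protected by pCritSect ... */
        rc = PDMCritSectRwLeaveShared(pVM, pCritSect);
    }
    return rc;
}
#endif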
765
766
767/**
768 * Enter a critical section with shared (read) access.
769 *
770 * @returns VBox status code.
771 * @retval VINF_SUCCESS on success.
772 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
773 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
774 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
775 * during the operation.
776 *
777 * @param pVM The cross context VM structure.
778 * @param pThis Pointer to the read/write critical section.
779 * @param rcBusy The status code to return when we're in RC or R0 and the
780 * section is busy. Pass VINF_SUCCESS to acquire the
781 * critical section thru a ring-3 call if necessary.
782 * @param uId Where we're entering the section.
783 * @param SRC_POS The source position.
784 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
785 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
786 * RTCritSectRwEnterSharedDebug.
787 */
788VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
789{
790 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
791#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
792 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
793#else
794 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
795 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
796#endif
797}
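
/* Illustrative sketch: the debug variant additionally takes an opaque uId and
 * RT_SRC_POS so the lock validator can record where the section was entered.
 * The helper and the zero uId are hypothetical. */
#if 0
static int pdmCritSectRwExampleReadSideDebug(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    int rc = PDMCritSectRwEnterSharedDebug(pVM, pCritSect, VERR_SEM_BUSY, 0 /*uId*/, RT_SRC_POS);
    if (RT_SUCCESS(rc))
        rc = PDMCritSectRwLeaveShared(pVM, pCritSect);
    return rc;
}
#endif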
798
799
800/**
801 * Try enter a critical section with shared (read) access.
802 *
803 * @returns VBox status code.
804 * @retval VINF_SUCCESS on success.
805 * @retval VERR_SEM_BUSY if the critsect was owned.
806 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
807 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
808 * during the operation.
809 *
810 * @param pVM The cross context VM structure.
811 * @param pThis Pointer to the read/write critical section.
812 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
813 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
814 * RTCritSectRwTryEnterShared.
815 */
816VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
817{
818#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
819 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
820#else
821 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
822 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
823#endif
824}
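
/* Illustrative sketch: a non-blocking attempt that falls back to other work
 * when the section is busy. The helper and the fallback action are
 * hypothetical. */
#if 0
static void pdmCritSectRwExampleTryReadSide(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    int rc = PDMCritSectRwTryEnterShared(pVM, pCritSect);
    if (RT_SUCCESS(rc))
    {
        /* ... read the data protected by pCritSect ... */
        PDMCritSectRwLeaveShared(pVM, pCritSect);
    }
    else
    {
        Assert(rc == VERR_SEM_BUSY || rc == VERR_SEM_DESTROYED);
        /* ... defer the work or take a slow path ... */
    }
}
#endif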
825
826
827/**
828 * Try enter a critical section with shared (read) access.
829 *
830 * @returns VBox status code.
831 * @retval VINF_SUCCESS on success.
832 * @retval VERR_SEM_BUSY if the critsect was owned.
833 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
834 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
835 * during the operation.
836 *
837 * @param pVM The cross context VM structure.
838 * @param pThis Pointer to the read/write critical section.
839 * @param uId Where we're entering the section.
840 * @param SRC_POS The source position.
841 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
842 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
843 * RTCritSectRwTryEnterSharedDebug.
844 */
845VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
846{
847 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
848#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
849 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
850#else
851 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
852 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
853#endif
854}
855
856
857#ifdef IN_RING3
858/**
859 * Enters a PDM read/write critical section with shared (read) access.
860 *
861 * @returns VINF_SUCCESS if entered successfully.
862 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
863 * during the operation.
864 *
865 * @param pVM The cross context VM structure.
866 * @param pThis Pointer to the read/write critical section.
867 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
868 */
869VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
870{
871 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3);
872}
873#endif
874
875
876/**
877 * Leave a critical section held with shared access.
878 *
879 * @returns VBox status code.
880 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
881 * during the operation.
882 * @param pVM The cross context VM structure.
883 * @param pThis Pointer to the read/write critical section.
884 * @param fNoVal No validation records (i.e. queued release).
885 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
886 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
887 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
888 */
889static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
890{
891 /*
892 * Validate handle.
893 */
894 AssertPtr(pThis);
895 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
896
897#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
898 NOREF(fNoVal);
899#endif
900
901 /*
902 * Check the direction and take action accordingly.
903 */
904#ifdef IN_RING0
905 PVMCPUCC pVCpu = NULL;
906#endif
907 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
908 uint64_t u64OldState = u64State;
909 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
910 {
911#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
912 if (fNoVal)
913 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
914 else
915 {
916 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
917 if (RT_FAILURE(rc9))
918 return rc9;
919 }
920#endif
921 for (;;)
922 {
923 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
924 AssertReturn(c > 0, VERR_NOT_OWNER);
925 c--;
926
927 if ( c > 0
928 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
929 {
930 /* Don't change the direction. */
931 u64State &= ~RTCSRW_CNT_RD_MASK;
932 u64State |= c << RTCSRW_CNT_RD_SHIFT;
933 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
934 break;
935 }
936 else
937 {
938#if defined(IN_RING3) || defined(IN_RING0)
939# ifdef IN_RING0
940 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
941 if (!pVCpu)
942 pVCpu = VMMGetCpu(pVM);
943 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
944 || VMMRZCallRing3IsEnabled(pVCpu)
945 || RTSemEventIsSignalSafe()
946 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
947 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
948 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
949 )
950# endif
951 {
952 /* Reverse the direction and signal the writer threads. */
953 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
954 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
955 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
956 {
957 int rc;
958# ifdef IN_RING0
959 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
960 if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
961 {
962 VMMR0EMTBLOCKCTX Ctx;
963 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
964 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
965
966 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
967
968 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
969 }
970 else
971# endif
972 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
973 AssertRC(rc);
974 return rc;
975 }
976 }
977#endif /* IN_RING3 || IN_RING0 */
978#ifndef IN_RING3
979# ifdef IN_RING0
980 else
981# endif
982 {
983 /* Queue the exit request (ring-3). */
984# ifndef IN_RING0
985 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
986# endif
987 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
988 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
989 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
990 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
991 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
992 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
993 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
994 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & HOST_PAGE_OFFSET_MASK)
995 == ((uintptr_t)pThis & HOST_PAGE_OFFSET_MASK),
996 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
997 pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
998 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
999 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1000 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1001 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
1002 break;
1003 }
1004#endif
1005 }
1006
1007 ASMNopPause();
1008 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1009 { }
1010 else
1011 return VERR_SEM_DESTROYED;
1012 ASMNopPause();
1013
1014 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1015 u64OldState = u64State;
1016 }
1017 }
1018 else
1019 {
1020 /*
1021 * Write direction. Check that it's the owner calling and that it has reads to undo.
1022 */
1023 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1024 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1025
1026 RTNATIVETHREAD hNativeWriter;
1027 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1028 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1029 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
1030#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1031 if (!fNoVal)
1032 {
1033 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
1034 if (RT_FAILURE(rc))
1035 return rc;
1036 }
1037#endif
1038 uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
1039 AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
1040 }
1041
1042 return VINF_SUCCESS;
1043}
1044
1045
1046/**
1047 * Leave a critical section held with shared access.
1048 *
1049 * @returns VBox status code.
1050 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1051 * during the operation.
1052 * @param pVM The cross context VM structure.
1053 * @param pThis Pointer to the read/write critical section.
1054 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
1055 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
1056 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
1057 */
1058VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
1059{
1060 return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
1061}
1062
1063
1064#if defined(IN_RING3) || defined(IN_RING0)
1065/**
1066 * PDMCritSectBothFF interface.
1067 *
1068 * @param pVM The cross context VM structure.
1069 * @param pThis Pointer to the read/write critical section.
1070 */
1071void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1072{
1073 pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
1074}
1075#endif
1076
1077
1078/**
1079 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
1080 *
1081 * @returns @a rc unless corrupted.
1082 * @param pThis Pointer to the read/write critical section.
1083 * @param rc The status to return.
1084 */
1085DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
1086{
1087 /*
1088 * Decrement the counts and return the error.
1089 */
1090 for (;;)
1091 {
1092 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1093 uint64_t const u64OldState = u64State;
1094 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1095 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
1096 c--;
1097 u64State &= ~RTCSRW_CNT_WR_MASK;
1098 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1099 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1100 return rc;
1101
1102 ASMNopPause();
1103 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1104 ASMNopPause();
1105 }
1106}
1107
1108
1109/**
1110 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
1111 * gotten exclusive ownership of the critical section.
1112 */
1113DECL_FORCE_INLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
1114 bool fNoVal, RTTHREAD hThreadSelf)
1115{
1116 RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
1117 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1118
1119#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1120 pThis->s.Core.cWriteRecursions = 1;
1121#else
1122 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
1123#endif
1124 Assert(pThis->s.Core.cWriterReads == 0);
1125
1126#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1127 if (!fNoVal)
1128 {
1129 if (hThreadSelf == NIL_RTTHREAD)
1130 hThreadSelf = RTThreadSelfAutoAdopt();
1131 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
1132 }
1133#endif
1134 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1135 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
1136 return VINF_SUCCESS;
1137}
1138
1139
1140#if defined(IN_RING3) || defined(IN_RING0)
1141/**
1142 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
1143 * contended.
1144 */
1145static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
1146 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
1147{
1148 RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);
1149
1150 PSUPDRVSESSION const pSession = pVM->pSession;
1151 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pThis->s.Core.hEvtWrite;
1152# ifdef IN_RING0
1153 uint64_t const tsStart = RTTimeNanoTS();
1154 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
1155 uint64_t cNsMaxTotal = cNsMaxTotalDef;
1156 uint32_t cMsMaxOne = RT_MS_5SEC;
1157 bool fNonInterruptible = false;
1158# endif
1159
1160 for (uint32_t iLoop = 0; ; iLoop++)
1161 {
1162 /*
1163 * Wait for our turn.
1164 */
1165 int rc;
1166# ifdef IN_RING3
1167# ifdef PDMCRITSECTRW_STRICT
1168 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
1169 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
1170 if (RT_SUCCESS(rc))
1171 { /* likely */ }
1172 else
1173 return pdmCritSectRwEnterExclBailOut(pThis, rc);
1174# else
1175 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
1176# endif
1177# endif
1178
1179 for (;;)
1180 {
1181 /*
1182 * We always wait with a timeout so we can re-check the structure sanity
1183 * and not get stuck waiting on a corrupt or deleted section.
1184 */
1185# ifdef IN_RING3
1186 rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
1187# else
1188 rc = !fNonInterruptible
1189 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
1190 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
1191 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p)\n",
1192 pThis, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter));
1193# endif
1194 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1195 { /* likely */ }
1196 else
1197 {
1198# ifdef IN_RING3
1199 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1200# endif
1201 return VERR_SEM_DESTROYED;
1202 }
1203 if (RT_LIKELY(rc == VINF_SUCCESS))
1204 break;
1205
1206 /*
1207 * Timeout and interrupted waits need careful handling in ring-0
1208 * because we're cooperating with ring-3 on this critical section
1209 * and thus need to make absolutely sure we won't get stuck here.
1210 *
1211 * The r0 interrupted case means something is pending (termination,
1212 * signal, APC, debugger, whatever), so we must try our best to
1213 * return to the caller and to ring-3 so it can be dealt with.
1214 */
1215 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
1216 {
1217# ifdef IN_RING0
1218 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
1219 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
1220 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
1221 ("rcTerm=%Rrc\n", rcTerm));
1222 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
1223 cNsMaxTotal = RT_NS_1MIN;
1224
1225 if (rc == VERR_TIMEOUT)
1226 {
1227 /* Try to get out of here with a non-VINF_SUCCESS status if
1228 the thread is terminating or if the timeout has been exceeded. */
1229 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrTimeout);
1230 if ( rcTerm == VINF_THREAD_IS_TERMINATING
1231 || cNsElapsed > cNsMaxTotal)
1232 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1233 }
1234 else
1235 {
1236 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
1237 we will try non-interruptible sleep for a while to help resolve the issue
1238 w/o guru'ing. */
1239 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrInterrupted);
1240 if ( rcTerm != VINF_THREAD_IS_TERMINATING
1241 && rcBusy == VINF_SUCCESS
1242 && pVCpu != NULL
1243 && cNsElapsed <= cNsMaxTotal)
1244 {
1245 if (!fNonInterruptible)
1246 {
1247 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits);
1248 fNonInterruptible = true;
1249 cMsMaxOne = 32;
1250 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
1251 if (cNsLeft > RT_NS_10SEC)
1252 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
1253 }
1254 }
1255 else
1256 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1257 }
1258# else /* IN_RING3 */
1259 RT_NOREF(pVM, pVCpu, rcBusy);
1260# endif /* IN_RING3 */
1261 }
1262 /*
1263 * Any other return code is fatal.
1264 */
1265 else
1266 {
1267# ifdef IN_RING3
1268 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1269# endif
1270 AssertMsgFailed(("rc=%Rrc\n", rc));
1271 return RT_FAILURE_NP(rc) ? rc : -rc;
1272 }
1273 }
1274
1275# ifdef IN_RING3
1276 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1277# endif
1278
1279 /*
1280 * Try take exclusive write ownership.
1281 */
1282 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1283 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1284 {
1285 bool fDone;
1286 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1287 if (fDone)
1288 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1289 }
1290 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
1291 }
1292}
1293#endif /* IN_RING3 || IN_RING0 */
1294
1295
1296/**
1297 * Worker that enters a read/write critical section with exclusive access.
1298 *
1299 * @returns VBox status code.
1300 * @param pVM The cross context VM structure.
1301 * @param pThis Pointer to the read/write critical section.
1302 * @param rcBusy The busy return code for ring-0 and ring-3.
1303 * @param fTryOnly Only try enter it, don't wait.
1304 * @param pSrcPos The source position. (Can be NULL.)
1305 * @param fNoVal No validation records.
1306 */
1307static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1308 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1309{
1310 /*
1311 * Validate input.
1312 */
1313 AssertPtr(pThis);
1314 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1315
1316 RTTHREAD hThreadSelf = NIL_RTTHREAD;
1317#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1318 if (!fTryOnly)
1319 {
1320 hThreadSelf = RTThreadSelfAutoAdopt();
1321 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
1322 if (RT_FAILURE(rc9))
1323 return rc9;
1324 }
1325#endif
1326
1327 /*
1328 * Check if we're already the owner and just recursing.
1329 */
1330 RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1331 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1332 RTNATIVETHREAD hNativeWriter;
1333 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1334 if (hNativeSelf == hNativeWriter)
1335 {
1336 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1337#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1338 if (!fNoVal)
1339 {
1340 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
1341 if (RT_FAILURE(rc9))
1342 return rc9;
1343 }
1344#endif
1345 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1346#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1347 uint32_t const cDepth = ++pThis->s.Core.cWriteRecursions;
1348#else
1349 uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
1350#endif
1351 AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
1352 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
1353 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
1354 return VINF_SUCCESS;
1355 }
1356
1357 /*
1358 * First we try grab an idle critical section using 128-bit atomics.
1359 */
1360 /** @todo This could be moved up before the recursion check. */
1361 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1362#ifdef RTASM_HAVE_CMP_WRITE_U128
1363 if ( (u64State & ~RTCSRW_DIR_MASK) == 0
1364 && pdmCritSectRwIsCmpWriteU128Supported())
1365 {
1366 RTCRITSECTRWSTATE OldState;
1367 OldState.s.u64State = u64State;
1368 OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1369 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1370
1371 RTCRITSECTRWSTATE NewState;
1372 NewState.s.u64State = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1373 NewState.s.hNativeWriter = hNativeSelf;
1374
1375 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1376 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1377
1378 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1379 }
1380#endif
1381
1382 /*
1383 * Do it step by step. Update the state to reflect our desire.
1384 */
1385 uint64_t u64OldState = u64State;
1386
1387 for (;;)
1388 {
1389 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1390 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
1391 {
1392 /* It flows in the right direction, try to follow it before it changes. */
1393 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1394 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1395 c++;
1396 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1397 u64State &= ~RTCSRW_CNT_WR_MASK;
1398 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1399 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1400 break;
1401 }
1402 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
1403 {
1404 /* Wrong direction, but we're alone here and can simply try switch the direction. */
1405 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1406 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1407 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1408 break;
1409 }
1410 else if (fTryOnly)
1411 {
1412 /* Wrong direction and we're not supposed to wait, just return. */
1413 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1414 return VERR_SEM_BUSY;
1415 }
1416 else
1417 {
1418 /* Add ourselves to the write count and break out to do the wait. */
1419 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1420 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1421 c++;
1422 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1423 u64State &= ~RTCSRW_CNT_WR_MASK;
1424 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1425 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1426 break;
1427 }
1428
1429 ASMNopPause();
1430
1431 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1432 { /* likely */ }
1433 else
1434 return VERR_SEM_DESTROYED;
1435
1436 ASMNopPause();
1437 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1438 u64OldState = u64State;
1439 }
1440
1441 /*
1442 * If we're in write mode now, try to grab the ownership. Play fair if there
1443 * are threads already waiting.
1444 */
1445 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1446 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
1447 || fTryOnly);
1448 if (fDone)
1449 {
1450 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1451 if (fDone)
1452 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1453 }
1454
1455 /*
1456 * Okay, we have contention and will have to wait unless we're just trying.
1457 */
1458 if (fTryOnly)
1459 {
1460 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); /** @todo different statistics for this */
1461 return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
1462 }
1463
1464 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1465
1466 /*
1467 * Ring-3 is pretty straightforward.
1468 */
1469#if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
1470 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
1471#elif defined(IN_RING3)
1472 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());
1473
1474#elif defined(IN_RING0)
1475 /*
1476 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
1477 * account when waiting on contended locks.
1478 */
1479 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1480 if (pVCpu)
1481 {
1482 VMMR0EMTBLOCKCTX Ctx;
1483 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
1484 if (rc == VINF_SUCCESS)
1485 {
1486 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1487
1488 rc = pdmR3R0CritSectRwEnterExclContended(pVM, pVCpu, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1489
1490 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1491 }
1492 else
1493 {
1494 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
1495 rc = pdmCritSectRwEnterExclBailOut(pThis, rc);
1496 }
1497 return rc;
1498 }
1499
1500 /* Non-EMT. */
1501 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1502 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1503
1504#else
1505# error "Unused."
1506 /*
1507 * Raw-mode: Call host and take it there if rcBusy is VINF_SUCCESS.
1508 */
1509 rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
1510 if (rcBusy == VINF_SUCCESS)
1511 {
1512 Assert(!fTryOnly);
1513 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1514 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
1515 * back to ring-3. Goes for both kinds of crit sects. */
1516 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
1517 }
1518 return rcBusy;
1519#endif
1520}
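/*
 * A minimal sketch of how the 64-bit state word juggled above can be decoded,
 * using the same RTCSRW_* masks and shifts as the code in this file.  The
 * helper name is hypothetical and the block is illustrative only.
 */
#if 0 /* illustrative sketch, not built */
static void pdmCritSectRwExampleDecodeState(uint64_t u64State)
{
    uint64_t const cWriters   = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
    uint64_t const cReaders   = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
    bool     const fWriteMode = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
    Log(("critsect-rw state: %u writers (incl. waiters), %u readers, direction=%s\n",
         (unsigned)cWriters, (unsigned)cReaders, fWriteMode ? "write" : "read"));
}
#endif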
1521
1522
1523/**
1524 * Enters a critical section with exclusive (write) access.
1525 *
1526 * @returns VBox status code.
1527 * @retval VINF_SUCCESS on success.
1528 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1529 * @retval VERR_SEM_NESTED if this is a nested enter on a section that does not allow nesting. (Asserted.)
1530 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1531 * during the operation.
1532 *
1533 * @param pVM The cross context VM structure.
1534 * @param pThis Pointer to the read/write critical section.
1535 * @param rcBusy The status code to return when we're in RC or R0 and the
1536 * section is busy. Pass VINF_SUCCESS to acquire the
1537 * critical section through a ring-3 call if necessary.
1538 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1539 * PDMCritSectRwTryEnterExclDebug,
1540 * PDMCritSectEnterDebug, PDMCritSectEnter,
1541 * RTCritSectRwEnterExcl.
1542 */
1543VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
1544{
1545#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1546 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1547#else
1548 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1549 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1550#endif
1551}
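/*
 * A minimal usage sketch for the exclusive enter/leave pair, assuming a
 * hypothetical caller that already has pVM and the section at hand.  Passing
 * VERR_SEM_BUSY as rcBusy (rather than VINF_SUCCESS) means a contended
 * ring-0/raw-mode call is bounced back instead of being resolved through
 * ring-3; illustrative only.
 */
#if 0 /* illustrative sketch, not built */
static int pdmCritSectRwExampleWrite(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    int rc = PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        /* ... modify the data the section protects ... */
        rc = PDMCritSectRwLeaveExcl(pVM, pCritSect);
    }
    return rc;
}
#endif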
1552
1553
1554/**
1555 * Enters a critical section with exclusive (write) access, recording the source position for the lock validator.
1556 *
1557 * @returns VBox status code.
1558 * @retval VINF_SUCCESS on success.
1559 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1560 * @retval VERR_SEM_NESTED if this is a nested enter on a section that does not allow nesting. (Asserted.)
1561 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1562 * during the operation.
1563 *
1564 * @param pVM The cross context VM structure.
1565 * @param pThis Pointer to the read/write critical section.
1566 * @param rcBusy The status code to return when we're in RC or R0 and the
1567 * section is busy. Pass VINF_SUCCESS to acquire the
1568 * critical section through a ring-3 call if necessary.
1569 * @param uId Where we're entering the section.
1570 * @param SRC_POS The source position.
1571 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1572 * PDMCritSectRwTryEnterExclDebug,
1573 * PDMCritSectEnterDebug, PDMCritSectEnter,
1574 * RTCritSectRwEnterExclDebug.
1575 */
1576VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1577{
1578 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1579#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1580 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1581#else
1582 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1583 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1584#endif
1585}
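/*
 * The same pattern using the debug variant, which feeds the lock validator a
 * source position.  RT_SRC_POS supplies the file/line/function arguments
 * declared by RT_SRC_POS_DECL; the caller and the zero uId are hypothetical,
 * illustrative only.
 */
#if 0 /* illustrative sketch, not built */
static int pdmCritSectRwExampleWriteDebug(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    int rc = PDMCritSectRwEnterExclDebug(pVM, pCritSect, VERR_SEM_BUSY, 0 /*uId*/, RT_SRC_POS);
    if (RT_SUCCESS(rc))
    {
        /* ... critical work ... */
        rc = PDMCritSectRwLeaveExcl(pVM, pCritSect);
    }
    return rc;
}
#endif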
1586
1587
1588/**
1589 * Tries to enter a critical section with exclusive (write) access.
1590 *
1591 * @retval VINF_SUCCESS on success.
1592 * @retval VERR_SEM_BUSY if the critsect was owned.
1593 * @retval VERR_SEM_NESTED if this is a nested enter on a section that does not allow nesting. (Asserted.)
1594 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1595 * during the operation.
1596 *
1597 * @param pVM The cross context VM structure.
1598 * @param pThis Pointer to the read/write critical section.
1599 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1600 * PDMCritSectRwEnterExclDebug,
1601 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1602 * RTCritSectRwTryEnterExcl.
1603 */
1604VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1605{
1606#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1607 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1608#else
1609 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1610 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1611#endif
1612}
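/*
 * A minimal sketch of the try-enter pattern: do an optional piece of work only
 * if the write lock can be taken without waiting.  The helper and the fallback
 * are hypothetical; illustrative only.
 */
#if 0 /* illustrative sketch, not built */
static void pdmCritSectRwExampleTryWrite(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    int rc = PDMCritSectRwTryEnterExcl(pVM, pCritSect);
    if (RT_SUCCESS(rc))
    {
        /* ... opportunistic update of the protected data ... */
        PDMCritSectRwLeaveExcl(pVM, pCritSect);
    }
    else
    {
        /* VERR_SEM_BUSY (or VERR_SEM_DESTROYED): defer the work or take a slower path. */
    }
}
#endif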
1613
1614
1615/**
1616 * Tries to enter a critical section with exclusive (write) access, recording the source position for the lock validator.
1617 *
1618 * @retval VINF_SUCCESS on success.
1619 * @retval VERR_SEM_BUSY if the critsect was owned.
1620 * @retval VERR_SEM_NESTED if this is a nested enter on a section that does not allow nesting. (Asserted.)
1621 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1622 * during the operation.
1623 *
1624 * @param pVM The cross context VM structure.
1625 * @param pThis Pointer to the read/write critical section.
1626 * @param uId Where we're entering the section.
1627 * @param SRC_POS The source position.
1628 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1629 * PDMCritSectRwEnterExclDebug,
1630 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1631 * RTCritSectRwTryEnterExclDebug.
1632 */
1633VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1634{
1635 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1636#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1637 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1638#else
1639 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1640 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1641#endif
1642}
1643
1644
1645#ifdef IN_RING3
1646/**
1647 * Enters a PDM read/write critical section with exclusive (write) access.
1648 *
1649 * @returns VINF_SUCCESS if entered successfully.
1650 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1651 * during the operation.
1652 *
1653 * @param pVM The cross context VM structure.
1654 * @param pThis Pointer to the read/write critical section.
1655 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1656 */
1657VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1658{
1659 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
1660}
1661#endif /* IN_RING3 */
1662
1663
1664/**
1665 * Leaves a critical section held exclusively.
1666 *
1667 * @returns VBox status code.
1668 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1669 * during the operation.
1670 * @param pVM The cross context VM structure.
1671 * @param pThis Pointer to the read/write critical section.
1672 * @param fNoVal No validation records (i.e. queued release).
1673 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1674 */
1675static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1676{
1677 /*
1678 * Validate handle.
1679 */
1680 AssertPtr(pThis);
1681 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1682
1683#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1684 NOREF(fNoVal);
1685#endif
1686
1687 /*
1688 * Check ownership.
1689 */
1690 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1691 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1692
1693 RTNATIVETHREAD hNativeWriter;
1694 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1695 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1696
1697
1698 /*
1699 * Unwind one recursion. Not the last?
1700 */
1701 if (pThis->s.Core.cWriteRecursions != 1)
1702 {
1703#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1704 if (fNoVal)
1705 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1706 else
1707 {
1708 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1709 if (RT_FAILURE(rc9))
1710 return rc9;
1711 }
1712#endif
1713#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1714 uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
1715#else
1716 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1717#endif
1718 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1719 return VINF_SUCCESS;
1720 }
1721
1722
1723 /*
1724 * Final recursion.
1725 */
1726 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1727#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1728 if (fNoVal)
1729 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1730 else
1731 {
1732 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1733 if (RT_FAILURE(rc9))
1734 return rc9;
1735 }
1736#endif
1737
1738
1739#ifdef RTASM_HAVE_CMP_WRITE_U128
1740 /*
1741 * See if we can get out w/o any signalling as this is a common case.
1742 */
1743 if (pdmCritSectRwIsCmpWriteU128Supported())
1744 {
1745 RTCRITSECTRWSTATE OldState;
1746 OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1747 if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
1748 {
1749 OldState.s.hNativeWriter = hNativeSelf;
1750 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1751
1752 RTCRITSECTRWSTATE NewState;
1753 NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
1754 NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1755
1756# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1757 pThis->s.Core.cWriteRecursions = 0;
1758# else
1759 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1760# endif
1761 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1762
1763 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1764 return VINF_SUCCESS;
1765
1766 /* bail out. */
1767 pThis->s.Core.cWriteRecursions = 1;
1768 }
1769 }
1770#endif /* RTASM_HAVE_CMP_WRITE_U128 */
1771
1772
1773#if defined(IN_RING3) || defined(IN_RING0)
1774 /*
1775 * Ring-3: Straightforward; just update the state and, if necessary, signal waiters.
1776 * Ring-0: Try to leave for real; depends on host and context.
1777 */
1778# ifdef IN_RING0
1779 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
1780 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1781 if ( pVCpu == NULL /* non-EMT access; if we implement that, it must be able to block */
1782 || VMMRZCallRing3IsEnabled(pVCpu)
1783 || RTSemEventIsSignalSafe()
1784 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
1785 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
1786 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
1787 )
1788# endif
1789 {
1790# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1791 pThis->s.Core.cWriteRecursions = 0;
1792# else
1793 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1794# endif
1795 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1796 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1797
1798 for (;;)
1799 {
1800 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1801 uint64_t u64OldState = u64State;
1802
1803 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1804 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1805 c--;
1806
1807 if ( c > 0
1808 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1809 {
1810 /*
1811 * Don't change the direction, wake up the next writer if any.
1812 */
1813 u64State &= ~RTCSRW_CNT_WR_MASK;
1814 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1815 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1816 {
1817 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1818 int rc;
1819 if (c == 0)
1820 rc = VINF_SUCCESS;
1821# ifdef IN_RING0
1822 else if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
1823 {
1824 VMMR0EMTBLOCKCTX Ctx;
1825 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1826 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1827
1828 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1829
1830 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1831 }
1832# endif
1833 else
1834 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1835 AssertRC(rc);
1836 return rc;
1837 }
1838 }
1839 else
1840 {
1841 /*
1842 * Reverse the direction and signal the reader threads.
1843 */
1844 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1845 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1846 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1847 {
1848 Assert(!pThis->s.Core.fNeedReset);
1849 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1850 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1851
1852 int rc;
1853# ifdef IN_RING0
1854 if (!RTSemEventMultiIsSignalSafe() && pVCpu != NULL)
1855 {
1856 VMMR0EMTBLOCKCTX Ctx;
1857 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1858 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1859
1860 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1861
1862 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1863 }
1864 else
1865# endif
1866 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1867 AssertRC(rc);
1868 return rc;
1869 }
1870 }
1871
1872 ASMNopPause();
1873 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1874 { /*likely*/ }
1875 else
1876 return VERR_SEM_DESTROYED;
1877 ASMNopPause();
1878 }
1879 /* not reached! */
1880 }
1881#endif /* IN_RING3 || IN_RING0 */
1882
1883
1884#ifndef IN_RING3
1885 /*
1886 * Queue the requested exit for ring-3 execution.
1887 */
1888# ifndef IN_RING0
1889 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1890# endif
1891 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1892 LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
1893 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1894 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1895 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1896 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1897 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
1898 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & HOST_PAGE_OFFSET_MASK)
1899 == ((uintptr_t)pThis & HOST_PAGE_OFFSET_MASK),
1900 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1901 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1902 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1903 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1904 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1905 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1906 return VINF_SUCCESS;
1907#endif
1908}
1909
1910
1911/**
1912 * Leaves a critical section held exclusively.
1913 *
1914 * @returns VBox status code.
1915 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1916 * during the operation.
1917 * @param pVM The cross context VM structure.
1918 * @param pThis Pointer to the read/write critical section.
1919 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1920 */
1921VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1922{
1923 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
1924}
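/*
 * A minimal sketch showing that exclusive ownership recurses: every successful
 * enter must be paired with one leave, and only the final leave (handled by
 * the worker above) actually releases the section.  Hypothetical caller,
 * illustrative only.
 */
#if 0 /* illustrative sketch, not built */
static void pdmCritSectRwExampleRecursion(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    if (RT_SUCCESS(PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY)))
    {
        /* A nested enter by the owner just bumps the recursion count... */
        if (RT_SUCCESS(PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY)))
        {
            Assert(PDMCritSectRwGetWriteRecursion(pCritSect) == 2);
            PDMCritSectRwLeaveExcl(pVM, pCritSect);     /* ...and this only unwinds it. */
        }
        PDMCritSectRwLeaveExcl(pVM, pCritSect);         /* The final leave releases the lock. */
    }
}
#endif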
1925
1926
1927#if defined(IN_RING3) || defined(IN_RING0)
1928/**
1929 * PDMCritSectBothFF interface.
1930 *
1931 * @param pVM The cross context VM structure.
1932 * @param pThis Pointer to the read/write critical section.
1933 */
1934void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1935{
1936 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
1937}
1938#endif
1939
1940
1941/**
1942 * Checks whether the caller is the exclusive (write) owner of the critical section.
1943 *
1944 * @retval true if owner.
1945 * @retval false if not owner.
1946 * @param pVM The cross context VM structure.
1947 * @param pThis Pointer to the read/write critical section.
1948 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1949 * RTCritSectRwIsWriteOwner.
1950 */
1951VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
1952{
1953 /*
1954 * Validate handle.
1955 */
1956 AssertPtr(pThis);
1957 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1958
1959 /*
1960 * Check ownership.
1961 */
1962 RTNATIVETHREAD hNativeWriter;
1963 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1964 if (hNativeWriter == NIL_RTNATIVETHREAD)
1965 return false;
1966 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1967}
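/*
 * Typical use is asserting ownership at the top of a helper that expects its
 * caller to already hold the write lock.  The helper is hypothetical,
 * illustrative only.
 */
#if 0 /* illustrative sketch, not built */
static void pdmCritSectRwExampleNeedsWriteLock(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    Assert(PDMCritSectRwIsWriteOwner(pVM, pCritSect));
    /* ... safe to modify the protected data here ... */
}
#endif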
1968
1969
1970/**
1971 * Checks if the caller is one of the read owners of the critical section.
1972 *
1973 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1974 * enabled. Meaning, the answer is not trustworthy unless
1975 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1976 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1977 * creating the semaphore. And finally, if you used a locking class,
1978 * don't disable deadlock detection by setting cMsMinDeadlock to
1979 * RT_INDEFINITE_WAIT.
1980 *
1981 * In short, only use this for assertions.
1982 *
1983 * @returns @c true if reader, @c false if not.
1984 * @param pVM The cross context VM structure.
1985 * @param pThis Pointer to the read/write critical section.
1986 * @param fWannaHear What you'd like to hear when lock validation is not
1987 * available. (For avoiding asserting all over the place.)
1988 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1989 */
1990VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
1991{
1992 /*
1993 * Validate handle.
1994 */
1995 AssertPtr(pThis);
1996 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1997
1998 /*
1999 * Inspect the state.
2000 */
2001 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2002 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
2003 {
2004 /*
2005 * It's in write mode, so we can only be a reader if we're also the
2006 * current writer.
2007 */
2008 RTNATIVETHREAD hWriter;
2009 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hWriter);
2010 if (hWriter == NIL_RTNATIVETHREAD)
2011 return false;
2012 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
2013 }
2014
2015 /*
2016 * Read mode. If there are no current readers, then we cannot be a reader.
2017 */
2018 if (!(u64State & RTCSRW_CNT_RD_MASK))
2019 return false;
2020
2021#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
2022 /*
2023 * Ask the lock validator.
2024 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
2025 */
2026 NOREF(fWannaHear);
2027 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
2028#else
2029 /*
2030 * OK, we don't know, so just tell the caller what they want to hear.
2031 */
2032 return fWannaHear;
2033#endif
2034}
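/*
 * As the note above says, the read-owner check is only trustworthy with lock
 * validation, so it is best confined to assertions.  Passing fWannaHear=true
 * keeps builds without the validator from asserting.  The helper is
 * hypothetical, illustrative only.
 */
#if 0 /* illustrative sketch, not built */
static void pdmCritSectRwExampleNeedsReadLock(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    Assert(PDMCritSectRwIsReadOwner(pVM, pCritSect, true /*fWannaHear*/));
    /* ... read-only access to the protected data ... */
}
#endif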
2035
2036
2037/**
2038 * Gets the write recursion count.
2039 *
2040 * @returns The write recursion count (0 if bad critsect).
2041 * @param pThis Pointer to the read/write critical section.
2042 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
2043 * RTCritSectRwGetWriteRecursion.
2044 */
2045VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
2046{
2047 /*
2048 * Validate handle.
2049 */
2050 AssertPtr(pThis);
2051 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2052
2053 /*
2054 * Return the requested data.
2055 */
2056 return pThis->s.Core.cWriteRecursions;
2057}
2058
2059
2060/**
2061 * Gets the read recursion count of the current writer.
2062 *
2063 * @returns The read recursion count (0 if bad critsect).
2064 * @param pThis Pointer to the read/write critical section.
2065 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
2066 * RTCritSectRwGetWriterReadRecursion.
2067 */
2068VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
2069{
2070 /*
2071 * Validate handle.
2072 */
2073 AssertPtr(pThis);
2074 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2075
2076 /*
2077 * Return the requested data.
2078 */
2079 return pThis->s.Core.cWriterReads;
2080}
2081
2082
2083/**
2084 * Gets the current number of reads.
2085 *
2086 * This includes all read recursions, so it might be higher than the number of
2087 * read owners. It does not include reads done by the current writer.
2088 *
2089 * @returns The read count (0 if bad critsect).
2090 * @param pThis Pointer to the read/write critical section.
2091 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
2092 * RTCritSectRwGetReadCount.
2093 */
2094VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
2095{
2096 /*
2097 * Validate input.
2098 */
2099 AssertPtr(pThis);
2100 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2101
2102 /*
2103 * Return the requested data.
2104 */
2105 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2106 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
2107 return 0;
2108 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
2109}
2110
2111
2112/**
2113 * Checks if the read/write critical section is initialized or not.
2114 *
2115 * @retval true if initialized.
2116 * @retval false if not initialized.
2117 * @param pThis Pointer to the read/write critical section.
2118 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
2119 */
2120VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
2121{
2122 AssertPtr(pThis);
2123 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
2124}
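/*
 * A minimal sketch tying the query APIs above together, e.g. for a debug dump.
 * The helper and the logging are hypothetical, illustrative only.
 */
#if 0 /* illustrative sketch, not built */
static void pdmCritSectRwExampleDump(PPDMCRITSECTRW pCritSect)
{
    if (PDMCritSectRwIsInitialized(pCritSect))
        Log(("critsect-rw: %u readers, write recursion %u, writer read recursion %u\n",
             PDMCritSectRwGetReadCount(pCritSect),
             PDMCritSectRwGetWriteRecursion(pCritSect),
             PDMCritSectRwGetWriterReadRecursion(pCritSect)));
}
#endif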
2125