source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@107631

Last change on this file since 107631 was 107609, checked in by vboxsync, 10 days ago:

VMM/PDMCritSectRw,IPRT/RTCritSectRw: Fixed harmless issue in the enter-exclusive path. bugref:3409

/* $Id: PDMAllCritSectRw.cpp 107609 2025-01-09 20:19:40Z vboxsync $ */
/** @file
 * IPRT - Read/Write Critical Section, Generic.
 */

/*
 * Copyright (C) 2009-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM_CRITSECTRW
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/semaphore.h>
# include <iprt/thread.h>
#endif
#ifdef IN_RING0
# include <iprt/time.h>
#endif
#ifdef RT_ARCH_AMD64
# include <iprt/x86.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* unused */
/** The number of loops to spin for shared access in ring-3. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3        20
/** The number of loops to spin for shared access in ring-0. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0        128
/** The number of loops to spin for shared access in the raw-mode context. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC        128

/** The number of loops to spin for exclusive access in ring-3. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3        20
/** The number of loops to spin for exclusive access in ring-0. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0        256
/** The number of loops to spin for exclusive access in the raw-mode context. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC        256
#endif

/** Max number of write or write/read recursions. */
#define PDM_CRITSECTRW_MAX_RECURSIONS           _1M

/** Skips some of the overly paranoid atomic reads and updates.
 * Makes some assumptions about cache coherence, though not brave enough to
 * skip the final atomic update. */
#define PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF

/** For reading RTCRITSECTRWSTATE::s::u64State. */
#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
# define PDMCRITSECTRW_READ_STATE(a_pu64State)  ASMAtomicUoReadU64(a_pu64State)
#else
# define PDMCRITSECTRW_READ_STATE(a_pu64State)  ASMAtomicReadU64(a_pu64State)
#endif

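/* A minimal sketch of the lock-free update pattern used by nearly every
   function below: read u64State, derive a new value using the RTCSRW_* masks
   and shifts from the IPRT read/write critical section headers, and retry the
   compare-exchange until it sticks or the section is destroyed.  The function
   name is made up for illustration and this block is not compiled. */
#if 0 /* illustration only */
static int pdmCritSectRwExampleIncReaders(PPDMCRITSECTRW pThis)
{
    uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    for (;;)
    {
        uint64_t const u64OldState = u64State;
        uint64_t       c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
        c++;                                    /* the state change we want */
        u64State &= ~RTCSRW_CNT_RD_MASK;
        u64State |= c << RTCSRW_CNT_RD_SHIFT;
        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
            return VINF_SUCCESS;                /* nobody raced us, done */
        ASMNopPause();                          /* lost the race, re-sample and retry */
        AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
        u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    }
}
#endif
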

/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectRwEnterExcl
#undef PDMCritSectRwTryEnterExcl
#undef PDMCritSectRwEnterShared
#undef PDMCritSectRwTryEnterShared


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#if defined(RTASM_HAVE_CMP_WRITE_U128) && defined(RT_ARCH_AMD64)
static int32_t g_fCmpWriteSupported = -1;
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);


#ifdef RTASM_HAVE_CMP_WRITE_U128

# ifdef RT_ARCH_AMD64
/**
 * Called once to initialize g_fCmpWriteSupported.
 */
DECL_NO_INLINE(static, bool) pdmCritSectRwIsCmpWriteU128SupportedSlow(void)
{
    bool const fCmpWriteSupported = RT_BOOL(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16);
    ASMAtomicWriteS32(&g_fCmpWriteSupported, fCmpWriteSupported);
    return fCmpWriteSupported;
}
# endif


/**
 * Indicates whether hardware actually supports 128-bit compare & write.
 */
DECL_FORCE_INLINE(bool) pdmCritSectRwIsCmpWriteU128Supported(void)
{
# ifdef RT_ARCH_AMD64
    int32_t const fCmpWriteSupported = g_fCmpWriteSupported;
    if (RT_LIKELY(fCmpWriteSupported >= 0))
        return fCmpWriteSupported != 0;
    return pdmCritSectRwIsCmpWriteU128SupportedSlow();
# else
    return true;
# endif
}

#endif /* RTASM_HAVE_CMP_WRITE_U128 */

/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pVM     The cross context VM structure.
 * @param   pThis   The read/write critical section.  This is only used in
 *                  R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
{
#ifdef IN_RING3
    RT_NOREF(pVM, pThis);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();

#elif defined(IN_RING0)
    AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    RTNATIVETHREAD hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
    Assert(hNativeSelf != NIL_RTNATIVETHREAD);

#else
# error "invalid context"
#endif
    return hNativeSelf;
}


DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
{
    ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
    LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
    return VERR_PDM_CRITSECTRW_IPE;
}



#ifdef IN_RING3
/**
 * Changes the lock validator sub-class of the read/write critical section.
 *
 * It is recommended to ensure that nobody is using this critical section
 * while changing the value.
 *
 * @returns The old sub-class.  RTLOCKVAL_SUB_CLASS_INVALID is returned if the
 *          lock validator isn't compiled in or either of the parameters is
 *          invalid.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uSubClass   The new sub-class value.
 */
VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
{
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);

    RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
# else
    NOREF(uSubClass);
    return RTLOCKVAL_SUB_CLASS_INVALID;
# endif
}
#endif /* IN_RING3 */

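/* A minimal usage sketch (hypothetical ring-3 init code; the section names are
   made up): giving two related sections distinct sub-classes tells the lock
   validator that taking them in a fixed order is intentional.
   RTLOCKVAL_SUB_CLASS_USER is IPRT's starting point for user-defined values. */
#if 0 /* illustration only */
PDMR3CritSectRwSetSubClass(pCritSectOuter, RTLOCKVAL_SUB_CLASS_USER);
PDMR3CritSectRwSetSubClass(pCritSectInner, RTLOCKVAL_SUB_CLASS_USER + 1);
#endif
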

/**
 * Worker for pdmCritSectRwEnterShared returning with read-ownership of the CS.
 */
DECL_FORCE_INLINE(int) pdmCritSectRwEnterSharedGotIt(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
                                                     bool fNoVal, RTTHREAD hThreadSelf)
{
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fNoVal)
        RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#else
    RT_NOREF(pSrcPos, fNoVal, hThreadSelf);
#endif

    /* got it! */
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
    Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
    return VINF_SUCCESS;
}

/**
 * Worker for pdmCritSectRwEnterShared and pdmCritSectRwEnterSharedBailOut
 * that decrements the wait count and maybe resets the semaphore.
 */
DECLINLINE(int) pdmCritSectRwEnterSharedGotItAfterWaiting(PVMCC pVM, PPDMCRITSECTRW pThis, uint64_t u64State,
                                                          PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
{
    for (;;)
    {
        uint64_t const u64OldState = u64State;
        uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
        AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
        AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
                     pdmCritSectRwCorrupted(pThis, "Invalid read count"));
        cWait--;
        u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
        u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;

        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
        {
            if (cWait == 0)
            {
                if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
                {
                    int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                    AssertRCReturn(rc, rc);
                }
            }
            return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
        }

        ASMNopPause();
        AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
        ASMNopPause();

        u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    }
    /* not reached */
}


#if defined(IN_RING0) || (defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT))
/**
 * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
 * and returns @a rc.
 *
 * @note    May return VINF_SUCCESS if we race the exclusive leave function and
 *          come out on the bottom.
 *
 *          Ring-3 only calls in a case where it is _not_ acceptable to take the
 *          lock, so even if we get the lock we'll have to leave.  In the ring-0
 *          contexts, we can safely return VINF_SUCCESS in case of a race.
 */
DECL_NO_INLINE(static, int) pdmCritSectRwEnterSharedBailOut(PVMCC pVM, PPDMCRITSECTRW pThis, int rc,
                                                            PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
{
#ifdef IN_RING0
    uint64_t const tsStart    = RTTimeNanoTS();
    uint64_t       cNsElapsed = 0;
#endif
    for (;;)
    {
        uint64_t u64State    = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
        uint64_t u64OldState = u64State;

        uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
        AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
        cWait--;

        uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
        AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));

        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
        {
            c--;
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                return rc;
        }
        else
        {
            /*
             * The direction changed, so we can actually get the lock now.
             *
             * This means that we _have_ to wait on the semaphore to be signalled
             * so we can properly reset it.  Otherwise the stuff gets out of whack,
             * because signalling and resetting will race one another.  An
             * exception would be if we're not the last reader waiting and don't
             * need to worry about the resetting.
             *
             * An option would be to do the resetting in PDMCritSectRwEnterExcl,
             * but that would still leave a racing PDMCritSectRwEnterShared
             * spinning hard for a little bit, which isn't great...
             */
            if (cWait == 0)
            {
# ifdef IN_RING0
                /* Do timeout processing first to avoid redoing the above. */
                uint32_t cMsWait;
                if (cNsElapsed <= RT_NS_10SEC)
                    cMsWait = 32;
                else
                {
                    u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                    u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                    {
                        LogFunc(("%p: giving up\n", pThis));
                        return rc;
                    }
                    cMsWait = 2;
                }

                int rcWait = SUPSemEventMultiWait(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, cMsWait);
                Log11Func(("%p: rc=%Rrc %'RU64 ns (hNativeWriter=%p u64State=%#RX64)\n", pThis, rcWait,
                           RTTimeNanoTS() - tsStart, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
# else
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
                int rcWait = SUPSemEventMultiWaitNoResume(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, RT_MS_5SEC);
                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
# endif
                if (rcWait == VINF_SUCCESS)
                {
# ifdef IN_RING0
                    return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
# else
                    /* ring-3: Cannot return VINF_SUCCESS. */
                    Assert(RT_FAILURE_NP(rc));
                    int rc2 = pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
                    if (RT_SUCCESS(rc2))
                        rc2 = pdmCritSectRwLeaveSharedWorker(pVM, pThis, fNoVal);
                    return rc;
# endif
                }
                AssertMsgReturn(rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED,
                                ("%p: rcWait=%Rrc rc=%Rrc", pThis, rcWait, rc),
                                RT_FAILURE_NP(rcWait) ? rcWait : -rcWait);
            }
            else
            {
                u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                    return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
            }

# ifdef IN_RING0
            /* Calculate the elapsed time here to avoid redoing state work. */
            cNsElapsed = RTTimeNanoTS() - tsStart;
# endif
        }

        ASMNopPause();
        AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
        ASMNopPause();
    }
}
#endif /* IN_RING0 || (IN_RING3 && PDMCRITSECTRW_STRICT) */


/**
 * Worker for pdmCritSectRwEnterShared that handles waiting for a contended CS.
 * Caller has already added us to the read and read-wait counters.
 */
static int pdmCritSectRwEnterSharedContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis,
                                             int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
{
    PSUPDRVSESSION const   pSession    = pVM->pSession;
    SUPSEMEVENTMULTI const hEventMulti = (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead;
# ifdef IN_RING0
    uint64_t const         tsStart           = RTTimeNanoTS();
    uint64_t const         cNsMaxTotalDef    = RT_NS_5MIN;
    uint64_t               cNsMaxTotal       = cNsMaxTotalDef;
    uint32_t               cMsMaxOne         = RT_MS_5SEC;
    bool                   fNonInterruptible = false;
# endif

    for (uint32_t iLoop = 0; ; iLoop++)
    {
        /*
         * Wait for the direction to switch.
         */
        int rc;
# ifdef IN_RING3
#  if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                   RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
        if (RT_FAILURE(rc))
            return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
#  else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#  endif
# endif

        for (;;)
        {
            /*
             * We always wait with a timeout so we can re-check the structure sanity
             * and not get stuck waiting on a corrupt or deleted section.
             */
# ifdef IN_RING3
            rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, RT_MS_5SEC);
# else
            rc = !fNonInterruptible
               ? SUPSemEventMultiWaitNoResume(pSession, hEventMulti, cMsMaxOne)
               : SUPSemEventMultiWait(pSession, hEventMulti, cMsMaxOne);
            Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p u64State=%#RX64)\n", pThis, rc,
                       RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
# endif
            if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
            { /* likely */ }
            else
            {
# ifdef IN_RING3
                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
# endif
                return VERR_SEM_DESTROYED;
            }
            if (RT_LIKELY(rc == VINF_SUCCESS))
                break;

            /*
             * Timeout and interrupted waits need careful handling in ring-0
             * because we're cooperating with ring-3 on this critical section
             * and thus need to make absolutely sure we won't get stuck here.
             *
             * The r0 interrupted case means something is pending (termination,
             * signal, APC, debugger, whatever), so we must try our best to
             * return to the caller and to ring-3 so it can be dealt with.
             */
            if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
            {
# ifdef IN_RING0
                uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
                int const      rcTerm     = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
                AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
                          ("rcTerm=%Rrc\n", rcTerm));
                if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
                    cNsMaxTotal = RT_NS_1MIN;

                if (rc == VERR_TIMEOUT)
                {
                    /* Try to get out of here with a non-VINF_SUCCESS status if
                       the thread is terminating or if the timeout has been exceeded. */
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrTimeout);
                    if (   rcTerm == VINF_THREAD_IS_TERMINATING
                        || cNsElapsed > cNsMaxTotal)
                        return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
                                                               pSrcPos, fNoVal, hThreadSelf);
                }
                else
                {
                    /* For interrupt cases, we must return if we can.  If rcBusy is VINF_SUCCESS,
                       we will try non-interruptible sleep for a while to help resolve the issue
                       w/o guru'ing. */
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrInterrupted);
                    if (   rcTerm != VINF_THREAD_IS_TERMINATING
                        && rcBusy == VINF_SUCCESS
                        && pVCpu != NULL
                        && cNsElapsed <= cNsMaxTotal)
                    {
                        if (!fNonInterruptible)
                        {
                            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits);
                            fNonInterruptible = true;
                            cMsMaxOne         = 32;
                            uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
                            if (cNsLeft > RT_NS_10SEC)
                                cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
                        }
                    }
                    else
                        return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
                                                               pSrcPos, fNoVal, hThreadSelf);
                }
# else  /* IN_RING3 */
                RT_NOREF(pVM, pVCpu, rcBusy);
# endif /* IN_RING3 */
            }
            /*
             * Any other return code is fatal.
             */
            else
            {
# ifdef IN_RING3
                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
# endif
                AssertMsgFailed(("rc=%Rrc\n", rc));
                return RT_FAILURE_NP(rc) ? rc : -rc;
            }
        }

# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
# endif

        /*
         * Check the direction.
         */
        Assert(pThis->s.Core.fNeedReset);
        uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /*
             * Decrement the wait count and maybe reset the semaphore (if we're last).
             */
            return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
        }

        AssertMsg(iLoop < 1,
                  ("%p: %u u64State=%#RX64 hNativeWriter=%p\n", pThis, iLoop, u64State, pThis->s.Core.u.s.hNativeWriter));
        RTThreadYield();
    }

    /* not reached */
}


/**
 * Worker that enters a read/write critical section with shared access.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
                                    PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
#endif

    /*
     * Work the state.
     */
    uint64_t u64State    = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    uint64_t u64OldState = u64State;
    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 4);
            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
            {
                Assert(!pThis->s.Core.fNeedReset);
                return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
            if (hNativeWriter != NIL_RTNATIVETHREAD)
            {
                RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
                if (hNativeSelf == hNativeWriter)
                {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                    if (!fNoVal)
                    {
                        int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
                        if (RT_FAILURE(rc9))
                            return rc9;
                    }
#endif
                    uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
                    Assert(cReads < _16K);
                    AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
                                     VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
                    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
                    return VINF_SUCCESS; /* don't break! */
                }
            }

            /*
             * If we're only trying, return already.
             */
            if (fTryOnly)
            {
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                return VERR_SEM_BUSY;
            }

#if defined(IN_RING3) || defined(IN_RING0)
            /*
             * Add ourselves to the queue and wait for the direction to change.
             */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);

            uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
            cWait++;
            Assert(cWait <= c);
            Assert(cWait < RTCSRW_CNT_MASK / 2);
            AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);

            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);

            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
            {
                /*
                 * In ring-3 it's straightforward, just optimize the RTThreadSelf() call.
                 */
# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
                return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
# elif defined(IN_RING3)
                return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, RTThreadSelf());
# else /* IN_RING0 */
                /*
                 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
                 * account when waiting on contended locks.
                 */
                PVMCPUCC pVCpu = VMMGetCpu(pVM);
                if (pVCpu)
                {
                    VMMR0EMTBLOCKCTX Ctx;
                    int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
                    if (rc == VINF_SUCCESS)
                    {
                        Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));

                        rc = pdmCritSectRwEnterSharedContended(pVM, pVCpu, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);

                        VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
                    }
                    else
                    {
                        //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
                        rc = pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
                    }
                    return rc;
                }

                /* Non-EMT. */
                Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
                return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
# endif /* IN_RING0 */
            }

#else  /* !IN_RING3 && !IN_RING0 */
            /*
             * We cannot call SUPSemEventMultiWaitNoResume in this context.  Go
             * back to ring-3 and do it there or return rcBusy.
             */
# error "Unused code."
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
            if (rcBusy == VINF_SUCCESS)
            {
                PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                 *        back to ring-3.  Goes for both kind of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif /* !IN_RING3 && !IN_RING0 */
        }

        ASMNopPause();
        if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
        { /* likely */ }
        else
            return VERR_SEM_DESTROYED;
        ASMNopPause();

        u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
        u64OldState = u64State;
    }
    /* not reached */
}


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section thru a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterShared.
 */
VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}

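/* A minimal usage sketch (hypothetical device code; pVM, pThisDev, iEntry and
   the rcBusy choice are made up): read shared data under the section, letting
   contended ring-0/RC callers get VERR_SEM_BUSY back instead of blocking. */
#if 0 /* illustration only */
int rc = PDMCritSectRwEnterShared(pVM, &pThisDev->CritSectRw, VERR_SEM_BUSY);
if (rc == VINF_SUCCESS)
{
    uint32_t const uValue = pThisDev->au32Table[iEntry];    /* read-side work */
    PDMCritSectRwLeaveShared(pVM, &pThisDev->CritSectRw);
}
else if (rc == VERR_SEM_BUSY)   /* ring-0/RC only: redo the access in ring-3 */
    rc = VINF_IOM_R3_MMIO_READ;
#endif
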

/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section thru a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterShared.
 */
VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}

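/* A minimal usage sketch (hypothetical names): opportunistically take the
   section shared and fall back to other work instead of blocking. */
#if 0 /* illustration only */
if (PDMCritSectRwTryEnterShared(pVM, &pThisDev->CritSectRw) == VINF_SUCCESS)
{
    /* ... fast path under the read lock ... */
    PDMCritSectRwLeaveShared(pVM, &pThisDev->CritSectRw);
}
else
{
    /* ... busy: skip or reschedule the work ... */
}
#endif
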

/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with shared (read) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3);
}
#endif


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(fNoVal);
#endif

    /*
     * Check the direction and take action accordingly.
     */
#ifdef IN_RING0
    PVMCPUCC pVCpu = NULL;
#endif
    uint64_t u64State    = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
    {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
        else
        {
            int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTCSRW_CNT_RD_MASK;
                u64State |= c << RTCSRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                    break;
            }
            else
            {
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
                Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
                if (!pVCpu)
                    pVCpu = VMMGetCpu(pVM);
                if (   pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
                    || VMMRZCallRing3IsEnabled(pVCpu)
                    || RTSemEventIsSignalSafe()
                    || (   VMMR0ThreadCtxHookIsEnabled(pVCpu)       /* Doesn't matter if Signal() blocks if we have hooks, ... */
                        && RTThreadPreemptIsEnabled(NIL_RTTHREAD)   /* ... and preemption is still enabled, */
                        && ASMIntAreEnabled())                      /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
                   )
# endif
                {
                    /* Reverse the direction and signal the writer threads. */
                    u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                    {
                        int rc;
# ifdef IN_RING0
                        STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                        if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
                        {
                            VMMR0EMTBLOCKCTX Ctx;
                            rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
                            VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);

                            rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);

                            VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
                        }
                        else
# endif
                            rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                        AssertRC(rc);
                        return rc;
                    }
                }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
                else
# endif
                {
                    /* Queue the exit request (ring-3). */
# ifndef IN_RING0
                    PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
# endif
                    uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
                    LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
                    VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
                                                  ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
                    pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
                    VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
                                                  RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
                                                  &&    ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & HOST_PAGE_OFFSET_MASK)
                                                     == ((uintptr_t)pThis & HOST_PAGE_OFFSET_MASK),
                                                  ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
                                                  pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
                    STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                    break;
                }
#endif
            }

            ASMNopPause();
            if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
            { }
            else
                return VERR_SEM_DESTROYED;
            ASMNopPause();

            u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        /*
         * Write direction.  Check that it's the owner calling and that it has reads to undo.
         */
        RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
        AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);

        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
            if (RT_FAILURE(rc))
                return rc;
        }
#endif
        uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
        AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
    }

    return VINF_SUCCESS;
}


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
}

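/* A minimal sketch (hypothetical names) of the write/read recursion the
   workers above support: the exclusive owner may also enter shared, which is
   counted in cWriterReads and must be undone with a matching shared leave
   before the final exclusive leave. */
#if 0 /* illustration only */
int rc = PDMCritSectRwEnterExcl(pVM, &pThisDev->CritSectRw, VERR_SEM_BUSY);
if (rc == VINF_SUCCESS)
{
    rc = PDMCritSectRwEnterShared(pVM, &pThisDev->CritSectRw, VERR_SEM_BUSY); /* read recursion, never blocks */
    if (rc == VINF_SUCCESS)
        PDMCritSectRwLeaveShared(pVM, &pThisDev->CritSectRw);
    PDMCritSectRwLeaveExcl(pVM, &pThisDev->CritSectRw);
}
#endif
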

#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
}
#endif


/**
 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
 *
 * @returns @a rc unless corrupted.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rc          The status to return.
 */
DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
{
    /*
     * Decrement the counts and return the error.
     */
    for (;;)
    {
        uint64_t       u64State    = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
        uint64_t const u64OldState = u64State;
        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
        AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
        c--;
        u64State &= ~RTCSRW_CNT_WR_MASK;
        u64State |= c << RTCSRW_CNT_WR_SHIFT;
        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
            return rc;

        ASMNopPause();
        AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
        ASMNopPause();
    }
}


/**
 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
 * gotten exclusive ownership of the critical section.
 */
DECL_FORCE_INLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
                                                   bool fNoVal, RTTHREAD hThreadSelf)
{
    RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
    Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));

#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
    pThis->s.Core.cWriteRecursions = 1;
#else
    ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
#endif
    Assert(pThis->s.Core.cWriterReads == 0);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fNoVal)
    {
        if (hThreadSelf == NIL_RTTHREAD)
            hThreadSelf = RTThreadSelfAutoAdopt();
        RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
    }
#endif
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
    STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
 * contended.
 */
static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
                                               PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
{
    RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);

    PSUPDRVSESSION const pSession = pVM->pSession;
    SUPSEMEVENT const    hEvent   = (SUPSEMEVENT)pThis->s.Core.hEvtWrite;
# ifdef IN_RING0
    uint64_t const       tsStart           = RTTimeNanoTS();
    uint64_t const       cNsMaxTotalDef    = RT_NS_5MIN;
    uint64_t             cNsMaxTotal       = cNsMaxTotalDef;
    uint32_t             cMsMaxOne         = RT_MS_5SEC;
    bool                 fNonInterruptible = false;
# endif

    for (uint32_t iLoop = 0; ; iLoop++)
    {
        /*
         * Wait for our turn.
         */
        int rc;
# ifdef IN_RING3
#  ifdef PDMCRITSECTRW_STRICT
        rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
                                                 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
            return pdmCritSectRwEnterExclBailOut(pThis, rc);
#  else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#  endif
# endif

        for (;;)
        {
            /*
             * We always wait with a timeout so we can re-check the structure sanity
             * and not get stuck waiting on a corrupt or deleted section.
             */
# ifdef IN_RING3
            rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
# else
            rc = !fNonInterruptible
               ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
               : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
            Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p)\n",
                       pThis, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter));
# endif
            if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
            { /* likely */ }
            else
            {
# ifdef IN_RING3
                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
# endif
                return VERR_SEM_DESTROYED;
            }
            if (RT_LIKELY(rc == VINF_SUCCESS))
                break;

            /*
             * Timeout and interrupted waits need careful handling in ring-0
             * because we're cooperating with ring-3 on this critical section
             * and thus need to make absolutely sure we won't get stuck here.
             *
             * The r0 interrupted case means something is pending (termination,
             * signal, APC, debugger, whatever), so we must try our best to
             * return to the caller and to ring-3 so it can be dealt with.
             */
            if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
            {
# ifdef IN_RING0
                uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
                int const      rcTerm     = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
                AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
                          ("rcTerm=%Rrc\n", rcTerm));
                if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
                    cNsMaxTotal = RT_NS_1MIN;

                if (rc == VERR_TIMEOUT)
                {
                    /* Try to get out of here with a non-VINF_SUCCESS status if
                       the thread is terminating or if the timeout has been exceeded. */
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrTimeout);
                    if (   rcTerm == VINF_THREAD_IS_TERMINATING
                        || cNsElapsed > cNsMaxTotal)
                        return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
                }
                else
                {
                    /* For interrupt cases, we must return if we can.  If rcBusy is VINF_SUCCESS,
                       we will try non-interruptible sleep for a while to help resolve the issue
                       w/o guru'ing. */
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrInterrupted);
                    if (   rcTerm != VINF_THREAD_IS_TERMINATING
                        && rcBusy == VINF_SUCCESS
                        && pVCpu != NULL
                        && cNsElapsed <= cNsMaxTotal)
                    {
                        if (!fNonInterruptible)
                        {
                            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits);
                            fNonInterruptible = true;
                            cMsMaxOne         = 32;
                            uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
                            if (cNsLeft > RT_NS_10SEC)
                                cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
                        }
                    }
                    else
                        return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
                }
# else  /* IN_RING3 */
                RT_NOREF(pVM, pVCpu, rcBusy);
# endif /* IN_RING3 */
            }
            /*
             * Any other return code is fatal.
             */
            else
            {
# ifdef IN_RING3
                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
# endif
                AssertMsgFailed(("rc=%Rrc\n", rc));
                return RT_FAILURE_NP(rc) ? rc : -rc;
            }
        }

# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
# endif

        /*
         * Try take exclusive write ownership.
         */
        uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
        {
            bool fDone;
            ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
            if (fDone)
                return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
        }
        AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
    }
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Worker that enters a read/write critical section with exclusive access.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
                                  PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

    RTTHREAD hThreadSelf = NIL_RTTHREAD;
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fTryOnly)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
        uint32_t const cDepth = ++pThis->s.Core.cWriteRecursions;
#else
        uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
#endif
        AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
                         ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
                         VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
        return VINF_SUCCESS;
    }

    /*
     * First we try grab an idle critical section using 128-bit atomics.
     */
    /** @todo This could be moved up before the recursion check. */
    uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
#ifdef RTASM_HAVE_CMP_WRITE_U128
    if (   (u64State & ~RTCSRW_DIR_MASK) == 0
        && pdmCritSectRwIsCmpWriteU128Supported())
    {
        RTCRITSECTRWSTATE OldState;
        OldState.s.u64State      = u64State;
        OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
        AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));

        RTCRITSECTRWSTATE NewState;
        NewState.s.u64State      = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
        NewState.s.hNativeWriter = hNativeSelf;

        if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
            return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);

        u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    }
#endif

    /*
     * Do it step by step.  Update the state to reflect our desire.
     */
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
            c++;
            Assert(c < RTCSRW_CNT_WR_MASK / 4);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                break;
        }
        else if (fTryOnly)
        {
            /* Wrong direction and we're not supposed to wait, just return. */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
            return VERR_SEM_BUSY;
        }
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
            c++;
            Assert(c < RTCSRW_CNT_WR_MASK / 4);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                break;
        }

        ASMNopPause();

        if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
        { /* likely */ }
        else
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try grab the ownership.  Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
              && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
                  || fTryOnly);
    if (fDone)
    {
        ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
        if (fDone)
            return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
    }

    /*
     * Okay, we have contention and will have to wait unless we're just trying.
     */
    if (fTryOnly)
    {
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); /** @todo different statistics for this */
        return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
    }

    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));

    /*
     * Ring-3 is pretty straightforward.
     */
#if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
    return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
#elif defined(IN_RING3)
    return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());

#elif defined(IN_RING0)
    /*
     * In ring-0 context we have to take the special VT-x/AMD-V HM context into
     * account when waiting on contended locks.
     */
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        VMMR0EMTBLOCKCTX Ctx;
        int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
        if (rc == VINF_SUCCESS)
        {
            Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));

            rc = pdmR3R0CritSectRwEnterExclContended(pVM, pVCpu, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);

            VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
        }
        else
        {
            //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
            rc = pdmCritSectRwEnterExclBailOut(pThis, rc);
        }
        return rc;
    }

    /* Non-EMT. */
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);

#else
# error "Unused."
    /*
     * Raw-mode: Call host and take it there if rcBusy is VINF_SUCCESS.
     */
    rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
    if (rcBusy == VINF_SUCCESS)
    {
        Assert(!fTryOnly);
        PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
        /** @todo Should actually do this via VMMR0.cpp instead of going all the way
         *        back to ring-3.  Goes for both kind of crit sects. */
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
    }
    return rcBusy;
#endif
}

1530
1531/**
1532 * Tries to enter a critical section with exclusive (write) access.
1533 *
1534 * @returns VBox status code.
1535 * @retval VINF_SUCCESS on success.
1536 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1537 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1538 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1539 * during the operation.
1540 *
1541 * @param pVM The cross context VM structure.
1542 * @param pThis Pointer to the read/write critical section.
1543 * @param rcBusy The status code to return when we're in RC or R0 and the
1544 * section is busy. Pass VINF_SUCCESS to acquire the
1545 * critical section through a ring-3 call if necessary.
1546 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1547 * PDMCritSectRwTryEnterExclDebug,
1548 * PDMCritSectEnterDebug, PDMCritSectEnter,
1549 * RTCritSectRwEnterExcl.
1550 */
1551VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
1552{
1553#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1554 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1555#else
1556 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1557 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1558#endif
1559}
1560
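/*
 * Illustrative sketch, not part of the original file: the typical exclusive
 * enter/leave pairing for this API. The helper name and the choice of
 * VERR_SEM_BUSY as rcBusy are hypothetical; in ring-0/raw-mode a contended
 * section returns rcBusy instead of blocking, while ring-3 callers wait.
 */
#if 0
static int exampleUpdateSharedState(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    int rc = PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        /* ... exclusive access to whatever data the section protects ... */
        rc = PDMCritSectRwLeaveExcl(pVM, pCritSect);
    }
    /* Otherwise rc is VERR_SEM_BUSY (R0/RC contention) or VERR_SEM_DESTROYED. */
    return rc;
}
#endif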
1561
1562/**
1563 * Tries to enter a critical section with exclusive (write) access.
1564 *
1565 * @returns VBox status code.
1566 * @retval VINF_SUCCESS on success.
1567 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1568 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1569 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1570 * during the operation.
1571 *
1572 * @param pVM The cross context VM structure.
1573 * @param pThis Pointer to the read/write critical section.
1574 * @param rcBusy The status code to return when we're in RC or R0 and the
1575 * section is busy. Pass VINF_SUCCESS to acquire the
1576 * critical section through a ring-3 call if necessary.
1577 * @param uId Where we're entering the section.
1578 * @param SRC_POS The source position.
1579 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1580 * PDMCritSectRwTryEnterExclDebug,
1581 * PDMCritSectEnterDebug, PDMCritSectEnter,
1582 * RTCritSectRwEnterExclDebug.
1583 */
1584VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1585{
1586 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1587#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1588 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1589#else
1590 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1591 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1592#endif
1593}
1594
1595
1596/**
1597 * Tries to enter a critical section with exclusive (write) access.
1598 *
1599 * @retval VINF_SUCCESS on success.
1600 * @retval VERR_SEM_BUSY if the critsect was owned.
1601 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1602 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1603 * during the operation.
1604 *
1605 * @param pVM The cross context VM structure.
1606 * @param pThis Pointer to the read/write critical section.
1607 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1608 * PDMCritSectRwEnterExclDebug,
1609 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1610 * RTCritSectRwTryEnterExcl.
1611 */
1612VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1613{
1614#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1615 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1616#else
1617 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1618 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1619#endif
1620}
1621
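/*
 * Illustrative sketch, not from the original source: a polling pattern built
 * on the try-enter variant, which never blocks and never drops to ring-3;
 * VERR_SEM_BUSY simply means somebody else currently owns the section.
 */
#if 0
static bool exampleTryUpdate(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    if (PDMCritSectRwTryEnterExcl(pVM, pCritSect) == VINF_SUCCESS)
    {
        /* ... exclusive work ... */
        PDMCritSectRwLeaveExcl(pVM, pCritSect);
        return true;
    }
    return false; /* Busy (or destroyed); the caller retries later. */
}
#endif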
1622
1623/**
1624 * Tries to enter a critical section with exclusive (write) access.
1625 *
1626 * @retval VINF_SUCCESS on success.
1627 * @retval VERR_SEM_BUSY if the critsect was owned.
1628 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1629 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1630 * during the operation.
1631 *
1632 * @param pVM The cross context VM structure.
1633 * @param pThis Pointer to the read/write critical section.
1634 * @param uId Where we're entering the section.
1635 * @param SRC_POS The source position.
1636 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1637 * PDMCritSectRwEnterExclDebug,
1638 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1639 * RTCritSectRwTryEnterExclDebug.
1640 */
1641VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1642{
1643 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1644#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1645 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1646#else
1647 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1648 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1649#endif
1650}
1651
1652
1653#ifdef IN_RING3
1654/**
1655 * Enters a PDM read/write critical section with exclusive (write) access.
1656 *
1657 * @returns VINF_SUCCESS if entered successfully.
1658 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1659 * during the operation.
1660 *
1661 * @param pVM The cross context VM structure.
1662 * @param pThis Pointer to the read/write critical section.
1663 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1664 */
1665VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1666{
1667 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
1668}
1669#endif /* IN_RING3 */
1670
1671
1672/**
1673 * Leaves a critical section held exclusively.
1674 *
1675 * @returns VBox status code.
1676 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1677 * during the operation.
1678 * @param pVM The cross context VM structure.
1679 * @param pThis Pointer to the read/write critical section.
1680 * @param fNoVal No validation records (i.e. queued release).
1681 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1682 */
1683static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1684{
1685 /*
1686 * Validate handle.
1687 */
1688 AssertPtr(pThis);
1689 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1690
1691#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1692 NOREF(fNoVal);
1693#endif
1694
1695 /*
1696 * Check ownership.
1697 */
1698 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1699 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1700
1701 RTNATIVETHREAD hNativeWriter;
1702 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1703 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1704
1705
1706 /*
1707 * Unwind one recursion. Not the last?
1708 */
1709 if (pThis->s.Core.cWriteRecursions != 1)
1710 {
1711#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1712 if (fNoVal)
1713 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1714 else
1715 {
1716 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1717 if (RT_FAILURE(rc9))
1718 return rc9;
1719 }
1720#endif
1721#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1722 uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
1723#else
1724 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1725#endif
1726 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1727 return VINF_SUCCESS;
1728 }
1729
1730
1731 /*
1732 * Final recursion.
1733 */
1734 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1735#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1736 if (fNoVal)
1737 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1738 else
1739 {
1740 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1741 if (RT_FAILURE(rc9))
1742 return rc9;
1743 }
1744#endif
1745
1746
1747#ifdef RTASM_HAVE_CMP_WRITE_U128
1748 /*
1749 * See if we can get out w/o any signalling as this is a common case.
1750 */
1751 if (pdmCritSectRwIsCmpWriteU128Supported())
1752 {
1753 RTCRITSECTRWSTATE OldState;
1754 OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1755 if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
1756 {
1757 OldState.s.hNativeWriter = hNativeSelf;
1758 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1759
1760 RTCRITSECTRWSTATE NewState;
1761 NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
1762 NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1763
1764# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1765 pThis->s.Core.cWriteRecursions = 0;
1766# else
1767 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1768# endif
1769 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1770
1771 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1772 return VINF_SUCCESS;
1773
1774 /* bail out. */
1775 pThis->s.Core.cWriteRecursions = 1;
1776 }
1777 }
1778#endif /* RTASM_HAVE_CMP_WRITE_U128 */
1779
1780
1781#if defined(IN_RING3) || defined(IN_RING0)
1782 /*
1783 * Ring-3: Straightforward, just update the state and, if necessary, signal waiters.
1784 * Ring-0: Try leave for real, depends on host and context.
1785 */
1786# ifdef IN_RING0
1787 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
1788 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1789 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
1790 || VMMRZCallRing3IsEnabled(pVCpu)
1791 || RTSemEventIsSignalSafe()
1792 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
1793 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
1794 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
1795 )
1796# endif
1797 {
1798# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1799 pThis->s.Core.cWriteRecursions = 0;
1800# else
1801 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1802# endif
1803 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1804 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1805
1806 for (;;)
1807 {
1808 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1809 uint64_t u64OldState = u64State;
1810
1811 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1812 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1813 c--;
1814
1815 if ( c > 0
1816 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1817 {
1818 /*
1819 * Don't change the direction, wake up the next writer if any.
1820 */
1821 u64State &= ~RTCSRW_CNT_WR_MASK;
1822 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1823 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1824 {
1825 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1826 int rc;
1827 if (c == 0)
1828 rc = VINF_SUCCESS;
1829# ifdef IN_RING0
1830 else if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
1831 {
1832 VMMR0EMTBLOCKCTX Ctx;
1833 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1834 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1835
1836 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1837
1838 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1839 }
1840# endif
1841 else
1842 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1843 AssertRC(rc);
1844 return rc;
1845 }
1846 }
1847 else
1848 {
1849 /*
1850 * Reverse the direction and signal the reader threads.
1851 */
1852 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1853 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1854 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1855 {
1856 Assert(!pThis->s.Core.fNeedReset);
1857 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1858 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1859
1860 int rc;
1861# ifdef IN_RING0
1862 if (!RTSemEventMultiIsSignalSafe() && pVCpu != NULL)
1863 {
1864 VMMR0EMTBLOCKCTX Ctx;
1865 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1866 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1867
1868 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1869
1870 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1871 }
1872 else
1873# endif
1874 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1875 AssertRC(rc);
1876 return rc;
1877 }
1878 }
1879
1880 ASMNopPause();
1881 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1882 { /*likely*/ }
1883 else
1884 return VERR_SEM_DESTROYED;
1885 ASMNopPause();
1886 }
1887 /* not reached! */
1888 }
1889#endif /* IN_RING3 || IN_RING0 */
1890
1891
1892#ifndef IN_RING3
1893 /*
1894 * Queue the requested exit for ring-3 execution.
1895 */
1896# ifndef IN_RING0
1897 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1898# endif
1899 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1900 LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
1901 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1902 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1903 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1904 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1905 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
1906 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & HOST_PAGE_OFFSET_MASK)
1907 == ((uintptr_t)pThis & HOST_PAGE_OFFSET_MASK),
1908 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1909 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1910 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1911 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1912 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1913 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1914 return VINF_SUCCESS;
1915#endif
1916}
1917
1918
1919/**
1920 * Leaves a critical section held exclusively.
1921 *
1922 * @returns VBox status code.
1923 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1924 * during the operation.
1925 * @param pVM The cross context VM structure.
1926 * @param pThis Pointer to the read/write critical section.
1927 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1928 */
1929VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1930{
1931 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
1932}
1933
1934
1935#if defined(IN_RING3) || defined(IN_RING0)
1936/**
1937 * PDMCritSectBothFF interface.
1938 *
1939 * @param pVM The cross context VM structure.
1940 * @param pThis Pointer to the read/write critical section.
1941 */
1942void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1943{
1944 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
1945}
1946#endif
1947
1948
1949/**
1950 * Checks whether the caller is the exclusive (write) owner of the critical section.
1951 *
1952 * @retval true if owner.
1953 * @retval false if not owner.
1954 * @param pVM The cross context VM structure.
1955 * @param pThis Pointer to the read/write critical section.
1956 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1957 * RTCritSectRwIsWriteOwner.
1958 */
1959VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
1960{
1961 /*
1962 * Validate handle.
1963 */
1964 AssertPtr(pThis);
1965 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1966
1967 /*
1968 * Check ownership.
1969 */
1970 RTNATIVETHREAD hNativeWriter;
1971 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1972 if (hNativeWriter == NIL_RTNATIVETHREAD)
1973 return false;
1974 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1975}
1976
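/*
 * Illustrative sketch (hypothetical helper, not part of the build): the usual
 * role of this predicate is to assert lock ownership on entry to a function
 * that requires the caller to hold the section exclusively.
 */
#if 0
static void exampleRequiresWriteLock(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    Assert(PDMCritSectRwIsWriteOwner(pVM, pCritSect));
    /* ... code that relies on holding the section exclusively ... */
}
#endif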
1977
1978/**
1979 * Checks if the caller is one of the read owners of the critical section.
1980 *
1981 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1982 * enabled. Meaning, the answer is not trustworthy unless
1983 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1984 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1985 * creating the semaphore. And finally, if you used a locking class,
1986 * don't disable deadlock detection by setting cMsMinDeadlock to
1987 * RT_INDEFINITE_WAIT.
1988 *
1989 * In short, only use this for assertions.
1990 *
1991 * @returns @c true if reader, @c false if not.
1992 * @param pVM The cross context VM structure.
1993 * @param pThis Pointer to the read/write critical section.
1994 * @param fWannaHear What you'd like to hear when lock validation is not
1995 * available. (For avoiding asserting all over the place.)
1996 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1997 */
1998VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
1999{
2000 /*
2001 * Validate handle.
2002 */
2003 AssertPtr(pThis);
2004 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
2005
2006 /*
2007 * Inspect the state.
2008 */
2009 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2010 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
2011 {
2012 /*
2013 * It's in write mode, so we can only be a reader if we're also the
2014 * current writer.
2015 */
2016 RTNATIVETHREAD hWriter;
2017 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hWriter);
2018 if (hWriter == NIL_RTNATIVETHREAD)
2019 return false;
2020 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
2021 }
2022
2023 /*
2024 * Read mode. If there are no current readers, then we cannot be a reader.
2025 */
2026 if (!(u64State & RTCSRW_CNT_RD_MASK))
2027 return false;
2028
2029#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
2030 /*
2031 * Ask the lock validator.
2032 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
2033 */
2034 NOREF(fWannaHear);
2035 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
2036#else
2037 /*
2038 * Ok, we don't know, just tell the caller what they want to hear.
2039 */
2040 return fWannaHear;
2041#endif
2042}
2043
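/*
 * Illustrative sketch (hypothetical helper): per the caution above this is
 * only reliable in strict builds, so confine it to assertions and pass the
 * answer you would like for non-strict builds via fWannaHear.
 */
#if 0
static void exampleRequiresReadLock(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    Assert(PDMCritSectRwIsReadOwner(pVM, pCritSect, true /*fWannaHear*/));
    /* ... code that expects the caller to hold a shared (read) entry ... */
}
#endif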
2044
2045/**
2046 * Gets the write recursion count.
2047 *
2048 * @returns The write recursion count (0 if bad critsect).
2049 * @param pThis Pointer to the read/write critical section.
2050 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
2051 * RTCritSectRwGetWriteRecursion.
2052 */
2053VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
2054{
2055 /*
2056 * Validate handle.
2057 */
2058 AssertPtr(pThis);
2059 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2060
2061 /*
2062 * Return the requested data.
2063 */
2064 return pThis->s.Core.cWriteRecursions;
2065}
2066
2067
2068/**
2069 * Gets the read recursion count of the current writer.
2070 *
2071 * @returns The read recursion count (0 if bad critsect).
2072 * @param pThis Pointer to the read/write critical section.
2073 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
2074 * RTCritSectRwGetWriterReadRecursion.
2075 */
2076VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
2077{
2078 /*
2079 * Validate handle.
2080 */
2081 AssertPtr(pThis);
2082 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2083
2084 /*
2085 * Return the requested data.
2086 */
2087 return pThis->s.Core.cWriterReads;
2088}
2089
2090
2091/**
2092 * Gets the current number of reads.
2093 *
2094 * This includes all read recursions, so it might be higher than the number of
2095 * read owners. It does not include reads done by the current writer.
2096 *
2097 * @returns The read count (0 if bad critsect).
2098 * @param pThis Pointer to the read/write critical section.
2099 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
2100 * RTCritSectRwGetReadCount.
2101 */
2102VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
2103{
2104 /*
2105 * Validate input.
2106 */
2107 AssertPtr(pThis);
2108 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2109
2110 /*
2111 * Return the requested data.
2112 */
2113 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2114 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
2115 return 0;
2116 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
2117}
2118
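/*
 * Illustrative sketch (hypothetical helper; masks and shifts come from
 * iprt/critsectrw.h): how the 64-bit state word read by the getters above
 * decomposes into direction and the two counts.
 */
#if 0
static void exampleDecodeState(PPDMCRITSECTRW pCritSect)
{
    uint64_t const u64State  = PDMCRITSECTRW_READ_STATE(&pCritSect->s.Core.u.s.u64State);
    bool const     fWriteDir = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
    uint64_t const cReaders  = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
    uint64_t const cWriters  = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
    NOREF(fWriteDir); NOREF(cReaders); NOREF(cWriters);
}
#endif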
2119
2120/**
2121 * Checks if the read/write critical section is initialized or not.
2122 *
2123 * @retval true if initialized.
2124 * @retval false if not initialized.
2125 * @param pThis Pointer to the read/write critical section.
2126 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
2127 */
2128VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
2129{
2130 AssertPtr(pThis);
2131 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
2132}
2133