VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@90658

Last change on this file since 90658 was 90658, checked in by vboxsync, 4 years ago

VMM/PDMCritSectRwLeaveExcl: Some code structure changes, no actual code change. bugref:6695

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 70.2 KB
1/* $Id: PDMAllCritSectRw.cpp 90658 2021-08-12 11:29:37Z vboxsync $ */
2/** @file
3 * PDM - Read/Write Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2009-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39#if defined(IN_RING3) || defined(IN_RING0)
40# include <iprt/thread.h>
41#endif
42#ifdef IN_RING0
43# include <iprt/time.h>
44#endif
45#ifdef RT_ARCH_AMD64
46# include <iprt/x86.h>
47#endif
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** The number of loops to spin for shared access in ring-3. */
54#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
55/** The number of loops to spin for shared access in ring-0. */
56#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
57/** The number of loops to spin for shared access in the raw-mode context. */
58#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
59
60/** The number of loops to spin for exclusive access in ring-3. */
61#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
62/** The number of loops to spin for exclusive access in ring-0. */
63#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
64/** The number of loops to spin for exclusive access in the raw-mode context. */
65#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
66
67/** Max number of write or write/read recursions. */
68#define PDM_CRITSECTRW_MAX_RECURSIONS _1M
69
70/** Skips some of the overly paranoid atomic reads and updates.
71 * Makes some assumptions about cache coherence, though not brave enough not to
72 * always end with an atomic update. */
73#define PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
74
75/** For reading RTCRITSECTRWSTATE::s::u64State. */
76#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
77# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicUoReadU64(a_pu64State)
78#else
79# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicReadU64(a_pu64State)
80#endif
81
82
83/* Undefine the automatic VBOX_STRICT API mappings. */
84#undef PDMCritSectRwEnterExcl
85#undef PDMCritSectRwTryEnterExcl
86#undef PDMCritSectRwEnterShared
87#undef PDMCritSectRwTryEnterShared
88
89
90/*********************************************************************************************************************************
91* Global Variables *
92*********************************************************************************************************************************/
93#if defined(RTASM_HAVE_CMP_WRITE_U128) && defined(RT_ARCH_AMD64)
94static int32_t g_fCmpWriteSupported = -1;
95#endif
96
97
98#ifdef RTASM_HAVE_CMP_WRITE_U128
99
100# ifdef RT_ARCH_AMD64
101/**
102 * Called once to initialize g_fCmpWriteSupported.
103 */
104DECL_NO_INLINE(static, bool) pdmCritSectRwIsCmpWriteU128SupportedSlow(void)
105{
106 bool const fCmpWriteSupported = RT_BOOL(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16);
107 ASMAtomicWriteS32(&g_fCmpWriteSupported, fCmpWriteSupported);
108 return fCmpWriteSupported;
109}
110# endif
111
112
113/**
114 * Indicates whether hardware actually supports 128-bit compare & write.
115 */
116DECL_FORCE_INLINE(bool) pdmCritSectRwIsCmpWriteU128Supported(void)
117{
118# ifdef RT_ARCH_AMD64
119 int32_t const fCmpWriteSupported = g_fCmpWriteSupported;
120 if (RT_LIKELY(fCmpWriteSupported >= 0))
121 return fCmpWriteSupported != 0;
122 return pdmCritSectRwIsCmpWriteU128SupportedSlow();
123# else
124 return true;
125# endif
126}
127
128#endif /* RTASM_HAVE_CMP_WRITE_U128 */
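
/*
 * Note on the detection above: it gates the lock-free writer-leave fast path
 * in pdmCritSectRwLeaveExclWorker further down, which uses
 * ASMAtomicCmpWriteU128U to clear the state word and hNativeWriter in a
 * single 128-bit operation instead of signalling an event.
 */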
129
130/**
131 * Gets the ring-3 native thread handle of the calling thread.
132 *
133 * @returns native thread handle (ring-3).
134 * @param pVM The cross context VM structure.
135 * @param pThis The read/write critical section. This is only used in
136 * R0 and RC.
137 */
138DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
139{
140#ifdef IN_RING3
141 RT_NOREF(pVM, pThis);
142 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
143#else
144 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
145 NIL_RTNATIVETHREAD);
146 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
147 RTNATIVETHREAD hNativeSelf = pVCpu ? pVCpu->hNativeThread : NIL_RTNATIVETHREAD;
148 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
149#endif
150 return hNativeSelf;
151}
152
153
154DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
155{
156 ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
157 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
158 return VERR_PDM_CRITSECTRW_IPE;
159}
160
161
162
163#ifdef IN_RING3
164/**
165 * Changes the lock validator sub-class of the read/write critical section.
166 *
167 * It is recommended to try to make sure that nobody is using this critical section
168 * while changing the value.
169 *
170 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
171 * lock validator isn't compiled in or either of the parameters is
172 * invalid.
173 * @param pThis Pointer to the read/write critical section.
174 * @param uSubClass The new sub-class value.
175 */
176VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
177{
178 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
179 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
180# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
181 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
182
183 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
184 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
185# else
186 NOREF(uSubClass);
187 return RTLOCKVAL_SUB_CLASS_INVALID;
188# endif
189}
190#endif /* IN_RING3 */
191
192
193#ifdef IN_RING0
194/**
195 * Go back to ring-3 so the kernel can do signals, APCs and other fun things.
196 *
197 * @param pVM The cross context VM structure.
198 */
199static void pdmR0CritSectRwYieldToRing3(PVMCC pVM)
200{
201 PVMCPUCC pVCpu = VMMGetCpu(pVM);
202 AssertPtrReturnVoid(pVCpu);
203 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
204 AssertRC(rc);
205}
206#endif /* IN_RING0 */
207
208
209/**
210 * Worker that enters a read/write critical section with shared access.
211 *
212 * @returns VBox status code.
213 * @param pVM The cross context VM structure.
214 * @param pThis Pointer to the read/write critical section.
215 * @param rcBusy The busy return code for ring-0 and ring-3.
216 * @param fTryOnly Only try enter it, don't wait.
217 * @param pSrcPos The source position. (Can be NULL.)
218 * @param fNoVal No validation records.
219 */
220static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
221 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
222{
223 /*
224 * Validate input.
225 */
226 AssertPtr(pThis);
227 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
228
229#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
230 NOREF(pSrcPos);
231 NOREF(fNoVal);
232#endif
233#ifdef IN_RING3
234 NOREF(rcBusy);
235 NOREF(pVM);
236#endif
237
238#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
239 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
240 if (!fTryOnly)
241 {
242 int rc9;
243 RTNATIVETHREAD hNativeWriter;
244 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
245 if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
246 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
247 else
248 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
249 if (RT_FAILURE(rc9))
250 return rc9;
251 }
252#endif
253
254 /*
255 * Get cracking...
256 */
257 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
258 uint64_t u64OldState = u64State;
259
260 for (;;)
261 {
262 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
263 {
264 /* It flows in the right direction, try to follow it before it changes. */
265 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
266 c++;
267 Assert(c < RTCSRW_CNT_MASK / 4);
268 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
269 u64State &= ~RTCSRW_CNT_RD_MASK;
270 u64State |= c << RTCSRW_CNT_RD_SHIFT;
271 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
272 {
273#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
274 if (!fNoVal)
275 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
276#endif
277 break;
278 }
279 }
280 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
281 {
282 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
283 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
284 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
285 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
286 {
287 Assert(!pThis->s.Core.fNeedReset);
288#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
289 if (!fNoVal)
290 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
291#endif
292 break;
293 }
294 }
295 else
296 {
297 /* Is the writer perhaps doing a read recursion? */
298 RTNATIVETHREAD hNativeWriter;
299 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
300 if (hNativeWriter != NIL_RTNATIVETHREAD)
301 {
302 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
303 if (hNativeSelf == hNativeWriter)
304 {
305#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
306 if (!fNoVal)
307 {
308 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
309 if (RT_FAILURE(rc9))
310 return rc9;
311 }
312#endif
313 uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
314 Assert(cReads < _16K);
315 AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
316 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
317 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
318 return VINF_SUCCESS; /* don't break! */
319 }
320 }
321
322 /*
323 * If we're only trying, return already.
324 */
325 if (fTryOnly)
326 {
327 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
328 return VERR_SEM_BUSY;
329 }
330
331#if defined(IN_RING3) || defined(IN_RING0)
332# ifdef IN_RING0
333 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
334 && ASMIntAreEnabled())
335# endif
336 {
337 /*
338 * Add ourselves to the queue and wait for the direction to change.
339 */
340 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
341 c++;
342 Assert(c < RTCSRW_CNT_MASK / 2);
343 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
344
345 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
346 cWait++;
347 Assert(cWait <= c);
348 Assert(cWait < RTCSRW_CNT_MASK / 2);
349 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
350
351 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
352 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
353
354 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
355 {
356 for (uint32_t iLoop = 0; ; iLoop++)
357 {
358 int rc;
359# ifdef IN_RING3
360# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
361 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
362 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
363 if (RT_SUCCESS(rc))
364# else
365 RTTHREAD hThreadSelf = RTThreadSelf();
366 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
367# endif
368# endif
369 {
370 for (;;)
371 {
372 rc = SUPSemEventMultiWaitNoResume(pVM->pSession,
373 (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
374 RT_INDEFINITE_WAIT);
375 if ( rc != VERR_INTERRUPTED
376 || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
377 break;
378# ifdef IN_RING0
379 pdmR0CritSectRwYieldToRing3(pVM);
380# endif
381 }
382# ifdef IN_RING3
383 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
384# endif
385 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
386 return VERR_SEM_DESTROYED;
387 }
388 if (RT_FAILURE(rc))
389 {
390 /* Decrement the counts and return the error. */
391 for (;;)
392 {
393 u64OldState = u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
394 c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
395 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
396 c--;
397 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
398 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
399 cWait--;
400 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
401 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
402 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
403 break;
404 }
405 return rc;
406 }
407
408 Assert(pThis->s.Core.fNeedReset);
409 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
410 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
411 break;
412 AssertMsg(iLoop < 1, ("%u\n", iLoop));
413 }
414
415 /* Decrement the wait count and maybe reset the semaphore (if we're last). */
416 for (;;)
417 {
418 u64OldState = u64State;
419
420 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
421 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
422 cWait--;
423 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
424 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
425
426 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
427 {
428 if (cWait == 0)
429 {
430 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
431 {
432 int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
433 AssertRCReturn(rc, rc);
434 }
435 }
436 break;
437 }
438 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
439 }
440
441# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
442 if (!fNoVal)
443 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
444# endif
445 break;
446 }
447 }
448#endif /* IN_RING3 || IN_RING0 */
449#ifndef IN_RING3
450# ifdef IN_RING0
451 else
452# endif
453 {
454 /*
455 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
456 * back to ring-3 and do it there or return rcBusy.
457 */
458 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
459 if (rcBusy == VINF_SUCCESS)
460 {
461 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
462 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
463 * back to ring-3. Goes for both kinds of crit sects. */
464 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
465 }
466 return rcBusy;
467 }
468#endif /* !IN_RING3 */
469 }
470
471 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
472 return VERR_SEM_DESTROYED;
473
474 ASMNopPause();
475 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
476 u64OldState = u64State;
477 }
478
479 /* got it! */
480 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
481 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
482 return VINF_SUCCESS;
483
484}
485
486
487/**
488 * Enter a critical section with shared (read) access.
489 *
490 * @returns VBox status code.
491 * @retval VINF_SUCCESS on success.
492 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
493 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
494 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
495 * during the operation.
496 *
497 * @param pVM The cross context VM structure.
498 * @param pThis Pointer to the read/write critical section.
499 * @param rcBusy The status code to return when we're in RC or R0 and the
500 * section is busy. Pass VINF_SUCCESS to acquire the
501 * critical section thru a ring-3 call if necessary.
502 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
503 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
504 * RTCritSectRwEnterShared.
505 */
506VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
507{
508#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
509 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
510#else
511 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
512 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
513#endif
514}
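
/*
 * Usage sketch for the shared API above (the pState structure, its CritSectRw
 * member and uSharedCounter field are hypothetical, for illustration only):
 *
 *     int rc = PDMCritSectRwEnterShared(pVM, &pState->CritSectRw, VERR_SEM_BUSY);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         uint32_t uValue = pState->uSharedCounter;            // read-only access under the shared lock
 *         PDMCritSectRwLeaveShared(pVM, &pState->CritSectRw);
 *         RT_NOREF(uValue);
 *     }
 */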
515
516
517/**
518 * Enter a critical section with shared (read) access.
519 *
520 * @returns VBox status code.
521 * @retval VINF_SUCCESS on success.
522 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
523 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
524 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
525 * during the operation.
526 *
527 * @param pVM The cross context VM structure.
528 * @param pThis Pointer to the read/write critical section.
529 * @param rcBusy The status code to return when we're in RC or R0 and the
530 * section is busy. Pass VINF_SUCCESS to acquire the
531 * critical section thru a ring-3 call if necessary.
532 * @param uId Where we're entering the section.
533 * @param SRC_POS The source position.
534 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
535 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
536 * RTCritSectRwEnterSharedDebug.
537 */
538VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
539{
540 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
541#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
542 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
543#else
544 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
545 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
546#endif
547}
548
549
550/**
551 * Try enter a critical section with shared (read) access.
552 *
553 * @returns VBox status code.
554 * @retval VINF_SUCCESS on success.
555 * @retval VERR_SEM_BUSY if the critsect was owned.
556 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
557 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
558 * during the operation.
559 *
560 * @param pVM The cross context VM structure.
561 * @param pThis Pointer to the read/write critical section.
562 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
563 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
564 * RTCritSectRwTryEnterShared.
565 */
566VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
567{
568#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
569 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
570#else
571 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
572 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
573#endif
574}
575
576
577/**
578 * Try enter a critical section with shared (read) access.
579 *
580 * @returns VBox status code.
581 * @retval VINF_SUCCESS on success.
582 * @retval VERR_SEM_BUSY if the critsect was owned.
583 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
584 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
585 * during the operation.
586 *
587 * @param pVM The cross context VM structure.
588 * @param pThis Pointer to the read/write critical section.
589 * @param uId Where we're entering the section.
590 * @param SRC_POS The source position.
591 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
592 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
593 * RTCritSectRwTryEnterSharedDebug.
594 */
595VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
596{
597 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
598#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
599 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
600#else
601 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
602 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
603#endif
604}
605
606
607#ifdef IN_RING3
608/**
609 * Enters a PDM read/write critical section with shared (read) access.
610 *
611 * @returns VINF_SUCCESS if entered successfully.
612 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
613 * during the operation.
614 *
615 * @param pVM The cross context VM structure.
616 * @param pThis Pointer to the read/write critical section.
617 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
618 */
619VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
620{
621 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3);
622}
623#endif
624
625
626/**
627 * Leave a critical section held with shared access.
628 *
629 * @returns VBox status code.
630 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
631 * during the operation.
632 * @param pVM The cross context VM structure.
633 * @param pThis Pointer to the read/write critical section.
634 * @param fNoVal No validation records (i.e. queued release).
635 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
636 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
637 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
638 */
639static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
640{
641 /*
642 * Validate handle.
643 */
644 AssertPtr(pThis);
645 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
646
647#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
648 NOREF(fNoVal);
649#endif
650
651 /*
652 * Check the direction and take action accordingly.
653 */
654 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
655 uint64_t u64OldState = u64State;
656 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
657 {
658#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
659 if (fNoVal)
660 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
661 else
662 {
663 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
664 if (RT_FAILURE(rc9))
665 return rc9;
666 }
667#endif
668 for (;;)
669 {
670 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
671 AssertReturn(c > 0, VERR_NOT_OWNER);
672 c--;
673
674 if ( c > 0
675 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
676 {
677 /* Don't change the direction. */
678 u64State &= ~RTCSRW_CNT_RD_MASK;
679 u64State |= c << RTCSRW_CNT_RD_SHIFT;
680 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
681 break;
682 }
683 else
684 {
685#if defined(IN_RING3) || defined(IN_RING0)
686# ifdef IN_RING0
687 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
688 && ASMIntAreEnabled())
689# endif
690 {
691 /* Reverse the direction and signal the writer threads. */
692 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
693 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
694 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
695 {
696 int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
697 AssertRC(rc);
698 break;
699 }
700 }
701#endif /* IN_RING3 || IN_RING0 */
702#ifndef IN_RING3
703# ifdef IN_RING0
704 else
705# endif
706 {
707 /* Queue the exit request (ring-3). */
708 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
709 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
710 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
711 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
712 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
713 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
714 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
715 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
716 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & PAGE_OFFSET_MASK)
717 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
718 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
719 pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
720 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
721 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
722 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
723 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
724 break;
725 }
726#endif
727 }
728
729 ASMNopPause();
730 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
731 u64OldState = u64State;
732 }
733 }
734 else
735 {
736 /*
737 * Write direction. Check that it's the owner calling and that it has reads to undo.
738 */
739 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
740 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
741
742 RTNATIVETHREAD hNativeWriter;
743 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
744 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
745 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
746#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
747 if (!fNoVal)
748 {
749 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
750 if (RT_FAILURE(rc))
751 return rc;
752 }
753#endif
754 uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
755 AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
756 }
757
758 return VINF_SUCCESS;
759}
760
761
762/**
763 * Leave a critical section held with shared access.
764 *
765 * @returns VBox status code.
766 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
767 * during the operation.
768 * @param pVM The cross context VM structure.
769 * @param pThis Pointer to the read/write critical section.
770 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
771 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
772 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
773 */
774VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
775{
776 return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
777}
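
/*
 * Sketch of the write-then-read recursion the enter/leave workers in this
 * file allow (pCritSectRw is a hypothetical section pointer; the rcBusy
 * values are examples):
 *
 *     PDMCritSectRwEnterExcl(pVM, pCritSectRw, VERR_SEM_BUSY);    // become the writer
 *     PDMCritSectRwEnterShared(pVM, pCritSectRw, VERR_SEM_BUSY);  // counted in cWriterReads
 *     PDMCritSectRwLeaveShared(pVM, pCritSectRw);                 // undo the read recursion
 *     PDMCritSectRwLeaveExcl(pVM, pCritSectRw);                   // only valid once cWriterReads is zero
 */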
778
779
780#if defined(IN_RING3) || defined(IN_RING0)
781/**
782 * PDMCritSectBothFF interface.
783 *
784 * @param pVM The cross context VM structure.
785 * @param pThis Pointer to the read/write critical section.
786 */
787void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
788{
789 pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
790}
791#endif
792
793
794/**
795 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
796 *
797 * @returns @a rc unless corrupted.
798 * @param pThis Pointer to the read/write critical section.
799 * @param rc The status to return.
800 */
801DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
802{
803 /*
804 * Decrement the counts and return the error.
805 */
806 for (;;)
807 {
808 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
809 uint64_t const u64OldState = u64State;
810 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
811 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
812 c--;
813 u64State &= ~RTCSRW_CNT_WR_MASK;
814 u64State |= c << RTCSRW_CNT_WR_SHIFT;
815 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
816 return rc;
817
818 ASMNopPause();
819 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
820 ASMNopPause();
821 }
822}
823
824
825/**
826 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
827 * gotten exclusive ownership of the critical section.
828 */
829DECLINLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
830{
831 RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
832 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
833
834#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
835 pThis->s.Core.cWriteRecursions = 1;
836#else
837 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
838#endif
839 Assert(pThis->s.Core.cWriterReads == 0);
840
841#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
842 if (!fNoVal)
843 {
844 if (hThreadSelf == NIL_RTTHREAD)
845 hThreadSelf = RTThreadSelfAutoAdopt();
846 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
847 }
848#endif
849 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
850 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
851 return VINF_SUCCESS;
852}
853
854
855#if defined(IN_RING3) || defined(IN_RING0)
856/**
857 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
858 * contended.
859 */
860static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
861 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
862{
863 RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);
864
865 PSUPDRVSESSION const pSession = pVM->pSession;
866 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pThis->s.Core.hEvtWrite;
867# ifdef IN_RING0
868 uint64_t const tsStart = RTTimeNanoTS();
869 uint64_t cNsMaxTotal = RT_NS_5MIN;
870 uint32_t cMsMaxOne = RT_MS_5SEC;
871 bool fNonInterruptible = false;
872# endif
873
874 for (uint32_t iLoop = 0; ; iLoop++)
875 {
876 /*
877 * Wait for our turn.
878 */
879 int rc;
880# ifdef IN_RING3
881# ifdef PDMCRITSECTRW_STRICT
882 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
883 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
884 if (RT_SUCCESS(rc))
885 { /* likely */ }
886 else
887 return pdmCritSectRwEnterExclBailOut(pThis, rc);
888# else
889 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
890# endif
891# endif
892 for (;;)
893 {
894 /*
895 * We always wait with a timeout so we can re-check the structure sanity
896 * and not get stuck waiting on a corrupt or deleted section.
897 */
898# ifdef IN_RING3
899 rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
900# else
901 rc = !fNonInterruptible
902 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
903 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
904 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p)\n",
905 pThis, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter));
906# endif
907 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
908 { /* likely */ }
909 else
910 {
911# ifdef IN_RING3
912 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
913# endif
914 return VERR_SEM_DESTROYED;
915 }
916 if (rc == VINF_SUCCESS)
917 {
918# ifdef IN_RING3
919 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
920# endif
921 break;
922 }
923
924 /*
925 * Timeout and interrupted waits need careful handling in ring-0
926 * because we're cooperating with ring-3 on this critical section
927 * and thus need to make absolutely sure we won't get stuck here.
928 *
929 * The r0 interrupted case means something is pending (termination,
930 * signal, APC, debugger, whatever), so we must try our best to
931 * return to the caller and to ring-3 so it can be dealt with.
932 */
933 if (RT_LIKELY(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED))
934 {
935# ifdef IN_RING0
936 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
937 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
938 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
939 ("rcTerm=%Rrc\n", rcTerm));
940 if (rcTerm == VERR_NOT_SUPPORTED)
941 cNsMaxTotal = RT_NS_1MIN;
942
943 if (rc == VERR_TIMEOUT)
944 {
945 /* Try to get out of here with a non-VINF_SUCCESS status if
946 the thread is terminating or if the timeout has been exceeded. */
947 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrTimeout);
948 if ( rcTerm == VINF_THREAD_IS_TERMINATING
949 || cNsElapsed > cNsMaxTotal)
950 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
951 }
952 else
953 {
954 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
955 we will try non-interruptible sleep for a while to help resolve the issue
956 w/o guru'ing. */
957 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrInterrupted);
958 if ( rcTerm != VINF_THREAD_IS_TERMINATING
959 && rcBusy == VINF_SUCCESS
960 && pVCpu != NULL
961 && cNsElapsed <= cNsMaxTotal)
962 {
963 if (!fNonInterruptible)
964 {
965 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits);
966 fNonInterruptible = true;
967 cMsMaxOne = 32;
968 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
969 if (cNsLeft > RT_NS_10SEC)
970 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
971 }
972 }
973 else
974 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
975
976 }
977# else /* IN_RING3 */
978 RT_NOREF(pVM, pVCpu, rcBusy);
979# endif /* IN_RING3 */
980 }
981 /*
982 * Any other return code is fatal.
983 */
984 else
985 {
986# ifdef IN_RING3
987 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
988# endif
989 AssertMsgFailed(("rc=%Rrc\n", rc));
990 return RT_FAILURE_NP(rc) ? rc : -rc;
991 }
992 }
993
994 /*
995 * Try take exclusive write ownership.
996 */
997 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
998 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
999 {
1000 bool fDone;
1001 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1002 if (fDone)
1003 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1004 }
1005 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
1006 }
1007}
1008#endif /* IN_RING3 || IN_RING0 */
1009
1010
1011/**
1012 * Worker that enters a read/write critical section with exclusive access.
1013 *
1014 * @returns VBox status code.
1015 * @param pVM The cross context VM structure.
1016 * @param pThis Pointer to the read/write critical section.
1017 * @param rcBusy The busy return code for ring-0 and ring-3.
1018 * @param fTryOnly Only try enter it, don't wait.
1019 * @param pSrcPos The source position. (Can be NULL.)
1020 * @param fNoVal No validation records.
1021 */
1022static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1023 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1024{
1025 /*
1026 * Validate input.
1027 */
1028 AssertPtr(pThis);
1029 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1030
1031 RTTHREAD hThreadSelf = NIL_RTTHREAD;
1032#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1033 if (!fTryOnly)
1034 {
1035 hThreadSelf = RTThreadSelfAutoAdopt();
1036 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
1037 if (RT_FAILURE(rc9))
1038 return rc9;
1039 }
1040#endif
1041
1042 /*
1043 * Check if we're already the owner and just recursing.
1044 */
1045 RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1046 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1047 RTNATIVETHREAD hNativeWriter;
1048 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1049 if (hNativeSelf == hNativeWriter)
1050 {
1051 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1052#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1053 if (!fNoVal)
1054 {
1055 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
1056 if (RT_FAILURE(rc9))
1057 return rc9;
1058 }
1059#endif
1060 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1061#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1062 uint32_t const cDepth = ++pThis->s.Core.cWriteRecursions;
1063#else
1064 uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
1065#endif
1066 AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
1067 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
1068 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
1069 return VINF_SUCCESS;
1070 }
1071
1072 /*
1073 * Get cracking.
1074 */
1075 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1076 uint64_t u64OldState = u64State;
1077
1078 for (;;)
1079 {
1080 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1081 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
1082 {
1083 /* It flows in the right direction, try to follow it before it changes. */
1084 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1085 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1086 c++;
1087 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1088 u64State &= ~RTCSRW_CNT_WR_MASK;
1089 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1090 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1091 break;
1092 }
1093 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
1094 {
1095 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
1096 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1097 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1098 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1099 break;
1100 }
1101 else if (fTryOnly)
1102 {
1103 /* Wrong direction and we're not supposed to wait, just return. */
1104 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1105 return VERR_SEM_BUSY;
1106 }
1107 else
1108 {
1109 /* Add ourselves to the write count and break out to do the wait. */
1110 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1111 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1112 c++;
1113 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1114 u64State &= ~RTCSRW_CNT_WR_MASK;
1115 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1116 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1117 break;
1118 }
1119
1120 ASMNopPause();
1121
1122 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1123 { /* likely */ }
1124 else
1125 return VERR_SEM_DESTROYED;
1126
1127 ASMNopPause();
1128 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1129 u64OldState = u64State;
1130 }
1131
1132 /*
1133 * If we're in write mode now, try to grab the ownership. Play fair if there
1134 * are threads already waiting, unless we're in ring-0.
1135 */
1136 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1137#if defined(IN_RING3)
1138 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
1139 || fTryOnly)
1140#endif
1141 ;
1142 if (fDone)
1143 {
1144 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1145 if (fDone)
1146 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1147 }
1148
1149 /*
1150 * Okay, we have contention and will have to wait unless we're just trying.
1151 */
1152 if (fTryOnly)
1153 {
1154 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); /** @todo different statistics for this */
1155 return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
1156 }
1157
1158 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1159
1160 /*
1161 * Ring-3 is pretty straightforward.
1162 */
1163#if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
1164 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
1165#elif defined(IN_RING3)
1166 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());
1167
1168#elif defined(IN_RING0)
1169 /*
1170 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
1171 * account when waiting on contended locks.
1172 */
1173 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1174 if (pVCpu)
1175 {
1176 VMMR0EMTBLOCKCTX Ctx;
1177 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
1178 if (rc == VINF_SUCCESS)
1179 {
1180 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1181
1182 rc = pdmR3R0CritSectRwEnterExclContended(pVM, pVCpu, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1183
1184 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1185 }
1186 else
1187 {
1188 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
1189 rc = pdmCritSectRwEnterExclBailOut(pThis, rc);
1190 }
1191 return rc;
1192 }
1193
1194 /* Non-EMT. */
1195 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1196 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1197
1198#else
1199# error "Unused."
1200 /*
1201 * Raw-mode: Call host and take it there if rcBusy is VINF_SUCCESS.
1202 */
1203 rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
1204 if (rcBusy == VINF_SUCCESS)
1205 {
1206 Assert(!fTryOnly);
1207 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1208 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
1209 * back to ring-3. Goes for both kinds of crit sects. */
1210 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
1211 }
1212 return rcBusy;
1213#endif
1214}
1215
1216
1217/**
1218 * Try enter a critical section with exclusive (write) access.
1219 *
1220 * @returns VBox status code.
1221 * @retval VINF_SUCCESS on success.
1222 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1223 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
1224 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1225 * during the operation.
1226 *
1227 * @param pVM The cross context VM structure.
1228 * @param pThis Pointer to the read/write critical section.
1229 * @param rcBusy The status code to return when we're in RC or R0 and the
1230 * section is busy. Pass VINF_SUCCESS to acquire the
1231 * critical section thru a ring-3 call if necessary.
1232 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1233 * PDMCritSectRwTryEnterExclDebug,
1234 * PDMCritSectEnterDebug, PDMCritSectEnter,
1235 * RTCritSectRwEnterExcl.
1236 */
1237VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
1238{
1239#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1240 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1241#else
1242 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1243 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1244#endif
1245}
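
/*
 * Usage sketch for the exclusive API above (hypothetical pState/field names;
 * callers in ring-0/raw-mode would typically pass their own busy status code
 * as rcBusy rather than VERR_SEM_BUSY):
 *
 *     int rc = PDMCritSectRwEnterExcl(pVM, &pState->CritSectRw, VERR_SEM_BUSY);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         pState->uSharedCounter++;                            // modify state under the write lock
 *         PDMCritSectRwLeaveExcl(pVM, &pState->CritSectRw);
 *     }
 *     // In ring-0/raw-mode, rc may instead be the rcBusy value when contended.
 */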
1246
1247
1248/**
1249 * Try enter a critical section with exclusive (write) access.
1250 *
1251 * @returns VBox status code.
1252 * @retval VINF_SUCCESS on success.
1253 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1254 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
1255 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1256 * during the operation.
1257 *
1258 * @param pVM The cross context VM structure.
1259 * @param pThis Pointer to the read/write critical section.
1260 * @param rcBusy The status code to return when we're in RC or R0 and the
1261 * section is busy. Pass VINF_SUCCESS to acquired the
1262 * critical section thru a ring-3 call if necessary.
1263 * @param uId Where we're entering the section.
1264 * @param SRC_POS The source position.
1265 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1266 * PDMCritSectRwTryEnterExclDebug,
1267 * PDMCritSectEnterDebug, PDMCritSectEnter,
1268 * RTCritSectRwEnterExclDebug.
1269 */
1270VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1271{
1272 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1273#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1274 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1275#else
1276 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1277 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1278#endif
1279}
1280
1281
1282/**
1283 * Try enter a critical section with exclusive (write) access.
1284 *
1285 * @retval VINF_SUCCESS on success.
1286 * @retval VERR_SEM_BUSY if the critsect was owned.
1287 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
1288 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1289 * during the operation.
1290 *
1291 * @param pVM The cross context VM structure.
1292 * @param pThis Pointer to the read/write critical section.
1293 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1294 * PDMCritSectRwEnterExclDebug,
1295 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1296 * RTCritSectRwTryEnterExcl.
1297 */
1298VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1299{
1300#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1301 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1302#else
1303 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1304 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1305#endif
1306}
1307
1308
1309/**
1310 * Try enter a critical section with exclusive (write) access.
1311 *
1312 * @retval VINF_SUCCESS on success.
1313 * @retval VERR_SEM_BUSY if the critsect was owned.
1314 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
1315 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1316 * during the operation.
1317 *
1318 * @param pVM The cross context VM structure.
1319 * @param pThis Pointer to the read/write critical section.
1320 * @param uId Where we're entering the section.
1321 * @param SRC_POS The source position.
1322 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1323 * PDMCritSectRwEnterExclDebug,
1324 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1325 * RTCritSectRwTryEnterExclDebug.
1326 */
1327VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1328{
1329 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1330#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1331 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1332#else
1333 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1334 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1335#endif
1336}
1337
1338
1339#ifdef IN_RING3
1340/**
1341 * Enters a PDM read/write critical section with exclusive (write) access.
1342 *
1343 * @returns VINF_SUCCESS if entered successfully.
1344 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1345 * during the operation.
1346 *
1347 * @param pVM The cross context VM structure.
1348 * @param pThis Pointer to the read/write critical section.
1349 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1350 */
1351VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1352{
1353 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3 /*fNoVal*/);
1354}
1355#endif /* IN_RING3 */
1356
1357
1358/**
1359 * Leave a critical section held exclusively.
1360 *
1361 * @returns VBox status code.
1362 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1363 * during the operation.
1364 * @param pVM The cross context VM structure.
1365 * @param pThis Pointer to the read/write critical section.
1366 * @param fNoVal No validation records (i.e. queued release).
1367 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1368 */
1369static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1370{
1371 /*
1372 * Validate handle.
1373 */
1374 AssertPtr(pThis);
1375 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1376
1377#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1378 NOREF(fNoVal);
1379#endif
1380
1381 /*
1382 * Check ownership.
1383 */
1384 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1385 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1386
1387 RTNATIVETHREAD hNativeWriter;
1388 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1389 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1390
1391 /*
1392 * Unwind one recursion. Not the last?
1393 */
1394 if (pThis->s.Core.cWriteRecursions != 1)
1395 {
1396#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1397 if (fNoVal)
1398 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1399 else
1400 {
1401 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1402 if (RT_FAILURE(rc9))
1403 return rc9;
1404 }
1405#endif
1406#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1407 uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
1408#else
1409 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1410#endif
1411 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1412 return VINF_SUCCESS;
1413 }
1414
1415 /*
1416 * Final recursion.
1417 */
1418 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1419#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1420 if (fNoVal)
1421 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1422 else
1423 {
1424 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1425 if (RT_FAILURE(rc9))
1426 return rc9;
1427 }
1428#endif
1429
1430#ifdef RTASM_HAVE_CMP_WRITE_U128
1431 /*
1432 * See if we can get out w/o any signalling as this is a common case.
1433 */
1434 if (pdmCritSectRwIsCmpWriteU128Supported())
1435 {
1436 RTCRITSECTRWSTATE OldState;
1437 OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1438 if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
1439 {
1440 OldState.s.hNativeWriter = hNativeSelf;
1441 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1442
1443 RTCRITSECTRWSTATE NewState;
1444 NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
1445 NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1446
1447# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1448 pThis->s.Core.cWriteRecursions = 0;
1449# else
1450 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1451# endif
1452 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1453
1454 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1455 return VINF_SUCCESS;
1456
1457 /* bail out. */
1458 pThis->s.Core.cWriteRecursions = 1;
1459 }
1460 }
1461#endif
1462
1463#if defined(IN_RING3)
1464 /*
1465 * Ring-3: Straightforward, just update the state and if necessary signal waiters.
1466 */
1467# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1468 pThis->s.Core.cWriteRecursions = 0;
1469# else
1470 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1471# endif
1472 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1473 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1474
1475 for (;;)
1476 {
1477 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1478 uint64_t u64OldState = u64State;
1479
1480 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1481 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1482 c--;
1483
1484 if ( c > 0
1485 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1486 {
1487 /* Don't change the direction, wake up the next writer if any. */
1488 u64State &= ~RTCSRW_CNT_WR_MASK;
1489 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1490 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1491 {
1492 if (c > 0)
1493 {
1494 int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1495 AssertRC(rc);
1496 }
1497 return VINF_SUCCESS;
1498 }
1499 }
1500 else
1501 {
1502 /* Reverse the direction and signal the reader threads. */
1503 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1504 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1505 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1506 {
1507 Assert(!pThis->s.Core.fNeedReset);
1508 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1509 int rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1510 AssertRC(rc);
1511 return VINF_SUCCESS;
1512 }
1513 }
1514
1515 ASMNopPause();
1516 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
1517 return VERR_SEM_DESTROYED;
1518 }
1519
1520
1521#elif defined(IN_RING0)
1522 /*
1523 * Update the state.
1524 */
1525 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
1526 && ASMIntAreEnabled())
1527 {
1528# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1529 pThis->s.Core.cWriteRecursions = 0;
1530# else
1531 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1532# endif
1533 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1534 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1535
1536 for (;;)
1537 {
1538 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1539 uint64_t u64OldState = u64State;
1540
1541 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1542 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1543 c--;
1544
1545 if ( c > 0
1546 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1547 {
1548 /* Don't change the direction, wake up the next writer if any. */
1549 u64State &= ~RTCSRW_CNT_WR_MASK;
1550 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1551 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1552 {
1553 if (c > 0)
1554 {
1555 int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1556 AssertRC(rc);
1557 }
1558 break;
1559 }
1560 }
1561 else
1562 {
1563 /* Reverse the direction and signal the reader threads. */
1564 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1565 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1566 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1567 {
1568 Assert(!pThis->s.Core.fNeedReset);
1569 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1570 int rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1571 AssertRC(rc);
1572 break;
1573 }
1574 }
1575
1576 ASMNopPause();
1577 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
1578 return VERR_SEM_DESTROYED;
1579 }
1580 }
1581#endif /* IN_RING0 */
1582
1583#ifndef IN_RING3
1584 /*
1585 * Queue the requested exit for ring-3 execution.
1586 */
1587 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1588 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1589    LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
1590 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1591 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1592 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1593 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1594 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
1595 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & PAGE_OFFSET_MASK)
1596 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
1597 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1598 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1599 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1600 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1601 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1602 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1603 return VINF_SUCCESS;
1604#endif
1605}
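/*
 * Illustrative sketch (not part of this file): the CAS loops above manipulate a
 * single 64-bit word that packs the reader count, the writer count and a
 * direction flag.  The real RTCSRW_* shifts/masks live in the IPRT headers;
 * the values below are assumptions chosen only to make the sketch
 * self-contained and compilable with a plain C11 compiler.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_CNT_RD_SHIFT   0                                  /* assumed */
#define SKETCH_CNT_RD_MASK    (UINT64_C(0x7fff) << SKETCH_CNT_RD_SHIFT)
#define SKETCH_CNT_WR_SHIFT   16                                 /* assumed */
#define SKETCH_CNT_WR_MASK    (UINT64_C(0x7fff) << SKETCH_CNT_WR_SHIFT)
#define SKETCH_DIR_SHIFT      31                                 /* assumed */
#define SKETCH_DIR_MASK       (UINT64_C(1) << SKETCH_DIR_SHIFT)
#define SKETCH_DIR_READ       UINT64_C(0)

/* Mirrors the leave-exclusive transition: drop one writer; if other writers are
   queued (or no readers are waiting) stay in write mode, otherwise flip the
   direction to read mode so the waiting readers can be released.  The magic
   check and ASMNopPause() of the real loops are omitted for brevity. */
static bool sketchLeaveExcl(_Atomic uint64_t *pu64State, bool *pfWakeReaders)
{
    uint64_t u64Old = atomic_load(pu64State);
    for (;;)
    {
        uint64_t cWriters = (u64Old & SKETCH_CNT_WR_MASK) >> SKETCH_CNT_WR_SHIFT;
        if (cWriters == 0)
            return false;                       /* corrupt state, caller asserts */
        cWriters--;

        uint64_t u64New;
        if (cWriters > 0 || (u64Old & SKETCH_CNT_RD_MASK) == 0)
        {
            /* Keep the write direction; just store the decremented writer count. */
            u64New = (u64Old & ~SKETCH_CNT_WR_MASK) | (cWriters << SKETCH_CNT_WR_SHIFT);
            *pfWakeReaders = false;
        }
        else
        {
            /* Last writer and readers are waiting: flip to the read direction. */
            u64New = (u64Old & ~(SKETCH_CNT_WR_MASK | SKETCH_DIR_MASK))
                   | (SKETCH_DIR_READ << SKETCH_DIR_SHIFT);
            *pfWakeReaders = true;
        }
        /* Compare-and-swap; on failure u64Old is refreshed and we retry,
           mirroring the ASMAtomicCmpXchgU64() loops above. */
        if (atomic_compare_exchange_weak(pu64State, &u64Old, u64New))
            return true;
    }
}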
1606
1607
1608/**
1609 * Leave a critical section held exclusively.
1610 *
1611 * @returns VBox status code.
1612 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1613 * during the operation.
1614 * @param pVM The cross context VM structure.
1615 * @param pThis Pointer to the read/write critical section.
1616 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1617 */
1618VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1619{
1620 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
1621}
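/*
 * Usage sketch: PDMCritSectRwLeaveExcl() is the counterpart of
 * PDMCritSectRwEnterExcl().  The device state and rcBusy handling below are
 * hypothetical; the enter API is assumed to take (pVM, pThis, rcBusy) as
 * declared in VBox/vmm/pdmcritsectrw.h.
 */
static int sketchUpdateSharedDeviceState(PVMCC pVM, PPDMCRITSECTRW pCritSect,
                                         uint32_t uNewValue, uint32_t *puShadow)
{
    /* Take the write lock; in ring-0/RC a contended lock may return rcBusy instead of blocking. */
    int rc = PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        *puShadow = uNewValue;                  /* exclusive access to the shared state */
        rc = PDMCritSectRwLeaveExcl(pVM, pCritSect);
    }
    return rc;
}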
1622
1623
1624#if defined(IN_RING3) || defined(IN_RING0)
1625/**
1626 * PDMCritSectBothFF interface.
1627 *
1628 * @param pVM The cross context VM structure.
1629 * @param pThis Pointer to the read/write critical section.
1630 */
1631void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1632{
1633 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
1634}
1635#endif
1636
1637
1638/**
1639 * Checks whether the caller is the exclusive (write) owner of the critical section.
1640 *
1641 * @retval true if owner.
1642 * @retval false if not owner.
1643 * @param pVM The cross context VM structure.
1644 * @param pThis Pointer to the read/write critical section.
1645 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1646 * RTCritSectRwIsWriteOwner.
1647 */
1648VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
1649{
1650 /*
1651 * Validate handle.
1652 */
1653 AssertPtr(pThis);
1654 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1655
1656 /*
1657 * Check ownership.
1658 */
1659 RTNATIVETHREAD hNativeWriter;
1660 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1661 if (hNativeWriter == NIL_RTNATIVETHREAD)
1662 return false;
1663 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1664}
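/*
 * Usage sketch: the typical consumer of PDMCritSectRwIsWriteOwner() is an
 * ownership assertion at the top of a helper that requires the write lock to
 * already be held.  The helper and its parameters are hypothetical.
 */
static void sketchAssumesWriteLockHeld(PVMCC pVM, PPDMCRITSECTRW pCritSect, uint32_t *puShadow)
{
    Assert(PDMCritSectRwIsWriteOwner(pVM, pCritSect));  /* caller must hold the write lock */
    *puShadow += 1;
}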
1665
1666
1667/**
1668 * Checks if the caller is one of the read owners of the critical section.
1669 *
1670 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1671 * enabled. Meaning, the answer is not trustworthy unless
1672 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1673 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1674 * creating the semaphore. And finally, if you used a locking class,
1675 * don't disable deadlock detection by setting cMsMinDeadlock to
1676 * RT_INDEFINITE_WAIT.
1677 *
1678 * In short, only use this for assertions.
1679 *
1680 * @returns @c true if reader, @c false if not.
1681 * @param pVM The cross context VM structure.
1682 * @param pThis Pointer to the read/write critical section.
1683 * @param fWannaHear What you'd like to hear when lock validation is not
1684 * available. (For avoiding asserting all over the place.)
1685 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1686 */
1687VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
1688{
1689 /*
1690 * Validate handle.
1691 */
1692 AssertPtr(pThis);
1693 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1694
1695 /*
1696 * Inspect the state.
1697 */
1698 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1699 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1700 {
1701 /*
1702 * It's in write mode, so we can only be a reader if we're also the
1703 * current writer.
1704 */
1705 RTNATIVETHREAD hWriter;
1706 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hWriter);
1707 if (hWriter == NIL_RTNATIVETHREAD)
1708 return false;
1709 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1710 }
1711
1712 /*
1713 * Read mode. If there are no current readers, then we cannot be a reader.
1714 */
1715 if (!(u64State & RTCSRW_CNT_RD_MASK))
1716 return false;
1717
1718#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1719 /*
1720 * Ask the lock validator.
1721 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
1722 */
1723 NOREF(fWannaHear);
1724 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
1725#else
1726 /*
1727 * Ok, we don't know, just tell the caller what he wants to hear.
1728 */
1729 return fWannaHear;
1730#endif
1731}
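/*
 * Usage sketch: because the answer is only reliable with lock validation
 * enabled, PDMCritSectRwIsReadOwner() should be confined to assertions.
 * Passing true as fWannaHear makes the assertion harmless on builds without
 * the validator.  The helper is hypothetical.
 */
static void sketchAssumesReadLockHeld(PVMCC pVM, PPDMCRITSECTRW pCritSect,
                                      uint32_t const *puShadow, uint32_t *puOut)
{
    Assert(PDMCritSectRwIsReadOwner(pVM, pCritSect, true /*fWannaHear*/));
    *puOut = *puShadow;
}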
1732
1733
1734/**
1735 * Gets the write recursion count.
1736 *
1737 * @returns The write recursion count (0 if bad critsect).
1738 * @param pThis Pointer to the read/write critical section.
1739 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
1740 * RTCritSectRwGetWriteRecursion.
1741 */
1742VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
1743{
1744 /*
1745 * Validate handle.
1746 */
1747 AssertPtr(pThis);
1748 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1749
1750 /*
1751 * Return the requested data.
1752 */
1753 return pThis->s.Core.cWriteRecursions;
1754}
1755
1756
1757/**
1758 * Gets the read recursion count of the current writer.
1759 *
1760 * @returns The read recursion count (0 if bad critsect).
1761 * @param pThis Pointer to the read/write critical section.
1762 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
1763 * RTCritSectRwGetWriterReadRecursion.
1764 */
1765VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
1766{
1767 /*
1768 * Validate handle.
1769 */
1770 AssertPtr(pThis);
1771 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1772
1773 /*
1774 * Return the requested data.
1775 */
1776 return pThis->s.Core.cWriterReads;
1777}
1778
1779
1780/**
1781 * Gets the current number of reads.
1782 *
1783 * This includes all read recursions, so it might be higher than the number of
1784 * read owners. It does not include reads done by the current writer.
1785 *
1786 * @returns The read count (0 if bad critsect).
1787 * @param pThis Pointer to the read/write critical section.
1788 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
1789 * RTCritSectRwGetReadCount.
1790 */
1791VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
1792{
1793 /*
1794 * Validate input.
1795 */
1796 AssertPtr(pThis);
1797 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1798
1799 /*
1800 * Return the requested data.
1801 */
1802 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1803 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
1804 return 0;
1805 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
1806}
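/*
 * Usage sketch: the three getters above are informational; the values can
 * change the instant they are read, so they suit statistics and debug logging
 * rather than locking decisions.  The log helper below is hypothetical.
 */
static void sketchLogRwState(PPDMCRITSECTRW pCritSect)
{
    Log(("critsect-rw: %u readers, write recursion %u (writer read recursion %u)\n",
         PDMCritSectRwGetReadCount(pCritSect),
         PDMCritSectRwGetWriteRecursion(pCritSect),
         PDMCritSectRwGetWriterReadRecursion(pCritSect)));
}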
1807
1808
1809/**
1810 * Checks if the read/write critical section is initialized or not.
1811 *
1812 * @retval true if initialized.
1813 * @retval false if not initialized.
1814 * @param pThis Pointer to the read/write critical section.
1815 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
1816 */
1817VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
1818{
1819 AssertPtr(pThis);
1820 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
1821}
1822