VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@ 90611

Last change on this file since 90611 was 90611, checked in by vboxsync, 4 years ago

VMM/PDMCritSecRw: More code refactoring. bugref:6695

1/* $Id: PDMAllCritSectRw.cpp 90611 2021-08-10 22:08:53Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39#if defined(IN_RING3) || defined(IN_RING0)
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** The number of loops to spin for shared access in ring-3. */
48#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
49/** The number of loops to spin for shared access in ring-0. */
50#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
51/** The number of loops to spin for shared access in the raw-mode context. */
52#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
53
54/** The number of loops to spin for exclusive access in ring-3. */
55#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
56/** The number of loops to spin for exclusive access in ring-0. */
57#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
58/** The number of loops to spin for exclusive access in the raw-mode context. */
59#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
60
61/** Max number of write or write/read recursions. */
62#define PDM_CRITSECTRW_MAX_RECURSIONS _1M
63
64/* Undefine the automatic VBOX_STRICT API mappings. */
65#undef PDMCritSectRwEnterExcl
66#undef PDMCritSectRwTryEnterExcl
67#undef PDMCritSectRwEnterShared
68#undef PDMCritSectRwTryEnterShared
69
70
71/**
72 * Gets the ring-3 native thread handle of the calling thread.
73 *
74 * @returns native thread handle (ring-3).
75 * @param pVM The cross context VM structure.
76 * @param pThis The read/write critical section. This is only used in
77 * R0 and RC.
78 */
79DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
80{
81#ifdef IN_RING3
82 RT_NOREF(pVM, pThis);
83 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
84#else
85 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
86 NIL_RTNATIVETHREAD);
87 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
88 RTNATIVETHREAD hNativeSelf = pVCpu ? pVCpu->hNativeThread : NIL_RTNATIVETHREAD;
89 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
90#endif
91 return hNativeSelf;
92}
93
94
95DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
96{
97 ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
98 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
99 return VERR_PDM_CRITSECTRW_IPE;
100}
101
102
103
104#ifdef IN_RING3
105/**
106 * Changes the lock validator sub-class of the read/write critical section.
107 *
108 * It is recommended to make sure that nobody is using this critical section
109 * while changing the value.
110 *
111 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
112 * lock validator isn't compiled in or either of the parameters are
113 * invalid.
114 * @param pThis Pointer to the read/write critical section.
115 * @param uSubClass The new sub-class value.
116 */
117VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
118{
119 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
120 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
121# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
122 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
123
124 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
125 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
126# else
127 NOREF(uSubClass);
128 return RTLOCKVAL_SUB_CLASS_INVALID;
129# endif
130}
131#endif /* IN_RING3 */
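
/*
 * Illustrative usage sketch: grouping two related sections under one
 * user-defined lock validator sub-class.  The pCritSectA / pCritSectB names
 * are hypothetical, and RTLOCKVAL_SUB_CLASS_USER is assumed to come from the
 * lock validator API (iprt/lockvalidator.h).
 */
#ifdef IN_RING3
static void examplePdmCritSectRwSetSubClass(PPDMCRITSECTRW pCritSectA, PPDMCRITSECTRW pCritSectB)
{
    uint32_t const uSubClass = RTLOCKVAL_SUB_CLASS_USER + 1; /* arbitrary user value */
    uint32_t uOldA = PDMR3CritSectRwSetSubClass(pCritSectA, uSubClass);
    uint32_t uOldB = PDMR3CritSectRwSetSubClass(pCritSectB, uSubClass);
    /* RTLOCKVAL_SUB_CLASS_INVALID is returned when the lock validator isn't compiled in. */
    NOREF(uOldA); NOREF(uOldB);
}
#endif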
132
133
134#ifdef IN_RING0
135/**
136 * Go back to ring-3 so the kernel can do signals, APCs and other fun things.
137 *
138 * @param pVM The cross context VM structure.
139 */
140static void pdmR0CritSectRwYieldToRing3(PVMCC pVM)
141{
142 PVMCPUCC pVCpu = VMMGetCpu(pVM);
143 AssertPtrReturnVoid(pVCpu);
144 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
145 AssertRC(rc);
146}
147#endif /* IN_RING0 */
148
149
150/**
151 * Worker that enters a read/write critical section with shared access.
152 *
153 * @returns VBox status code.
154 * @param pVM The cross context VM structure.
155 * @param pThis Pointer to the read/write critical section.
156 * @param rcBusy The busy return code for ring-0 and ring-3.
157 * @param fTryOnly Only try enter it, don't wait.
158 * @param pSrcPos The source position. (Can be NULL.)
159 * @param fNoVal No validation records.
160 */
161static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
162 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
163{
164 /*
165 * Validate input.
166 */
167 AssertPtr(pThis);
168 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
169
170#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
171 NOREF(pSrcPos);
172 NOREF(fNoVal);
173#endif
174#ifdef IN_RING3
175 NOREF(rcBusy);
176 NOREF(pVM);
177#endif
178
179#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
180 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
181 if (!fTryOnly)
182 {
183 int rc9;
184 RTNATIVETHREAD hNativeWriter;
185 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
186 if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
187 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
188 else
189 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
190 if (RT_FAILURE(rc9))
191 return rc9;
192 }
193#endif
194
195 /*
196 * Get cracking...
197 */
198 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
199 uint64_t u64OldState = u64State;
200
201 for (;;)
202 {
203 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
204 {
205 /* It flows in the right direction, try follow it before it changes. */
206 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
207 c++;
208 Assert(c < RTCSRW_CNT_MASK / 4);
209 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
210 u64State &= ~RTCSRW_CNT_RD_MASK;
211 u64State |= c << RTCSRW_CNT_RD_SHIFT;
212 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
213 {
214#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
215 if (!fNoVal)
216 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
217#endif
218 break;
219 }
220 }
221 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
222 {
223 /* Wrong direction, but we're alone here and can simply try switch the direction. */
224 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
225 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
226 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
227 {
228 Assert(!pThis->s.Core.fNeedReset);
229#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
230 if (!fNoVal)
231 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
232#endif
233 break;
234 }
235 }
236 else
237 {
238 /* Is the writer perhaps doing a read recursion? */
239 RTNATIVETHREAD hNativeWriter;
240 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
241 if (hNativeWriter != NIL_RTNATIVETHREAD)
242 {
243 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
244 if (hNativeSelf == hNativeWriter)
245 {
246#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
247 if (!fNoVal)
248 {
249 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
250 if (RT_FAILURE(rc9))
251 return rc9;
252 }
253#endif
254 uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
255 Assert(cReads < _16K);
256 AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
257 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
258 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
259 return VINF_SUCCESS; /* don't break! */
260 }
261 }
262
263 /*
264 * If we're only trying, return already.
265 */
266 if (fTryOnly)
267 {
268 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
269 return VERR_SEM_BUSY;
270 }
271
272#if defined(IN_RING3) || defined(IN_RING0)
273# ifdef IN_RING0
274 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
275 && ASMIntAreEnabled())
276# endif
277 {
278 /*
279 * Add ourselves to the queue and wait for the direction to change.
280 */
281 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
282 c++;
283 Assert(c < RTCSRW_CNT_MASK / 2);
284 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
285
286 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
287 cWait++;
288 Assert(cWait <= c);
289 Assert(cWait < RTCSRW_CNT_MASK / 2);
290 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
291
292 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
293 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
294
295 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
296 {
297 for (uint32_t iLoop = 0; ; iLoop++)
298 {
299 int rc;
300# ifdef IN_RING3
301# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
302 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
303 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
304 if (RT_SUCCESS(rc))
305# else
306 RTTHREAD hThreadSelf = RTThreadSelf();
307 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
308# endif
309# endif
310 {
311 for (;;)
312 {
313 rc = SUPSemEventMultiWaitNoResume(pVM->pSession,
314 (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
315 RT_INDEFINITE_WAIT);
316 if ( rc != VERR_INTERRUPTED
317 || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
318 break;
319# ifdef IN_RING0
320 pdmR0CritSectRwYieldToRing3(pVM);
321# endif
322 }
323# ifdef IN_RING3
324 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
325# endif
326 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
327 return VERR_SEM_DESTROYED;
328 }
329 if (RT_FAILURE(rc))
330 {
331 /* Decrement the counts and return the error. */
332 for (;;)
333 {
334 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
335 c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
336 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
337 c--;
338 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
339 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
340 cWait--;
341 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
342 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
343 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
344 break;
345 }
346 return rc;
347 }
348
349 Assert(pThis->s.Core.fNeedReset);
350 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
351 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
352 break;
353 AssertMsg(iLoop < 1, ("%u\n", iLoop));
354 }
355
356 /* Decrement the wait count and maybe reset the semaphore (if we're last). */
357 for (;;)
358 {
359 u64OldState = u64State;
360
361 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
362 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
363 cWait--;
364 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
365 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
366
367 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
368 {
369 if (cWait == 0)
370 {
371 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
372 {
373 int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
374 AssertRCReturn(rc, rc);
375 }
376 }
377 break;
378 }
379 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
380 }
381
382# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
383 if (!fNoVal)
384 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
385# endif
386 break;
387 }
388 }
389#endif /* IN_RING3 || IN_RING0 */
390#ifndef IN_RING3
391# ifdef IN_RING0
392 else
393# endif
394 {
395 /*
396 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
397 * back to ring-3 and do it there or return rcBusy.
398 */
399 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
400 if (rcBusy == VINF_SUCCESS)
401 {
402 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
403 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
404 * back to ring-3. Goes for both kinds of crit sects. */
405 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
406 }
407 return rcBusy;
408 }
409#endif /* !IN_RING3 */
410 }
411
412 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
413 return VERR_SEM_DESTROYED;
414
415 ASMNopPause();
416 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
417 u64OldState = u64State;
418 }
419
420 /* got it! */
421 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
422 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
423 return VINF_SUCCESS;
424
425}
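
/*
 * Debugging sketch: how the packed u64State word manipulated above can be
 * decoded.  It relies only on the RTCSRW_* masks and shifts already used by
 * the workers in this file; the helper itself is hypothetical.
 */
DECLINLINE(void) examplePdmCritSectRwLogState(PPDMCRITSECTRW pThis)
{
    uint64_t const u64State  = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t const cReads    = (u64State & RTCSRW_CNT_RD_MASK)      >> RTCSRW_CNT_RD_SHIFT;
    uint64_t const cWriters  = (u64State & RTCSRW_CNT_WR_MASK)      >> RTCSRW_CNT_WR_SHIFT;
    uint64_t const cWaitRead = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    bool     const fWriteDir = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
    Log(("CritSectRw %p: dir=%s cReads=%llu (waiting %llu) cWriters=%llu (state %#llx)\n",
         pThis, fWriteDir ? "write" : "read", cReads, cWaitRead, cWriters, u64State));
}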
426
427
428/**
429 * Enter a critical section with shared (read) access.
430 *
431 * @returns VBox status code.
432 * @retval VINF_SUCCESS on success.
433 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
434 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
435 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
436 * during the operation.
437 *
438 * @param pVM The cross context VM structure.
439 * @param pThis Pointer to the read/write critical section.
440 * @param rcBusy The status code to return when we're in RC or R0 and the
441 * section is busy. Pass VINF_SUCCESS to acquire the
442 * critical section thru a ring-3 call if necessary.
443 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
444 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
445 * RTCritSectRwEnterShared.
446 */
447VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
448{
449#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
450 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
451#else
452 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
453 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
454#endif
455}
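
/*
 * Minimal usage sketch for the shared (read) API, assuming a hypothetical
 * caller that copies a value out of a structure guarded by the section.  Only
 * the PDMCritSectRwEnterShared/PDMCritSectRwLeaveShared calls and the
 * VERR_SEM_BUSY convention come from this file; everything else is made up.
 */
static int exampleReadUnderSharedLock(PVMCC pVM, PPDMCRITSECTRW pCritSect,
                                      uint32_t const *puGuardedValue, uint32_t *puDst)
{
    /* Fail with VERR_SEM_BUSY in ring-0/raw-mode instead of detouring through ring-3. */
    int rc = PDMCritSectRwEnterShared(pVM, pCritSect, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        *puDst = *puGuardedValue;   /* read-only access; other readers may run concurrently */
        rc = PDMCritSectRwLeaveShared(pVM, pCritSect);
    }
    return rc;
}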
456
457
458/**
459 * Enter a critical section with shared (read) access.
460 *
461 * @returns VBox status code.
462 * @retval VINF_SUCCESS on success.
463 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
464 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
465 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
466 * during the operation.
467 *
468 * @param pVM The cross context VM structure.
469 * @param pThis Pointer to the read/write critical section.
470 * @param rcBusy The status code to return when we're in RC or R0 and the
471 * section is busy. Pass VINF_SUCCESS to acquire the
472 * critical section thru a ring-3 call if necessary.
473 * @param uId Where we're entering the section.
474 * @param SRC_POS The source position.
475 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
476 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
477 * RTCritSectRwEnterSharedDebug.
478 */
479VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
480{
481 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
482#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
483 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
484#else
485 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
486 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
487#endif
488}
489
490
491/**
492 * Try enter a critical section with shared (read) access.
493 *
494 * @returns VBox status code.
495 * @retval VINF_SUCCESS on success.
496 * @retval VERR_SEM_BUSY if the critsect was owned.
497 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
498 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
499 * during the operation.
500 *
501 * @param pVM The cross context VM structure.
502 * @param pThis Pointer to the read/write critical section.
503 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
504 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
505 * RTCritSectRwTryEnterShared.
506 */
507VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
508{
509#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
510 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
511#else
512 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
513 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
514#endif
515}
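
/*
 * Sketch of the try/fallback pattern, assuming a hypothetical caller that must
 * not block: take the shared lock opportunistically and tell the caller to
 * retry later if the section is currently held in the write direction.
 */
static bool exampleTryReadOrDefer(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    int rc = PDMCritSectRwTryEnterShared(pVM, pCritSect);
    if (RT_FAILURE(rc))
        return false;                   /* typically VERR_SEM_BUSY: defer the work */
    /* ... read the guarded state here ... */
    PDMCritSectRwLeaveShared(pVM, pCritSect);
    return true;
}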
516
517
518/**
519 * Try enter a critical section with shared (read) access.
520 *
521 * @returns VBox status code.
522 * @retval VINF_SUCCESS on success.
523 * @retval VERR_SEM_BUSY if the critsect was owned.
524 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
525 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
526 * during the operation.
527 *
528 * @param pVM The cross context VM structure.
529 * @param pThis Pointer to the read/write critical section.
530 * @param uId Where we're entering the section.
531 * @param SRC_POS The source position.
532 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
533 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
534 * RTCritSectRwTryEnterSharedDebug.
535 */
536VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
537{
538 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
539#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
540 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
541#else
542 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
543 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
544#endif
545}
546
547
548#ifdef IN_RING3
549/**
550 * Enters a PDM read/write critical section with shared (read) access.
551 *
552 * @returns VINF_SUCCESS if entered successfully.
553 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
554 * during the operation.
555 *
556 * @param pVM The cross context VM structure.
557 * @param pThis Pointer to the read/write critical section.
558 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
559 */
560VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
561{
562 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3);
563}
564#endif
565
566
567/**
568 * Leave a critical section held with shared access.
569 *
570 * @returns VBox status code.
571 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
572 * during the operation.
573 * @param pVM The cross context VM structure.
574 * @param pThis Pointer to the read/write critical section.
575 * @param fNoVal No validation records (i.e. queued release).
576 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
577 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
578 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
579 */
580static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
581{
582 /*
583 * Validate handle.
584 */
585 AssertPtr(pThis);
586 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
587
588#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
589 NOREF(fNoVal);
590#endif
591
592 /*
593 * Check the direction and take action accordingly.
594 */
595 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
596 uint64_t u64OldState = u64State;
597 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
598 {
599#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
600 if (fNoVal)
601 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
602 else
603 {
604 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
605 if (RT_FAILURE(rc9))
606 return rc9;
607 }
608#endif
609 for (;;)
610 {
611 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
612 AssertReturn(c > 0, VERR_NOT_OWNER);
613 c--;
614
615 if ( c > 0
616 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
617 {
618 /* Don't change the direction. */
619 u64State &= ~RTCSRW_CNT_RD_MASK;
620 u64State |= c << RTCSRW_CNT_RD_SHIFT;
621 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
622 break;
623 }
624 else
625 {
626#if defined(IN_RING3) || defined(IN_RING0)
627# ifdef IN_RING0
628 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
629 && ASMIntAreEnabled())
630# endif
631 {
632 /* Reverse the direction and signal the writer threads. */
633 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
634 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
635 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
636 {
637 int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
638 AssertRC(rc);
639 break;
640 }
641 }
642#endif /* IN_RING3 || IN_RING0 */
643#ifndef IN_RING3
644# ifdef IN_RING0
645 else
646# endif
647 {
648 /* Queue the exit request (ring-3). */
649 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
650 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
651 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
652 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
653 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
654 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
655 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
656 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
657 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & PAGE_OFFSET_MASK)
658 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
659 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
660 pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
661 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
662 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
663 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
664 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
665 break;
666 }
667#endif
668 }
669
670 ASMNopPause();
671 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
672 u64OldState = u64State;
673 }
674 }
675 else
676 {
677 /*
678 * Write direction. Check that it's the owner calling and that it has reads to undo.
679 */
680 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
681 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
682
683 RTNATIVETHREAD hNativeWriter;
684 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
685 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
686 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
687#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
688 if (!fNoVal)
689 {
690 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
691 if (RT_FAILURE(rc))
692 return rc;
693 }
694#endif
695 uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
696 AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
697 }
698
699 return VINF_SUCCESS;
700}
701
702
703/**
704 * Leave a critical section held with shared access.
705 *
706 * @returns VBox status code.
707 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
708 * during the operation.
709 * @param pVM The cross context VM structure.
710 * @param pThis Pointer to the read/write critical section.
711 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
712 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
713 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
714 */
715VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
716{
717 return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
718}
719
720
721#if defined(IN_RING3) || defined(IN_RING0)
722/**
723 * PDMCritSectBothFF interface.
724 *
725 * @param pVM The cross context VM structure.
726 * @param pThis Pointer to the read/write critical section.
727 */
728void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
729{
730 pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
731}
732#endif
733
734
735/**
736 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
737 *
738 * @returns @a rc unless corrupted.
739 * @param pThis Pointer to the read/write critical section.
740 * @param rc The status to return.
741 */
742DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
743{
744 /*
745 * Decrement the counts and return the error.
746 */
747 for (;;)
748 {
749 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
750 uint64_t const u64OldState = u64State;
751 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
752 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
753 c--;
754 u64State &= ~RTCSRW_CNT_WR_MASK;
755 u64State |= c << RTCSRW_CNT_WR_SHIFT;
756 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
757 return rc;
758
759 ASMNopPause();
760 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
761 ASMNopPause();
762 }
763}
764
765
766/**
767 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
768 * gotten exclusive ownership of the critical section.
769 */
770DECLINLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
771{
772 RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
773 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
774
775#if 1 /** @todo consider generating less noise... */
776 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
777#else
778 pThis->s.Core.cWriteRecursions = 1;
779#endif
780 Assert(pThis->s.Core.cWriterReads == 0);
781
782#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
783 if (!fNoVal)
784 {
785 if (hThreadSelf == NIL_RTTHREAD)
786 hThreadSelf = RTThreadSelfAutoAdopt();
787 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
788 }
789#endif
790 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
791 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
792 return VINF_SUCCESS;
793}
794
795
796#if defined(IN_RING3) || defined(IN_RING0)
797/**
798 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
799 * contended.
800 */
801static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
802 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
803{
804 RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);
805
806 for (uint32_t iLoop = 0; ; iLoop++)
807 {
808 /*
809 * Wait for our turn.
810 */
811 int rc;
812# ifdef IN_RING3
813# ifdef PDMCRITSECTRW_STRICT
814 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
815 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
816 if (RT_SUCCESS(rc))
817# else
818 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
819# endif
820# endif
821 {
822 for (;;)
823 {
824 rc = SUPSemEventWaitNoResume(pVM->pSession,
825 (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
826 RT_INDEFINITE_WAIT);
827 if ( rc != VERR_INTERRUPTED
828 || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
829 break;
830# ifdef IN_RING0
831 pdmR0CritSectRwYieldToRing3(pVM);
832# endif
833 }
834
835# ifdef IN_RING3
836 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
837# endif
838 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
839 { /* likely */ }
840 else
841 return VERR_SEM_DESTROYED;
842 }
843 if (RT_FAILURE(rc))
844 return pdmCritSectRwEnterExclBailOut(pThis, rc);
845
846 /*
847 * Try take exclusive write ownership.
848 */
849 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
850 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
851 {
852 bool fDone;
853 ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
854 if (fDone)
855 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
856 }
857 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
858 }
859}
860#endif /* IN_RING3 || IN_RING0 */
861
862
863/**
864 * Worker that enters a read/write critical section with exclusive access.
865 *
866 * @returns VBox status code.
867 * @param pVM The cross context VM structure.
868 * @param pThis Pointer to the read/write critical section.
869 * @param rcBusy The busy return code for ring-0 and ring-3.
870 * @param fTryOnly Only try enter it, don't wait.
871 * @param pSrcPos The source position. (Can be NULL.)
872 * @param fNoVal No validation records.
873 */
874static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
875 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
876{
877 /*
878 * Validate input.
879 */
880 AssertPtr(pThis);
881 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
882
883#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
884 NOREF(pSrcPos);
885 NOREF(fNoVal);
886#endif
887#ifdef IN_RING3
888 NOREF(rcBusy);
889 NOREF(pVM);
890#endif
891
892 RTTHREAD hThreadSelf = NIL_RTTHREAD;
893#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
894 if (!fTryOnly)
895 {
896 hThreadSelf = RTThreadSelfAutoAdopt();
897 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
898 if (RT_FAILURE(rc9))
899 return rc9;
900 }
901#endif
902
903 /*
904 * Check if we're already the owner and just recursing.
905 */
906 RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
907 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
908 RTNATIVETHREAD hNativeWriter;
909 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
910 if (hNativeSelf == hNativeWriter)
911 {
912 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
913#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
914 if (!fNoVal)
915 {
916 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
917 if (RT_FAILURE(rc9))
918 return rc9;
919 }
920#endif
921 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
922 uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
923 AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
924 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
925 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
926 return VINF_SUCCESS;
927 }
928
929 /*
930 * Get cracking.
931 */
932 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
933 uint64_t u64OldState = u64State;
934
935 for (;;)
936 {
937 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
938 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
939 {
940 /* It flows in the right direction, try follow it before it changes. */
941 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
942 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
943 c++;
944 Assert(c < RTCSRW_CNT_WR_MASK / 4);
945 u64State &= ~RTCSRW_CNT_WR_MASK;
946 u64State |= c << RTCSRW_CNT_WR_SHIFT;
947 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
948 break;
949 }
950 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
951 {
952 /* Wrong direction, but we're alone here and can simply try switch the direction. */
953 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
954 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
955 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
956 break;
957 }
958 else if (fTryOnly)
959 {
960 /* Wrong direction and we're not supposed to wait, just return. */
961 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
962 return VERR_SEM_BUSY;
963 }
964 else
965 {
966 /* Add ourselves to the write count and break out to do the wait. */
967 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
968 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
969 c++;
970 Assert(c < RTCSRW_CNT_WR_MASK / 4);
971 u64State &= ~RTCSRW_CNT_WR_MASK;
972 u64State |= c << RTCSRW_CNT_WR_SHIFT;
973 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
974 break;
975 }
976
977 ASMNopPause();
978
979 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
980 { /* likely */ }
981 else
982 return VERR_SEM_DESTROYED;
983
984 ASMNopPause();
985 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
986 u64OldState = u64State;
987 }
988
989 /*
990 * If we're in write mode now try grab the ownership. Play fair if there
991 * are threads already waiting, unless we're in ring-0.
992 */
993 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
994#if defined(IN_RING3)
995 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
996 || fTryOnly)
997#endif
998 ;
999 if (fDone)
1000 {
1001 ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1002 if (fDone)
1003 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1004 }
1005
1006 /*
1007 * Okay, we have contention and will have to wait unless we're just trying.
1008 */
1009 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1010
1011#if defined(IN_RING3) || defined(IN_RING0)
1012 if ( !fTryOnly
1013# ifdef IN_RING0
1014 && RTThreadPreemptIsEnabled(NIL_RTTHREAD)
1015 && ASMIntAreEnabled()
1016# endif
1017 )
1018 {
1019# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
1020 if (hThreadSelf == NIL_RTTHREAD)
1021 hThreadSelf = RTThreadSelfAutoAdopt();
1022 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
1023# elif defined(IN_RING3)
1024 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());
1025# else
1026 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1027# endif
1028 }
1029#endif /* IN_RING3 || IN_RING0 */
1030
1031#ifdef IN_RING3
1032 /* TryEnter call - decrement the number of (waiting) writers. */
1033 return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
1034
1035#else
1036
1037 /* We cannot call SUPSemEventWaitNoResume in this context. Go back to
1038 ring-3 and do it there or return rcBusy. */
1039 rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
1040 if (rcBusy == VINF_SUCCESS)
1041 {
1042 Assert(!fTryOnly);
1043 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1044 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
1045 * back to ring-3. Goes for both kinds of crit sects. */
1046 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
1047 }
1048 return rcBusy;
1049#endif
1050}
1051
1052
1053/**
1054 * Try enter a critical section with exclusive (write) access.
1055 *
1056 * @returns VBox status code.
1057 * @retval VINF_SUCCESS on success.
1058 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1059 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
1060 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1061 * during the operation.
1062 *
1063 * @param pVM The cross context VM structure.
1064 * @param pThis Pointer to the read/write critical section.
1065 * @param rcBusy The status code to return when we're in RC or R0 and the
1066 * section is busy. Pass VINF_SUCCESS to acquire the
1067 * critical section thru a ring-3 call if necessary.
1068 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1069 * PDMCritSectRwTryEnterExclDebug,
1070 * PDMCritSectEnterDebug, PDMCritSectEnter,
1071 * RTCritSectRwEnterExcl.
1072 */
1073VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
1074{
1075#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1076 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1077#else
1078 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1079 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1080#endif
1081}
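
/*
 * Minimal usage sketch for the exclusive (write) API, assuming a hypothetical
 * update routine.  Passing VINF_SUCCESS as rcBusy asks a contended ring-0 or
 * raw-mode caller to acquire the section via a ring-3 call rather than fail;
 * the guarded data is made up.
 */
static int exampleUpdateUnderExclLock(PVMCC pVM, PPDMCRITSECTRW pCritSect, uint32_t *puGuardedValue, uint32_t uNew)
{
    int rc = PDMCritSectRwEnterExcl(pVM, pCritSect, VINF_SUCCESS /* rcBusy */);
    AssertRCReturn(rc, rc);
    *puGuardedValue = uNew;             /* exclusive access: no other readers or writers */
    return PDMCritSectRwLeaveExcl(pVM, pCritSect);
}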
1082
1083
1084/**
1085 * Try enter a critical section with exclusive (write) access.
1086 *
1087 * @returns VBox status code.
1088 * @retval VINF_SUCCESS on success.
1089 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1090 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
1091 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1092 * during the operation.
1093 *
1094 * @param pVM The cross context VM structure.
1095 * @param pThis Pointer to the read/write critical section.
1096 * @param rcBusy The status code to return when we're in RC or R0 and the
1097 * section is busy. Pass VINF_SUCCESS to acquire the
1098 * critical section thru a ring-3 call if necessary.
1099 * @param uId Where we're entering the section.
1100 * @param SRC_POS The source position.
1101 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1102 * PDMCritSectRwTryEnterExclDebug,
1103 * PDMCritSectEnterDebug, PDMCritSectEnter,
1104 * RTCritSectRwEnterExclDebug.
1105 */
1106VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1107{
1108 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1109#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1110 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1111#else
1112 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1113 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1114#endif
1115}
1116
1117
1118/**
1119 * Try enter a critical section with exclusive (write) access.
1120 *
1121 * @retval VINF_SUCCESS on success.
1122 * @retval VERR_SEM_BUSY if the critsect was owned.
1123 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
1124 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1125 * during the operation.
1126 *
1127 * @param pVM The cross context VM structure.
1128 * @param pThis Pointer to the read/write critical section.
1129 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1130 * PDMCritSectRwEnterExclDebug,
1131 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1132 * RTCritSectRwTryEnterExcl.
1133 */
1134VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1135{
1136#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1137 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1138#else
1139 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1140 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1141#endif
1142}
1143
1144
1145/**
1146 * Try enter a critical section with exclusive (write) access.
1147 *
1148 * @retval VINF_SUCCESS on success.
1149 * @retval VERR_SEM_BUSY if the critsect was owned.
1150 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
1151 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1152 * during the operation.
1153 *
1154 * @param pVM The cross context VM structure.
1155 * @param pThis Pointer to the read/write critical section.
1156 * @param uId Where we're entering the section.
1157 * @param SRC_POS The source position.
1158 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1159 * PDMCritSectRwEnterExclDebug,
1160 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1161 * RTCritSectRwTryEnterExclDebug.
1162 */
1163VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1164{
1165 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1166#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1167 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1168#else
1169 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1170 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1171#endif
1172}
1173
1174
1175#ifdef IN_RING3
1176/**
1177 * Enters a PDM read/write critical section with exclusive (write) access.
1178 *
1179 * @returns VINF_SUCCESS if entered successfully.
1180 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1181 * during the operation.
1182 *
1183 * @param pVM The cross context VM structure.
1184 * @param pThis Pointer to the read/write critical section.
1185 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1186 */
1187VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1188{
1189 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3 /*fNoVal*/);
1190}
1191#endif /* IN_RING3 */
1192
1193
1194/**
1195 * Leave a critical section held exclusively.
1196 *
1197 * @returns VBox status code.
1198 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1199 * during the operation.
1200 * @param pVM The cross context VM structure.
1201 * @param pThis Pointer to the read/write critical section.
1202 * @param fNoVal No validation records (i.e. queued release).
1203 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1204 */
1205static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1206{
1207 /*
1208 * Validate handle.
1209 */
1210 AssertPtr(pThis);
1211 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1212
1213#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1214 NOREF(fNoVal);
1215#endif
1216
1217 /*
1218 * Check ownership.
1219 */
1220 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1221 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1222
1223 RTNATIVETHREAD hNativeWriter;
1224 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
1225 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1226
1227 /*
1228 * Unwind one recursion. Is it the final one?
1229 */
1230 if (pThis->s.Core.cWriteRecursions == 1)
1231 {
1232 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1233#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1234 if (fNoVal)
1235 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1236 else
1237 {
1238 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1239 if (RT_FAILURE(rc9))
1240 return rc9;
1241 }
1242#endif
1243 /*
1244 * Update the state.
1245 */
1246#if defined(IN_RING3) || defined(IN_RING0)
1247# ifdef IN_RING0
1248 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
1249 && ASMIntAreEnabled())
1250# endif
1251 {
1252 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1253 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1254 ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);
1255
1256 for (;;)
1257 {
1258 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1259 uint64_t u64OldState = u64State;
1260
1261 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1262 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1263 c--;
1264
1265 if ( c > 0
1266 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1267 {
1268 /* Don't change the direction, wake up the next writer if any. */
1269 u64State &= ~RTCSRW_CNT_WR_MASK;
1270 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1271 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
1272 {
1273 if (c > 0)
1274 {
1275 int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1276 AssertRC(rc);
1277 }
1278 break;
1279 }
1280 }
1281 else
1282 {
1283 /* Reverse the direction and signal the reader threads. */
1284 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1285 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1286 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
1287 {
1288 Assert(!pThis->s.Core.fNeedReset);
1289 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1290 int rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1291 AssertRC(rc);
1292 break;
1293 }
1294 }
1295
1296 ASMNopPause();
1297 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
1298 return VERR_SEM_DESTROYED;
1299 }
1300 }
1301#endif /* IN_RING3 || IN_RING0 */
1302#ifndef IN_RING3
1303# ifdef IN_RING0
1304 else
1305# endif
1306 {
1307 /*
1308 * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal here,
1309 * so queue the exit request (ring-3).
1310 */
1311 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1312 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1313 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
1314 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1315 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1316 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1317 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1318 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
1319 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & PAGE_OFFSET_MASK)
1320 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
1321 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1322 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1323 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1324 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1325 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1326 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1327 }
1328#endif
1329 }
1330 else
1331 {
1332 /*
1333 * Not the final recursion.
1334 */
1335#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1336 if (fNoVal)
1337 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1338 else
1339 {
1340 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1341 if (RT_FAILURE(rc9))
1342 return rc9;
1343 }
1344#endif
1345 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1346 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1347 }
1348
1349 return VINF_SUCCESS;
1350}
1351
1352
1353/**
1354 * Leave a critical section held exclusively.
1355 *
1356 * @returns VBox status code.
1357 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1358 * during the operation.
1359 * @param pVM The cross context VM structure.
1360 * @param pThis Pointer to the read/write critical section.
1361 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1362 */
1363VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1364{
1365 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
1366}
1367
1368
1369#if defined(IN_RING3) || defined(IN_RING0)
1370/**
1371 * PDMCritSectBothFF interface.
1372 *
1373 * @param pVM The cross context VM structure.
1374 * @param pThis Pointer to the read/write critical section.
1375 */
1376void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1377{
1378 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
1379}
1380#endif
1381
1382
1383/**
1384 * Checks the caller is the exclusive (write) owner of the critical section.
1385 *
1386 * @retval true if owner.
1387 * @retval false if not owner.
1388 * @param pVM The cross context VM structure.
1389 * @param pThis Pointer to the read/write critical section.
1390 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1391 * RTCritSectRwIsWriteOwner.
1392 */
1393VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
1394{
1395 /*
1396 * Validate handle.
1397 */
1398 AssertPtr(pThis);
1399 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1400
1401 /*
1402 * Check ownership.
1403 */
1404 RTNATIVETHREAD hNativeWriter;
1405 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
1406 if (hNativeWriter == NIL_RTNATIVETHREAD)
1407 return false;
1408 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1409}
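
/*
 * Typical assertion-style use, assuming a hypothetical helper that must only
 * be called while holding the section exclusively.
 */
static void exampleAssertWriteOwned(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    Assert(PDMCritSectRwIsWriteOwner(pVM, pCritSect));
    /* ... modify state protected by the write lock ... */
    RT_NOREF(pVM, pCritSect);
}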
1410
1411
1412/**
1413 * Checks if the caller is one of the read owners of the critical section.
1414 *
1415 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1416 * enabled. Meaning, the answer is not trustworthy unless
1417 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1418 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1419 * creating the semaphore. And finally, if you used a locking class,
1420 * don't disable deadlock detection by setting cMsMinDeadlock to
1421 * RT_INDEFINITE_WAIT.
1422 *
1423 * In short, only use this for assertions.
1424 *
1425 * @returns @c true if reader, @c false if not.
1426 * @param pVM The cross context VM structure.
1427 * @param pThis Pointer to the read/write critical section.
1428 * @param fWannaHear What you'd like to hear when lock validation is not
1429 * available. (For avoiding asserting all over the place.)
1430 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1431 */
1432VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
1433{
1434 /*
1435 * Validate handle.
1436 */
1437 AssertPtr(pThis);
1438 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1439
1440 /*
1441 * Inspect the state.
1442 */
1443 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1444 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1445 {
1446 /*
1447 * It's in write mode, so we can only be a reader if we're also the
1448 * current writer.
1449 */
1450 RTNATIVETHREAD hWriter;
1451 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
1452 if (hWriter == NIL_RTNATIVETHREAD)
1453 return false;
1454 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1455 }
1456
1457 /*
1458 * Read mode. If there are no current readers, then we cannot be a reader.
1459 */
1460 if (!(u64State & RTCSRW_CNT_RD_MASK))
1461 return false;
1462
1463#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1464 /*
1465 * Ask the lock validator.
1466 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
1467 */
1468 NOREF(fWannaHear);
1469 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
1470#else
1471 /*
1472 * Ok, we don't know, just tell the caller what he wants to hear.
1473 */
1474 return fWannaHear;
1475#endif
1476}
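
/*
 * Assertion-style sketch, assuming a hypothetical accessor that requires at
 * least shared ownership.  As the note above warns, the answer is only
 * reliable with lock validation, so fWannaHear=true keeps builds without it quiet.
 */
static void exampleAssertReadOwned(PVMCC pVM, PPDMCRITSECTRW pCritSect)
{
    Assert(PDMCritSectRwIsReadOwner(pVM, pCritSect, true /*fWannaHear*/));
    /* ... read state protected by the section ... */
    RT_NOREF(pVM, pCritSect);
}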
1477
1478
1479/**
1480 * Gets the write recursion count.
1481 *
1482 * @returns The write recursion count (0 if bad critsect).
1483 * @param pThis Pointer to the read/write critical section.
1484 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
1485 * RTCritSectRwGetWriteRecursion.
1486 */
1487VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
1488{
1489 /*
1490 * Validate handle.
1491 */
1492 AssertPtr(pThis);
1493 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1494
1495 /*
1496 * Return the requested data.
1497 */
1498 return pThis->s.Core.cWriteRecursions;
1499}
1500
1501
1502/**
1503 * Gets the read recursion count of the current writer.
1504 *
1505 * @returns The read recursion count (0 if bad critsect).
1506 * @param pThis Pointer to the read/write critical section.
1507 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
1508 * RTCritSectRwGetWriterReadRecursion.
1509 */
1510VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
1511{
1512 /*
1513 * Validate handle.
1514 */
1515 AssertPtr(pThis);
1516 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1517
1518 /*
1519 * Return the requested data.
1520 */
1521 return pThis->s.Core.cWriterReads;
1522}
1523
1524
1525/**
1526 * Gets the current number of reads.
1527 *
1528 * This includes all read recursions, so it might be higher than the number of
1529 * read owners. It does not include reads done by the current writer.
1530 *
1531 * @returns The read count (0 if bad critsect).
1532 * @param pThis Pointer to the read/write critical section.
1533 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
1534 * RTCritSectRwGetReadCount.
1535 */
1536VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
1537{
1538 /*
1539 * Validate input.
1540 */
1541 AssertPtr(pThis);
1542 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1543
1544 /*
1545 * Return the requested data.
1546 */
1547 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1548 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
1549 return 0;
1550 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
1551}
1552
1553
1554/**
1555 * Checks if the read/write critical section is initialized or not.
1556 *
1557 * @retval true if initialized.
1558 * @retval false if not initialized.
1559 * @param pThis Pointer to the read/write critical section.
1560 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
1561 */
1562VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
1563{
1564 AssertPtr(pThis);
1565 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
1566}
1567