VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@78402

Last change on this file since 78402 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 54.4 KB
1/* $Id: PDMAllCritSectRw.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vm.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39#if defined(IN_RING3) || defined(IN_RING0)
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** The number of loops to spin for shared access in ring-3. */
48#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
49/** The number of loops to spin for shared access in ring-0. */
50#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
51/** The number of loops to spin for shared access in the raw-mode context. */
52#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
53
54/** The number of loops to spin for exclusive access in ring-3. */
55#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
56/** The number of loops to spin for exclusive access in ring-0. */
57#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
58/** The number of loops to spin for exclusive access in the raw-mode context. */
59#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
60
61
62/* Undefine the automatic VBOX_STRICT API mappings. */
63#undef PDMCritSectRwEnterExcl
64#undef PDMCritSectRwTryEnterExcl
65#undef PDMCritSectRwEnterShared
66#undef PDMCritSectRwTryEnterShared
67
68
69/**
70 * Gets the ring-3 native thread handle of the calling thread.
71 *
72 * @returns native thread handle (ring-3).
73 * @param pThis The read/write critical section. This is only used in
74 * R0 and RC.
75 */
76DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis)
77{
78#ifdef IN_RING3
79 NOREF(pThis);
80 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
81#else
82 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
83 NIL_RTNATIVETHREAD);
84 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
85 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
86 RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
87#endif
88 return hNativeSelf;
89}
90
91
92
93
94
95#ifdef IN_RING3
96/**
97 * Changes the lock validator sub-class of the read/write critical section.
98 *
99 * It is recommended to try to make sure that nobody is using this critical section
100 * while changing the value.
101 *
102 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
103 * lock validator isn't compiled in or either of the parameters is
104 * invalid.
105 * @param pThis Pointer to the read/write critical section.
106 * @param uSubClass The new sub-class value.
107 */
108VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
109{
110 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
111 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
112# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
113 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
114
115 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
116 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
117# else
118 NOREF(uSubClass);
119 return RTLOCKVAL_SUB_CLASS_INVALID;
120# endif
121}
122#endif /* IN_RING3 */
123
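As a ring-3 usage illustration: a component that deliberately nests sections of the same class can give them a user-defined validator sub-class so the lock-order checks accept the pattern. A minimal, hedged sketch, assuming the VBox VMM build environment; the MYDEVICE structure and its fields are invented for illustration and are reused by the later sketches in this file. RTLOCKVAL_SUB_CLASS_USER comes from iprt/lockvalidator.h.

/* Hypothetical device state; purely illustrative. */
typedef struct MYDEVICE
{
    PDMCRITSECTRW   CritSectRw;   /* the read/write critical section */
    uint32_t        uConfig;      /* some shared device state        */
} MYDEVICE;

/* Hypothetical ring-3 init sketch: assign a user-defined validator sub-class. */
static void myDevR3InitLocking(MYDEVICE *pDev)
{
    uint32_t uOld = PDMR3CritSectRwSetSubClass(&pDev->CritSectRw, RTLOCKVAL_SUB_CLASS_USER);
    /* RTLOCKVAL_SUB_CLASS_INVALID simply means the validator isn't compiled in. */
    NOREF(uOld);
}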
124
125#ifdef IN_RING0
126/**
127 * Go back to ring-3 so the kernel can do signals, APCs and other fun things.
128 *
129 * @param pThis Pointer to the read/write critical section.
130 */
131static void pdmR0CritSectRwYieldToRing3(PPDMCRITSECTRW pThis)
132{
133 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
134 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
135 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
136 AssertRC(rc);
137}
138#endif /* IN_RING0 */
139
140
141/**
142 * Worker that enters a read/write critical section with shared access.
143 *
144 * @returns VBox status code.
145 * @param pThis Pointer to the read/write critical section.
146 * @param rcBusy The busy return code for ring-0 and ring-3.
147 * @param fTryOnly Only try enter it, don't wait.
148 * @param pSrcPos The source position. (Can be NULL.)
149 * @param fNoVal No validation records.
150 */
151static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
152{
153 /*
154 * Validate input.
155 */
156 AssertPtr(pThis);
157 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
158
159#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
160 NOREF(pSrcPos);
161 NOREF(fNoVal);
162#endif
163#ifdef IN_RING3
164 NOREF(rcBusy);
165#endif
166
167#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
168 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
169 if (!fTryOnly)
170 {
171 int rc9;
172 RTNATIVETHREAD hNativeWriter;
173 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
174 if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis))
175 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
176 else
177 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
178 if (RT_FAILURE(rc9))
179 return rc9;
180 }
181#endif
182
183 /*
184 * Get cracking...
185 */
186 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
187 uint64_t u64OldState = u64State;
188
189 for (;;)
190 {
191 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
192 {
193 /* It flows in the right direction, try to follow it before it changes. */
194 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
195 c++;
196 Assert(c < RTCSRW_CNT_MASK / 2);
197 u64State &= ~RTCSRW_CNT_RD_MASK;
198 u64State |= c << RTCSRW_CNT_RD_SHIFT;
199 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
200 {
201#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
202 if (!fNoVal)
203 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
204#endif
205 break;
206 }
207 }
208 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
209 {
210 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
211 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
212 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
213 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
214 {
215 Assert(!pThis->s.Core.fNeedReset);
216#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
217 if (!fNoVal)
218 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
219#endif
220 break;
221 }
222 }
223 else
224 {
225 /* Is the writer perhaps doing a read recursion? */
226 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
227 RTNATIVETHREAD hNativeWriter;
228 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
229 if (hNativeSelf == hNativeWriter)
230 {
231#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
232 if (!fNoVal)
233 {
234 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
235 if (RT_FAILURE(rc9))
236 return rc9;
237 }
238#endif
239 Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
240 ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
241 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
242 return VINF_SUCCESS; /* don't break! */
243 }
244
245 /*
246 * If we're only trying, return already.
247 */
248 if (fTryOnly)
249 {
250 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
251 return VERR_SEM_BUSY;
252 }
253
254#if defined(IN_RING3) || defined(IN_RING0)
255# ifdef IN_RING0
256 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
257 && ASMIntAreEnabled())
258# endif
259 {
260 /*
261 * Add ourselves to the queue and wait for the direction to change.
262 */
263 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
264 c++;
265 Assert(c < RTCSRW_CNT_MASK / 2);
266
267 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
268 cWait++;
269 Assert(cWait <= c);
270 Assert(cWait < RTCSRW_CNT_MASK / 2);
271
272 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
273 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
274
275 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
276 {
277 for (uint32_t iLoop = 0; ; iLoop++)
278 {
279 int rc;
280# ifdef IN_RING3
281# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
282 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
283 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
284 if (RT_SUCCESS(rc))
285# else
286 RTTHREAD hThreadSelf = RTThreadSelf();
287 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
288# endif
289# endif
290 {
291 for (;;)
292 {
293 rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
294 (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
295 RT_INDEFINITE_WAIT);
296 if ( rc != VERR_INTERRUPTED
297 || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
298 break;
299# ifdef IN_RING0
300 pdmR0CritSectRwYieldToRing3(pThis);
301# endif
302 }
303# ifdef IN_RING3
304 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
305# endif
306 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
307 return VERR_SEM_DESTROYED;
308 }
309 if (RT_FAILURE(rc))
310 {
311 /* Decrement the counts and return the error. */
312 for (;;)
313 {
314 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
315 c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
316 c--;
317 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
318 cWait--;
319 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
320 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
321 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
322 break;
323 }
324 return rc;
325 }
326
327 Assert(pThis->s.Core.fNeedReset);
328 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
329 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
330 break;
331 AssertMsg(iLoop < 1, ("%u\n", iLoop));
332 }
333
334 /* Decrement the wait count and maybe reset the semaphore (if we're last). */
335 for (;;)
336 {
337 u64OldState = u64State;
338
339 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
340 Assert(cWait > 0);
341 cWait--;
342 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
343 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
344
345 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
346 {
347 if (cWait == 0)
348 {
349 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
350 {
351 int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
352 (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
353 AssertRCReturn(rc, rc);
354 }
355 }
356 break;
357 }
358 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
359 }
360
361# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
362 if (!fNoVal)
363 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
364# endif
365 break;
366 }
367 }
368#endif /* IN_RING3 || IN_RING0 */
369#ifndef IN_RING3
370# ifdef IN_RING0
371 else
372# endif
373 {
374 /*
375 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
376 * back to ring-3 and do it there or return rcBusy.
377 */
378 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
379 if (rcBusy == VINF_SUCCESS)
380 {
381 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
382 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
383 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
384 * back to ring-3. Goes for both kinds of crit sects. */
385 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
386 }
387 return rcBusy;
388 }
389#endif /* !IN_RING3 */
390 }
391
392 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
393 return VERR_SEM_DESTROYED;
394
395 ASMNopPause();
396 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
397 u64OldState = u64State;
398 }
399
400 /* got it! */
401 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
402 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
403 return VINF_SUCCESS;
404
405}
406
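The worker above packs the reader count, writer count, waiting-reader count and the current direction into the single u64State word, so every transition can be done with one atomic compare-and-exchange. As a reader's aid, here is a minimal, self-contained sketch of decoding such a packed word; the concrete shift and mask values are an assumption modelled on the RTCSRW_* constants in iprt/critsectrw.h and may differ between releases.

#include <stdint.h>
#include <stdio.h>

/* ASSUMED layout: 15-bit counters, direction flag at bit 31,
   waiting-reader count in the upper half. Illustrative only. */
#define MY_CNT_MASK           UINT64_C(0x7fff)
#define MY_CNT_RD_SHIFT       0    /* active readers   */
#define MY_CNT_WR_SHIFT       16   /* writers          */
#define MY_DIR_SHIFT          31   /* 0=read, 1=write  */
#define MY_WAIT_CNT_RD_SHIFT  32   /* waiting readers  */

static void myDecodeState(uint64_t u64State)
{
    uint64_t cReads     = (u64State >> MY_CNT_RD_SHIFT)      & MY_CNT_MASK;
    uint64_t cWrites    = (u64State >> MY_CNT_WR_SHIFT)      & MY_CNT_MASK;
    uint64_t cWaitReads = (u64State >> MY_WAIT_CNT_RD_SHIFT) & MY_CNT_MASK;
    int      fWriteDir  = (int)((u64State >> MY_DIR_SHIFT) & 1);
    printf("dir=%s cReads=%u cWrites=%u cWaitingReads=%u\n",
           fWriteDir ? "WRITE" : "READ",
           (unsigned)cReads, (unsigned)cWrites, (unsigned)cWaitReads);
}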
407
408/**
409 * Enter a critical section with shared (read) access.
410 *
411 * @returns VBox status code.
412 * @retval VINF_SUCCESS on success.
413 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
414 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
415 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
416 * during the operation.
417 *
418 * @param pThis Pointer to the read/write critical section.
419 * @param rcBusy The status code to return when we're in RC or R0 and the
420 * section is busy. Pass VINF_SUCCESS to acquire the
421 * critical section thru a ring-3 call if necessary.
422 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
423 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
424 * RTCritSectRwEnterShared.
425 */
426VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
427{
428#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
429 return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
430#else
431 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
432 return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
433#endif
434}
435
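As a caller-side illustration of the rcBusy contract: ring-0/raw-mode device code typically passes a VINF_* status so the access can be restarted in ring-3, while ring-3 callers simply block. A hedged sketch reusing the hypothetical MYDEVICE structure from the earlier example; VINF_IOM_R3_MMIO_READ is used only as an example busy status.

/* Hypothetical caller sketch (assumes the VBox VMM build environment). */
static int myDevMmioReadConfig(MYDEVICE *pDev, uint32_t *puValue)
{
    /* In R0/RC a busy section makes this return the rcBusy status so the
       access can be retried in ring-3; in ring-3 the call simply blocks. */
    int rc = PDMCritSectRwEnterShared(&pDev->CritSectRw, VINF_IOM_R3_MMIO_READ);
    if (rc != VINF_SUCCESS)
        return rc;
    *puValue = pDev->uConfig;                    /* read the shared state */
    PDMCritSectRwLeaveShared(&pDev->CritSectRw);
    return VINF_SUCCESS;
}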
436
437/**
438 * Enter a critical section with shared (read) access.
439 *
440 * @returns VBox status code.
441 * @retval VINF_SUCCESS on success.
442 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
443 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
444 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
445 * during the operation.
446 *
447 * @param pThis Pointer to the read/write critical section.
448 * @param rcBusy The status code to return when we're in RC or R0 and the
449 * section is busy. Pass VINF_SUCCESS to acquire the
450 * critical section thru a ring-3 call if necessary.
451 * @param uId Where we're entering the section.
452 * @param SRC_POS The source position.
453 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
454 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
455 * RTCritSectRwEnterSharedDebug.
456 */
457VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
458{
459 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
460#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
461 return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
462#else
463 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
464 return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
465#endif
466}
467
468
469/**
470 * Try enter a critical section with shared (read) access.
471 *
472 * @returns VBox status code.
473 * @retval VINF_SUCCESS on success.
474 * @retval VERR_SEM_BUSY if the critsect was owned.
475 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
476 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
477 * during the operation.
478 *
479 * @param pThis Pointer to the read/write critical section.
480 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
481 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
482 * RTCritSectRwTryEnterShared.
483 */
484VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
485{
486#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
487 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
488#else
489 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
490 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
491#endif
492}
493
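A small sketch of the non-blocking pattern this entry point enables, again with the hypothetical MYDEVICE structure: attempt the shared lock and fall back gracefully on VERR_SEM_BUSY instead of waiting.

/* Hypothetical sketch: poll device state without ever blocking. */
static bool myDevTryReadConfig(MYDEVICE *pDev, uint32_t *puValue)
{
    if (RT_SUCCESS(PDMCritSectRwTryEnterShared(&pDev->CritSectRw)))
    {
        *puValue = pDev->uConfig;
        PDMCritSectRwLeaveShared(&pDev->CritSectRw);
        return true;
    }
    return false; /* VERR_SEM_BUSY: the caller retries later. */
}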
494
495/**
496 * Try enter a critical section with shared (read) access.
497 *
498 * @returns VBox status code.
499 * @retval VINF_SUCCESS on success.
500 * @retval VERR_SEM_BUSY if the critsect was owned.
501 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
502 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
503 * during the operation.
504 *
505 * @param pThis Pointer to the read/write critical section.
506 * @param uId Where we're entering the section.
507 * @param SRC_POS The source position.
508 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
509 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
510 * RTCritSectRwTryEnterSharedDebug.
511 */
512VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
513{
514 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
515#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
516 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
517#else
518 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
519 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
520#endif
521}
522
523
524#ifdef IN_RING3
525/**
526 * Enters a PDM read/write critical section with shared (read) access.
527 *
528 * @returns VINF_SUCCESS if entered successfully.
529 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
530 * during the operation.
531 *
532 * @param pThis Pointer to the read/write critical section.
533 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
534 */
535VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
536{
537 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
538}
539#endif
540
541
542/**
543 * Leave a critical section held with shared access.
544 *
545 * @returns VBox status code.
546 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
547 * during the operation.
548 * @param pThis Pointer to the read/write critical section.
549 * @param fNoVal No validation records (i.e. queued release).
550 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
551 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
552 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
553 */
554static int pdmCritSectRwLeaveSharedWorker(PPDMCRITSECTRW pThis, bool fNoVal)
555{
556 /*
557 * Validate handle.
558 */
559 AssertPtr(pThis);
560 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
561
562#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
563 NOREF(fNoVal);
564#endif
565
566 /*
567 * Check the direction and take action accordingly.
568 */
569 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
570 uint64_t u64OldState = u64State;
571 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
572 {
573#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
574 if (fNoVal)
575 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
576 else
577 {
578 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
579 if (RT_FAILURE(rc9))
580 return rc9;
581 }
582#endif
583 for (;;)
584 {
585 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
586 AssertReturn(c > 0, VERR_NOT_OWNER);
587 c--;
588
589 if ( c > 0
590 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
591 {
592 /* Don't change the direction. */
593 u64State &= ~RTCSRW_CNT_RD_MASK;
594 u64State |= c << RTCSRW_CNT_RD_SHIFT;
595 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
596 break;
597 }
598 else
599 {
600#if defined(IN_RING3) || defined(IN_RING0)
601# ifdef IN_RING0
602 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
603 && ASMIntAreEnabled())
604# endif
605 {
606 /* Reverse the direction and signal the writer threads. */
607 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
608 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
609 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
610 {
611 int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
612 AssertRC(rc);
613 break;
614 }
615 }
616#endif /* IN_RING3 || IN_RING0 */
617#ifndef IN_RING3
618# ifdef IN_RING0
619 else
620# endif
621 {
622 /* Queue the exit request (ring-3). */
623 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
624 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
625 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
626 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
627 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
628 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
629 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
630 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
631 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
632 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
633 break;
634 }
635#endif
636 }
637
638 ASMNopPause();
639 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
640 u64OldState = u64State;
641 }
642 }
643 else
644 {
645 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
646 RTNATIVETHREAD hNativeWriter;
647 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
648 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
649 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
650#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
651 if (!fNoVal)
652 {
653 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
654 if (RT_FAILURE(rc))
655 return rc;
656 }
657#endif
658 ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
659 }
660
661 return VINF_SUCCESS;
662}
663
664/**
665 * Leave a critical section held with shared access.
666 *
667 * @returns VBox status code.
668 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
669 * during the operation.
670 * @param pThis Pointer to the read/write critical section.
671 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
672 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
673 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
674 */
675VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis)
676{
677 return pdmCritSectRwLeaveSharedWorker(pThis, false /*fNoVal*/);
678}
679
680
681#if defined(IN_RING3) || defined(IN_RING0)
682/**
683 * PDMCritSectBothFF interface.
684 *
685 * @param pThis Pointer to the read/write critical section.
686 */
687void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis)
688{
689 pdmCritSectRwLeaveSharedWorker(pThis, true /*fNoVal*/);
690}
691#endif
692
693
694/**
695 * Worker that enters a read/write critical section with exclusive access.
696 *
697 * @returns VBox status code.
698 * @param pThis Pointer to the read/write critical section.
699 * @param rcBusy The busy return code for ring-0 and ring-3.
700 * @param fTryOnly Only try enter it, don't wait.
701 * @param pSrcPos The source position. (Can be NULL.)
702 * @param fNoVal No validation records.
703 */
704static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
705{
706 /*
707 * Validate input.
708 */
709 AssertPtr(pThis);
710 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
711
712#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
713 NOREF(pSrcPos);
714 NOREF(fNoVal);
715#endif
716#ifdef IN_RING3
717 NOREF(rcBusy);
718#endif
719
720#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
721 RTTHREAD hThreadSelf = NIL_RTTHREAD;
722 if (!fTryOnly)
723 {
724 hThreadSelf = RTThreadSelfAutoAdopt();
725 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
726 if (RT_FAILURE(rc9))
727 return rc9;
728 }
729#endif
730
731 /*
732 * Check if we're already the owner and just recursing.
733 */
734 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
735 RTNATIVETHREAD hNativeWriter;
736 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
737 if (hNativeSelf == hNativeWriter)
738 {
739 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
740#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
741 if (!fNoVal)
742 {
743 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
744 if (RT_FAILURE(rc9))
745 return rc9;
746 }
747#endif
748 Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
749 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
750 ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
751 return VINF_SUCCESS;
752 }
753
754 /*
755 * Get cracking.
756 */
757 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
758 uint64_t u64OldState = u64State;
759
760 for (;;)
761 {
762 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
763 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
764 {
765 /* It flows in the right direction, try to follow it before it changes. */
766 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
767 c++;
768 Assert(c < RTCSRW_CNT_MASK / 2);
769 u64State &= ~RTCSRW_CNT_WR_MASK;
770 u64State |= c << RTCSRW_CNT_WR_SHIFT;
771 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
772 break;
773 }
774 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
775 {
776 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
777 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
778 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
779 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
780 break;
781 }
782 else if (fTryOnly)
783 {
784 /* Wrong direction and we're not supposed to wait, just return. */
785 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
786 return VERR_SEM_BUSY;
787 }
788 else
789 {
790 /* Add ourselves to the write count and break out to do the wait. */
791 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
792 c++;
793 Assert(c < RTCSRW_CNT_MASK / 2);
794 u64State &= ~RTCSRW_CNT_WR_MASK;
795 u64State |= c << RTCSRW_CNT_WR_SHIFT;
796 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
797 break;
798 }
799
800 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
801 return VERR_SEM_DESTROYED;
802
803 ASMNopPause();
804 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
805 u64OldState = u64State;
806 }
807
808 /*
809 * If we're in write mode now try to grab the ownership. Play fair if there
810 * are threads already waiting.
811 */
812 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
813#if defined(IN_RING3)
814 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
815 || fTryOnly)
816#endif
817 ;
818 if (fDone)
819 ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
820 if (!fDone)
821 {
822 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
823
824#if defined(IN_RING3) || defined(IN_RING0)
825 if ( !fTryOnly
826# ifdef IN_RING0
827 && RTThreadPreemptIsEnabled(NIL_RTTHREAD)
828 && ASMIntAreEnabled()
829# endif
830 )
831 {
832
833 /*
834 * Wait for our turn.
835 */
836 for (uint32_t iLoop = 0; ; iLoop++)
837 {
838 int rc;
839# ifdef IN_RING3
840# ifdef PDMCRITSECTRW_STRICT
841 if (hThreadSelf == NIL_RTTHREAD)
842 hThreadSelf = RTThreadSelfAutoAdopt();
843 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
844 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
845 if (RT_SUCCESS(rc))
846# else
847 RTTHREAD hThreadSelf = RTThreadSelf();
848 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
849# endif
850# endif
851 {
852 for (;;)
853 {
854 rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
855 (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
856 RT_INDEFINITE_WAIT);
857 if ( rc != VERR_INTERRUPTED
858 || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
859 break;
860# ifdef IN_RING0
861 pdmR0CritSectRwYieldToRing3(pThis);
862# endif
863 }
864# ifdef IN_RING3
865 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
866# endif
867 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
868 return VERR_SEM_DESTROYED;
869 }
870 if (RT_FAILURE(rc))
871 {
872 /* Decrement the counts and return the error. */
873 for (;;)
874 {
875 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
876 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
877 c--;
878 u64State &= ~RTCSRW_CNT_WR_MASK;
879 u64State |= c << RTCSRW_CNT_WR_SHIFT;
880 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
881 break;
882 }
883 return rc;
884 }
885
886 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
887 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
888 {
889 ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
890 if (fDone)
891 break;
892 }
893 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
894 }
895
896 }
897 else
898#endif /* IN_RING3 || IN_RING0 */
899 {
900#ifdef IN_RING3
901 /* TryEnter call - decrement the number of (waiting) writers. */
902#else
903 /* We cannot call SUPSemEventWaitNoResume in this context. Go back to
904 ring-3 and do it there or return rcBusy. */
905#endif
906
907 for (;;)
908 {
909 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
910 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
911 c--;
912 u64State &= ~RTCSRW_CNT_WR_MASK;
913 u64State |= c << RTCSRW_CNT_WR_SHIFT;
914 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
915 break;
916 }
917
918#ifdef IN_RING3
919 return VERR_SEM_BUSY;
920#else
921 if (rcBusy == VINF_SUCCESS)
922 {
923 Assert(!fTryOnly);
924 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
925 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
926 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
927 * back to ring-3. Goes for both kinds of crit sects. */
928 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
929 }
930 return rcBusy;
931#endif
932 }
933 }
934
935 /*
936 * Got it!
937 */
938 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
939 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
940 Assert(pThis->s.Core.cWriterReads == 0);
941#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
942 if (!fNoVal)
943 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
944#endif
945 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
946 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
947
948 return VINF_SUCCESS;
949}
950
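Taken together with the shared-enter worker earlier, the write owner may nest freely: repeated exclusive enters bump cWriteRecursions, and a shared enter by the current writer bumps cWriterReads without touching the state word. A hedged sketch of legal nesting with the hypothetical MYDEVICE structure; leaves must mirror the enters, and all writer reads must be released before the final exclusive leave.

/* Hypothetical nesting sketch (error handling omitted for brevity). */
static void myDevUpdate(MYDEVICE *pDev)
{
    PDMCritSectRwEnterExcl(&pDev->CritSectRw, VERR_SEM_BUSY);   /* write lock         */
    PDMCritSectRwEnterExcl(&pDev->CritSectRw, VERR_SEM_BUSY);   /* cWriteRecursions=2 */
    PDMCritSectRwEnterShared(&pDev->CritSectRw, VERR_SEM_BUSY); /* cWriterReads=1     */

    /* ... read and modify device state ... */

    PDMCritSectRwLeaveShared(&pDev->CritSectRw); /* writer reads must go first */
    PDMCritSectRwLeaveExcl(&pDev->CritSectRw);
    PDMCritSectRwLeaveExcl(&pDev->CritSectRw);   /* final release              */
}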
951
952/**
953 * Try enter a critical section with exclusive (write) access.
954 *
955 * @returns VBox status code.
956 * @retval VINF_SUCCESS on success.
957 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
958 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
959 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
960 * during the operation.
961 *
962 * @param pThis Pointer to the read/write critical section.
963 * @param rcBusy The status code to return when we're in RC or R0 and the
964 * section is busy. Pass VINF_SUCCESS to acquire the
965 * critical section thru a ring-3 call if necessary.
966 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
967 * PDMCritSectRwTryEnterExclDebug,
968 * PDMCritSectEnterDebug, PDMCritSectEnter,
969 * RTCritSectRwEnterExcl.
970 */
971VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
972{
973#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
974 return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
975#else
976 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
977 return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
978#endif
979}
980
981
982/**
983 * Try enter a critical section with exclusive (write) access.
984 *
985 * @returns VBox status code.
986 * @retval VINF_SUCCESS on success.
987 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
988 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
989 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
990 * during the operation.
991 *
992 * @param pThis Pointer to the read/write critical section.
993 * @param rcBusy The status code to return when we're in RC or R0 and the
994 * section is busy. Pass VINF_SUCCESS to acquire the
995 * critical section thru a ring-3 call if necessary.
996 * @param uId Where we're entering the section.
997 * @param SRC_POS The source position.
998 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
999 * PDMCritSectRwTryEnterExclDebug,
1000 * PDMCritSectEnterDebug, PDMCritSectEnter,
1001 * RTCritSectRwEnterExclDebug.
1002 */
1003VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1004{
1005 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1006#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1007 return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1008#else
1009 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1010 return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1011#endif
1012}
1013
1014
1015/**
1016 * Try enter a critical section with exclusive (write) access.
1017 *
1018 * @retval VINF_SUCCESS on success.
1019 * @retval VERR_SEM_BUSY if the critsect was owned.
1020 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1021 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1022 * during the operation.
1023 *
1024 * @param pThis Pointer to the read/write critical section.
1025 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1026 * PDMCritSectRwEnterExclDebug,
1027 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1028 * RTCritSectRwTryEnterExcl.
1029 */
1030VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
1031{
1032#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1033 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1034#else
1035 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1036 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1037#endif
1038}
1039
1040
1041/**
1042 * Try enter a critical section with exclusive (write) access.
1043 *
1044 * @retval VINF_SUCCESS on success.
1045 * @retval VERR_SEM_BUSY if the critsect was owned.
1046 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1047 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1048 * during the operation.
1049 *
1050 * @param pThis Pointer to the read/write critical section.
1051 * @param uId Where we're entering the section.
1052 * @param SRC_POS The source position.
1053 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1054 * PDMCritSectRwEnterExclDebug,
1055 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1056 * RTCritSectRwTryEnterExclDebug.
1057 */
1058VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1059{
1060 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1061#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1062 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1063#else
1064 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1065 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1066#endif
1067}
1068
1069
1070#ifdef IN_RING3
1071/**
1072 * Enters a PDM read/write critical section with exclusive (write) access.
1073 *
1074 * @returns VINF_SUCCESS if entered successfully.
1075 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1076 * during the operation.
1077 *
1078 * @param pThis Pointer to the read/write critical section.
1079 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1080 */
1081VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
1082{
1083 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
1084}
1085#endif /* IN_RING3 */
1086
1087
1088/**
1089 * Leave a critical section held exclusively.
1090 *
1091 * @returns VBox status code.
1092 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1093 * during the operation.
1094 * @param pThis Pointer to the read/write critical section.
1095 * @param fNoVal No validation records (i.e. queued release).
1096 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1097 */
1098static int pdmCritSectRwLeaveExclWorker(PPDMCRITSECTRW pThis, bool fNoVal)
1099{
1100 /*
1101 * Validate handle.
1102 */
1103 AssertPtr(pThis);
1104 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1105
1106#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1107 NOREF(fNoVal);
1108#endif
1109
1110 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
1111 RTNATIVETHREAD hNativeWriter;
1112 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
1113 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1114
1115 /*
1116 * Unwind one recursion. Is it the final one?
1117 */
1118 if (pThis->s.Core.cWriteRecursions == 1)
1119 {
1120 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1121#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1122 if (fNoVal)
1123 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1124 else
1125 {
1126 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1127 if (RT_FAILURE(rc9))
1128 return rc9;
1129 }
1130#endif
1131 /*
1132 * Update the state.
1133 */
1134#if defined(IN_RING3) || defined(IN_RING0)
1135# ifdef IN_RING0
1136 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
1137 && ASMIntAreEnabled())
1138# endif
1139 {
1140 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1141 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1142 ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);
1143
1144 for (;;)
1145 {
1146 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1147 uint64_t u64OldState = u64State;
1148
1149 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1150 Assert(c > 0);
1151 c--;
1152
1153 if ( c > 0
1154 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1155 {
1156 /* Don't change the direction, wake up the next writer if any. */
1157 u64State &= ~RTCSRW_CNT_WR_MASK;
1158 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1159 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
1160 {
1161 if (c > 0)
1162 {
1163 int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1164 AssertRC(rc);
1165 }
1166 break;
1167 }
1168 }
1169 else
1170 {
1171 /* Reverse the direction and signal the reader threads. */
1172 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1173 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1174 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
1175 {
1176 Assert(!pThis->s.Core.fNeedReset);
1177 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1178 int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1179 AssertRC(rc);
1180 break;
1181 }
1182 }
1183
1184 ASMNopPause();
1185 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
1186 return VERR_SEM_DESTROYED;
1187 }
1188 }
1189#endif /* IN_RING3 || IN_RING0 */
1190#ifndef IN_RING3
1191# ifdef IN_RING0
1192 else
1193# endif
1194 {
1195 /*
1196 * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal,
1197 * so queue the exit request (ring-3).
1198 */
1199 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
1200 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1201 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1202 LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
1203 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
1204 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
1205 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1206 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1207 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1208 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1209 }
1210#endif
1211 }
1212 else
1213 {
1214 /*
1215 * Not the final recursion.
1216 */
1217 Assert(pThis->s.Core.cWriteRecursions != 0);
1218#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1219 if (fNoVal)
1220 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1221 else
1222 {
1223 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1224 if (RT_FAILURE(rc9))
1225 return rc9;
1226 }
1227#endif
1228 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1229 }
1230
1231 return VINF_SUCCESS;
1232}
1233
1234
1235/**
1236 * Leave a critical section held exclusively.
1237 *
1238 * @returns VBox status code.
1239 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1240 * during the operation.
1241 * @param pThis Pointer to the read/write critical section.
1242 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1243 */
1244VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis)
1245{
1246 return pdmCritSectRwLeaveExclWorker(pThis, false /*fNoVal*/);
1247}
1248
1249
1250#if defined(IN_RING3) || defined(IN_RING0)
1251/**
1252 * PDMCritSectBothFF interface.
1253 *
1254 * @param pThis Pointer to the read/write critical section.
1255 */
1256void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis)
1257{
1258 pdmCritSectRwLeaveExclWorker(pThis, true /*fNoVal*/);
1259}
1260#endif
1261
1262
1263/**
1264 * Checks if the caller is the exclusive (write) owner of the critical section.
1265 *
1266 * @retval true if owner.
1267 * @retval false if not owner.
1268 * @param pThis Pointer to the read/write critical section.
1269 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1270 * RTCritSectRwIsWriteOwner.
1271 */
1272VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis)
1273{
1274 /*
1275 * Validate handle.
1276 */
1277 AssertPtr(pThis);
1278 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1279
1280 /*
1281 * Check ownership.
1282 */
1283 RTNATIVETHREAD hNativeWriter;
1284 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
1285 if (hNativeWriter == NIL_RTNATIVETHREAD)
1286 return false;
1287 return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis);
1288}
1289
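Typical use is guarding invariants in code that expects to be called with the write lock held. A small sketch with the hypothetical MYDEVICE structure from the earlier examples:

/* Hypothetical sketch: assert write ownership in a locked helper. */
static void myDevWriteLockedHelper(MYDEVICE *pDev)
{
    Assert(PDMCritSectRwIsWriteOwner(&pDev->CritSectRw));
    /* ... code that relies on exclusive access ... */
}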
1290
1291/**
1292 * Checks if the caller is one of the read owners of the critical section.
1293 *
1294 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1295 * enabled. Meaning, the answer is not trustworthy unless
1296 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1297 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1298 * creating the semaphore. And finally, if you used a locking class,
1299 * don't disable deadlock detection by setting cMsMinDeadlock to
1300 * RT_INDEFINITE_WAIT.
1301 *
1302 * In short, only use this for assertions.
1303 *
1304 * @returns @c true if reader, @c false if not.
1305 * @param pThis Pointer to the read/write critical section.
1306 * @param fWannaHear What you'd like to hear when lock validation is not
1307 * available. (For avoiding asserting all over the place.)
1308 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1309 */
1310VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear)
1311{
1312 /*
1313 * Validate handle.
1314 */
1315 AssertPtr(pThis);
1316 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1317
1318 /*
1319 * Inspect the state.
1320 */
1321 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1322 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1323 {
1324 /*
1325 * It's in write mode, so we can only be a reader if we're also the
1326 * current writer.
1327 */
1328 RTNATIVETHREAD hWriter;
1329 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
1330 if (hWriter == NIL_RTNATIVETHREAD)
1331 return false;
1332 return hWriter == pdmCritSectRwGetNativeSelf(pThis);
1333 }
1334
1335 /*
1336 * Read mode. If there are no current readers, then we cannot be a reader.
1337 */
1338 if (!(u64State & RTCSRW_CNT_RD_MASK))
1339 return false;
1340
1341#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1342 /*
1343 * Ask the lock validator.
1344 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
1345 */
1346 NOREF(fWannaHear);
1347 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
1348#else
1349 /*
1350 * Ok, we don't know, just tell the caller what they want to hear.
1351 */
1352 return fWannaHear;
1353#endif
1354}
1355
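Given the caveat above, the fWannaHear parameter is what keeps assertions quiet in builds without lock validation. A small hedged sketch, again with the hypothetical MYDEVICE structure:

/* Hypothetical sketch: in non-strict builds the check degenerates to the
   fWannaHear value (true here), so the assertion stays silent. */
static void myDevReadLockedHelper(MYDEVICE *pDev)
{
    Assert(PDMCritSectRwIsReadOwner(&pDev->CritSectRw, true /*fWannaHear*/));
    /* ... code that relies on holding at least a read lock ... */
}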
1356
1357/**
1358 * Gets the write recursion count.
1359 *
1360 * @returns The write recursion count (0 if bad critsect).
1361 * @param pThis Pointer to the read/write critical section.
1362 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
1363 * RTCritSectRwGetWriteRecursion.
1364 */
1365VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
1366{
1367 /*
1368 * Validate handle.
1369 */
1370 AssertPtr(pThis);
1371 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1372
1373 /*
1374 * Return the requested data.
1375 */
1376 return pThis->s.Core.cWriteRecursions;
1377}
1378
1379
1380/**
1381 * Gets the read recursion count of the current writer.
1382 *
1383 * @returns The read recursion count (0 if bad critsect).
1384 * @param pThis Pointer to the read/write critical section.
1385 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
1386 * RTCritSectRwGetWriterReadRecursion.
1387 */
1388VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
1389{
1390 /*
1391 * Validate handle.
1392 */
1393 AssertPtr(pThis);
1394 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1395
1396 /*
1397 * Return the requested data.
1398 */
1399 return pThis->s.Core.cWriterReads;
1400}
1401
1402
1403/**
1404 * Gets the current number of reads.
1405 *
1406 * This includes all read recursions, so it might be higher than the number of
1407 * read owners. It does not include reads done by the current writer.
1408 *
1409 * @returns The read count (0 if bad critsect).
1410 * @param pThis Pointer to the read/write critical section.
1411 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
1412 * RTCritSectRwGetReadCount.
1413 */
1414VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
1415{
1416 /*
1417 * Validate input.
1418 */
1419 AssertPtr(pThis);
1420 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1421
1422 /*
1423 * Return the requested data.
1424 */
1425 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1426 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
1427 return 0;
1428 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
1429}
1430
1431
1432/**
1433 * Checks if the read/write critical section is initialized or not.
1434 *
1435 * @retval true if initialized.
1436 * @retval false if not initialized.
1437 * @param pThis Pointer to the read/write critical section.
1438 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
1439 */
1440VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
1441{
1442 AssertPtr(pThis);
1443 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
1444}
1445