VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@61683

Last change on this file since 61683 was 58126, checked in by vboxsync, 9 years ago

VMM: Fixed almost all the Doxygen warnings.

/* $Id: PDMAllCritSectRw.cpp 58126 2015-10-08 20:59:48Z vboxsync $ */
/** @file
 * IPRT - Read/Write Critical Section, Generic.
 */

/*
 * Copyright (C) 2009-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The number of loops to spin for shared access in ring-3. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3    20
/** The number of loops to spin for shared access in ring-0. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0    128
/** The number of loops to spin for shared access in the raw-mode context. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC    128

/** The number of loops to spin for exclusive access in ring-3. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3    20
/** The number of loops to spin for exclusive access in ring-0. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0    256
/** The number of loops to spin for exclusive access in the raw-mode context. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC    256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectRwEnterExcl
#undef PDMCritSectRwTryEnterExcl
#undef PDMCritSectRwEnterShared
#undef PDMCritSectRwTryEnterShared


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pThis       The read/write critical section.  This is only used in
 *                      R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis)
{
#ifdef IN_RING3
    NOREF(pThis);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


#ifdef IN_RING3
/**
 * Changes the lock validator sub-class of the read/write critical section.
 *
 * It is recommended to make sure that nobody is using this critical section
 * while changing the value.
 *
 * @returns The old sub-class.  RTLOCKVAL_SUB_CLASS_INVALID is returned if the
 *          lock validator isn't compiled in or either of the parameters are
 *          invalid.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uSubClass   The new sub-class value.
 */
VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
{
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);

    RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
# else
    NOREF(uSubClass);
    return RTLOCKVAL_SUB_CLASS_INVALID;
# endif
}
#endif /* IN_RING3 */
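
/* Illustrative only, not part of the original file: a component wanting its
 * own lock-order class might set a sub-class right after creating the
 * section.  RTLOCKVAL_SUB_CLASS_USER is the first value IPRT's lock
 * validator reserves for users; pCritSect is an assumed variable.
 *
 *     uint32_t uOldSubClass = PDMR3CritSectRwSetSubClass(pCritSect, RTLOCKVAL_SUB_CLASS_USER + 1);
 */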


#ifdef IN_RING0
/**
 * Go back to ring-3 so the kernel can do signals, APCs and other fun things.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
static void pdmR0CritSectRwYieldToRing3(PPDMCRITSECTRW pThis)
{
    PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
    int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
    AssertRC(rc);
}
#endif /* IN_RING0 */


/**
 * Worker that enters a read/write critical section with shared access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(pSrcPos);
    NOREF(fNoVal);
#endif

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis))
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
                Assert(!pThis->s.Core.fNeedReset);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                {
                    int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
                    if (RT_FAILURE(rc9))
                        return rc9;
                }
#endif
                Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
                return VINF_SUCCESS; /* don't break! */
            }

            /*
             * If we're only trying, return already.
             */
            if (fTryOnly)
            {
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                return VERR_SEM_BUSY;
            }

#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
            if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                && ASMIntAreEnabled())
# endif
            {
                /*
                 * Add ourselves to the queue and wait for the direction to change.
                 */
                uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
                c++;
                Assert(c < RTCSRW_CNT_MASK / 2);

                uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                cWait++;
                Assert(cWait <= c);
                Assert(cWait < RTCSRW_CNT_MASK / 2);

                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);

                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    for (uint32_t iLoop = 0; ; iLoop++)
                    {
                        int rc;
# ifdef IN_RING3
#  if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                        rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                                   RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                        if (RT_SUCCESS(rc))
#  else
                        RTTHREAD hThreadSelf = RTThreadSelf();
                        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#  endif
# endif
                        {
                            for (;;)
                            {
                                rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                  (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
                                                                  RT_INDEFINITE_WAIT);
                                if (   rc != VERR_INTERRUPTED
                                    || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                    break;
# ifdef IN_RING0
                                pdmR0CritSectRwYieldToRing3(pThis);
# endif
                            }
# ifdef IN_RING3
                            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
# endif
                            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                return VERR_SEM_DESTROYED;
                        }
                        if (RT_FAILURE(rc))
                        {
                            /* Decrement the counts and return the error. */
                            for (;;)
                            {
                                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                                c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
                                c--;
                                cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                                cWait--;
                                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
                                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                                    break;
                            }
                            return rc;
                        }

                        Assert(pThis->s.Core.fNeedReset);
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
                            break;
                        AssertMsg(iLoop < 1, ("%u\n", iLoop));
                    }

                    /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                    for (;;)
                    {
                        u64OldState = u64State;

                        cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                        Assert(cWait > 0);
                        cWait--;
                        u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                        u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;

                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                        {
                            if (cWait == 0)
                            {
                                if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
                                {
                                    int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                   (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                                    AssertRCReturn(rc, rc);
                                }
                            }
                            break;
                        }
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                    }

# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                    if (!fNoVal)
                        RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
# endif
                    break;
                }
            }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
            else
# endif
            {
                /*
                 * We cannot call SUPSemEventMultiWaitNoResume in this context.  Go
                 * back to ring-3 and do it there or return rcBusy.
                 */
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                if (rcBusy == VINF_SUCCESS)
                {
                    PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
                    PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
                    /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                     *        back to ring-3.  Goes for both kinds of crit sects. */
                    return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
                }
                return rcBusy;
            }
#endif /* !IN_RING3 */
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /* got it! */
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
    return VINF_SUCCESS;
}
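
/* Illustrative sketch, not part of the original file: the composite u64State
 * word packs several fields which the worker above decodes with the same
 * masks and shifts, roughly like this:
 *
 *     uint64_t u64State   = ASMAtomicReadU64(&pThis->s.Core.u64State);
 *     uint64_t cReads     = (u64State & RTCSRW_CNT_RD_MASK)      >> RTCSRW_CNT_RD_SHIFT;
 *     uint64_t cWrites    = (u64State & RTCSRW_CNT_WR_MASK)      >> RTCSRW_CNT_WR_SHIFT;
 *     uint64_t cWaitReads = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
 *     bool     fWriteMode = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
 *
 * Entering and leaving then amounts to recomputing the word and publishing it
 * with ASMAtomicCmpXchgU64, retrying on contention.
 */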


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterShared.
 */
VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
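
/* Illustrative usage, not part of the original file (pCritSect and the code
 * being protected are assumptions):
 *
 *     int rc = PDMCritSectRwEnterShared(pCritSect, VERR_SEM_BUSY);
 *     if (RT_SUCCESS(rc))
 *     {
 *         ... read the shared state ...
 *         PDMCritSectRwLeaveShared(pCritSect);
 *     }
 *
 * In ring-0/raw-mode, passing VINF_SUCCESS instead of a busy status makes a
 * contended enter fall back to a ring-3 call rather than failing.
 */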


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
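
/* The debug variant is normally reached through the strict-build API mapping
 * and records where the section was entered.  An explicit call might look
 * like this (an illustrative sketch only):
 *
 *     rc = PDMCritSectRwEnterSharedDebug(pCritSect, VERR_SEM_BUSY,
 *                                        (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
 */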


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterShared.
 */
VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
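
/* Illustrative try-enter pattern, not part of the original file: take the
 * lock opportunistically and fall back to other work when it is busy.
 *
 *     if (RT_SUCCESS(PDMCritSectRwTryEnterShared(pCritSect)))
 *     {
 *         ... read the shared state ...
 *         PDMCritSectRwLeaveShared(pCritSect);
 *     }
 *     else
 *         ... defer the work or take a slow path ...
 */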


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with shared (read) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
static int pdmCritSectRwLeaveSharedWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(fNoVal);
#endif

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
    {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
        else
        {
            int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTCSRW_CNT_RD_MASK;
                u64State |= c << RTCSRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }
            else
            {
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
                if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                    && ASMIntAreEnabled())
# endif
                {
                    /* Reverse the direction and signal the writer threads. */
                    u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                        AssertRC(rc);
                        break;
                    }
                }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
                else
# endif
                {
                    /* Queue the exit request (ring-3). */
                    PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
                    PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
                    uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
                    LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
                    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
                    pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
                    STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                    break;
                }
#endif
            }

            ASMNopPause();
            u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
            if (RT_FAILURE(rc))
                return rc;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
    }

    return VINF_SUCCESS;
}


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveSharedWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveSharedWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Worker that enters a read/write critical section with exclusive access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(pSrcPos);
    NOREF(fNoVal);
#endif

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (!fTryOnly)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
        ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if (   (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
            || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if (fTryOnly)
        {
            /* Wrong direction and we're not supposed to wait, just return. */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
            return VERR_SEM_BUSY;
        }
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try grab the ownership.  Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
#if defined(IN_RING3)
              && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
                  || fTryOnly)
#endif
               ;
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    if (!fDone)
    {
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));

#if defined(IN_RING3) || defined(IN_RING0)
        if (   !fTryOnly
# ifdef IN_RING0
            && RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled()
# endif
           )
        {
            /*
             * Wait for our turn.
             */
            for (uint32_t iLoop = 0; ; iLoop++)
            {
                int rc;
# ifdef IN_RING3
#  ifdef PDMCRITSECTRW_STRICT
                if (hThreadSelf == NIL_RTTHREAD)
                    hThreadSelf = RTThreadSelfAutoAdopt();
                rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
                                                         RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
                if (RT_SUCCESS(rc))
#  else
                RTTHREAD hThreadSelf = RTThreadSelf();
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#  endif
# endif
                {
                    for (;;)
                    {
                        rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                     (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
                                                     RT_INDEFINITE_WAIT);
                        if (   rc != VERR_INTERRUPTED
                            || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                            break;
# ifdef IN_RING0
                        pdmR0CritSectRwYieldToRing3(pThis);
# endif
                    }
# ifdef IN_RING3
                    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
# endif
                    if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                        return VERR_SEM_DESTROYED;
                }
                if (RT_FAILURE(rc))
                {
                    /* Decrement the counts and return the error. */
                    for (;;)
                    {
                        u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                        c--;
                        u64State &= ~RTCSRW_CNT_WR_MASK;
                        u64State |= c << RTCSRW_CNT_WR_SHIFT;
                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                            break;
                    }
                    return rc;
                }

                u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
                {
                    ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                    if (fDone)
                        break;
                }
                AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
            }
        }
        else
#endif /* IN_RING3 || IN_RING0 */
        {
#ifdef IN_RING3
            /* TryEnter call - decrement the number of (waiting) writers. */
#else
            /* We cannot call SUPSemEventWaitNoResume in this context.  Go back to
               ring-3 and do it there or return rcBusy. */
#endif

            for (;;)
            {
                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                c--;
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }

#ifdef IN_RING3
            return VERR_SEM_BUSY;
#else
            if (rcBusy == VINF_SUCCESS)
            {
                Assert(!fTryOnly);
                PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
                PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                 *        back to ring-3.  Goes for both kinds of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
    Assert(pThis->s.Core.cWriterReads == 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fNoVal)
        RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
    STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);

    return VINF_SUCCESS;
}


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExcl.
 */
VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
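
/* Illustrative usage, not part of the original file: a ring-0 caller
 * typically hands in a busy status it can cope with, e.g.
 *
 *     int rc = PDMCritSectRwEnterExcl(pCritSect, VERR_SEM_BUSY);
 *     if (rc == VERR_SEM_BUSY)
 *         return rc;                      // caller retries from ring-3
 *     AssertRCReturn(rc, rc);
 *     ... modify the protected state ...
 *     PDMCritSectRwLeaveExcl(pCritSect);
 */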


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
 *          RTCritSectRwTryEnterExcl.
 */
VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
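
/* Illustrative only: opportunistic exclusive access from any context.
 *
 *     if (RT_SUCCESS(PDMCritSectRwTryEnterExcl(pCritSect)))
 *     {
 *         ... modify the protected state ...
 *         PDMCritSectRwLeaveExcl(pCritSect);
 *     }
 */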


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
 *          RTCritSectRwTryEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with exclusive (write) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif /* IN_RING3 */


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
static int pdmCritSectRwLeaveExclWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(fNoVal);
#endif

    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind one recursion.  Is it the final one?
     */
    if (pThis->s.Core.cWriteRecursions == 1)
    {
        AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        /*
         * Update the state.
         */
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
        if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled())
# endif
        {
            ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
            STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
            ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);

            for (;;)
            {
                uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t u64OldState = u64State;

                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
                Assert(c > 0);
                c--;

                if (   c > 0
                    || (u64State & RTCSRW_CNT_RD_MASK) == 0)
                {
                    /* Don't change the direction, wake up the next writer if any. */
                    u64State &= ~RTCSRW_CNT_WR_MASK;
                    u64State |= c << RTCSRW_CNT_WR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        if (c > 0)
                        {
                            int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                            AssertRC(rc);
                        }
                        break;
                    }
                }
                else
                {
                    /* Reverse the direction and signal the reader threads. */
                    u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        Assert(!pThis->s.Core.fNeedReset);
                        ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
                        int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                        AssertRC(rc);
                        break;
                    }
                }

                ASMNopPause();
                if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                    return VERR_SEM_DESTROYED;
            }
        }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
        else
# endif
        {
            /*
             * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal
             * here, so queue the exit request (ring-3).
             */
            PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
            uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
            LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
            AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
            pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
            STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
        }
#endif
    }
    else
    {
        /*
         * Not the final recursion.
         */
        Assert(pThis->s.Core.cWriteRecursions != 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
    }

    return VINF_SUCCESS;
}


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveExclWorker(pThis, false /*fNoVal*/);
}
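
/* Illustrative write session with a read recursion, not part of the original
 * file: a thread owning the section exclusively may also enter it shared
 * (tracked by cWriterReads) and must unwind in reverse order, as enforced by
 * the VERR_WRONG_ORDER check in the worker above.
 *
 *     int rc = PDMCritSectRwEnterExcl(pCritSect, VERR_SEM_BUSY);
 *     if (RT_SUCCESS(rc))
 *     {
 *         PDMCritSectRwEnterShared(pCritSect, VERR_SEM_BUSY); // recursion, cannot block
 *         ...
 *         PDMCritSectRwLeaveShared(pCritSect);                // must precede the final
 *         PDMCritSectRwLeaveExcl(pCritSect);                  // exclusive leave
 *     }
 */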


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveExclWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Checks if the caller is the exclusive (write) owner of the critical section.
 *
 * @retval  true if owner.
 * @retval  false if not owner.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
 *          RTCritSectRwIsWriteOwner.
 */
VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Check ownership.
     */
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeWriter == NIL_RTNATIVETHREAD)
        return false;
    return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis);
}
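
/* Typical use is in assertions guarding state that is only written under the
 * exclusive lock (illustrative only):
 *
 *     Assert(PDMCritSectRwIsWriteOwner(pCritSect));
 */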


/**
 * Checks if the caller is one of the read owners of the critical section.
 *
 * @note    !CAUTION!  This API doesn't work reliably if lock validation isn't
 *          enabled.  Meaning, the answer is not trustworthy unless
 *          RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
 *          Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
 *          creating the semaphore.  And finally, if you used a locking class,
 *          don't disable deadlock detection by setting cMsMinDeadlock to
 *          RT_INDEFINITE_WAIT.
 *
 *          In short, only use this for assertions.
 *
 * @returns @c true if reader, @c false if not.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fWannaHear  What you'd like to hear when lock validation is not
 *                      available.  (For avoiding asserting all over the place.)
 * @sa      PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
 */
VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Inspect the state.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
    {
        /*
         * It's in write mode, so we can only be a reader if we're also the
         * current writer.
         */
        RTNATIVETHREAD hWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
        if (hWriter == NIL_RTNATIVETHREAD)
            return false;
        return hWriter == pdmCritSectRwGetNativeSelf(pThis);
    }

    /*
     * Read mode.  If there are no current readers, then we cannot be a reader.
     */
    if (!(u64State & RTCSRW_CNT_RD_MASK))
        return false;

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    /*
     * Ask the lock validator.
     * Note! It doesn't know everything, let's deal with that if it becomes an issue...
     */
    return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
#else
    /*
     * Ok, we don't know, just tell the caller what he wants to hear.
     */
    return fWannaHear;
#endif
}
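
/* As the note above says, only use this in assertions (illustrative only):
 *
 *     Assert(PDMCritSectRwIsReadOwner(pCritSect, true /*fWannaHear*/));
 */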


/**
 * Gets the write recursion count.
 *
 * @returns The write recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriteRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriteRecursions;
}


/**
 * Gets the read recursion count of the current writer.
 *
 * @returns The read recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriterReadRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriterReads;
}


/**
 * Gets the current number of reads.
 *
 * This includes all read recursions, so it might be higher than the number of
 * read owners.  It does not include reads done by the current writer.
 *
 * @returns The read count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
 *          RTCritSectRwGetReadCount.
 */
VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        return 0;
    return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
}


/**
 * Checks if the read/write critical section is initialized or not.
 *
 * @retval  true if initialized.
 * @retval  false if not initialized.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
 */
VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
{
    AssertPtr(pThis);
    return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
}