VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@57952

Last change on this file since 57952 was 57851, checked in by vboxsync, 9 years ago:
VMM: unused parameter warning fixes.
/* $Id: PDMAllCritSectRw.cpp 57851 2015-09-22 13:10:34Z vboxsync $ */
/** @file
 * IPRT - Read/Write Critical Section, Generic.
 */

/*
 * Copyright (C) 2009-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The number of loops to spin for shared access in ring-3. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3    20
/** The number of loops to spin for shared access in ring-0. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0    128
/** The number of loops to spin for shared access in the raw-mode context. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC    128

/** The number of loops to spin for exclusive access in ring-3. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3    20
/** The number of loops to spin for exclusive access in ring-0. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0    256
/** The number of loops to spin for exclusive access in the raw-mode context. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC    256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectRwEnterExcl
#undef PDMCritSectRwTryEnterExcl
#undef PDMCritSectRwEnterShared
#undef PDMCritSectRwTryEnterShared


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pThis       The read/write critical section.  This is only used in
 *                      R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis)
{
#ifdef IN_RING3
    NOREF(pThis);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM            pVM         = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU         pVCpu       = VMMGetCpu(pVM);         AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread;   Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}




#ifdef IN_RING3
/**
 * Changes the lock validator sub-class of the read/write critical section.
 *
 * It is recommended to try to make sure that nobody is using this critical
 * section while changing the value.
 *
 * @returns The old sub-class.  RTLOCKVAL_SUB_CLASS_INVALID is returned if the
 *          lock validator isn't compiled in or either of the parameters are
 *          invalid.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uSubClass   The new sub-class value.
 */
VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
{
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);

    RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
# else
    NOREF(uSubClass);
    return RTLOCKVAL_SUB_CLASS_INVALID;
# endif
}
#endif /* IN_RING3 */
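/*
 * Usage sketch (illustrative only, not from the original sources): tag the
 * section with a user defined sub-class right after creation so the lock
 * validator can order it against other locks.  The pCritSectRw variable is
 * an assumption of the example.
 *
 *     uint32_t uOldSubClass = PDMR3CritSectRwSetSubClass(pCritSectRw, RTLOCKVAL_SUB_CLASS_USER);
 *     // uOldSubClass is RTLOCKVAL_SUB_CLASS_INVALID when the validator isn't compiled in.
 */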


#ifdef IN_RING0
/**
 * Go back to ring-3 so the kernel can do signals, APCs and other fun things.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
static void pdmR0CritSectRwYieldToRing3(PPDMCRITSECTRW pThis)
{
    PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
    int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
    AssertRC(rc);
}
#endif /* IN_RING0 */


/**
 * Worker that enters a read/write critical section with shared access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(pSrcPos);
    NOREF(fNoVal);
#endif

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis))
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
                Assert(!pThis->s.Core.fNeedReset);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                {
                    int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
                    if (RT_FAILURE(rc9))
                        return rc9;
                }
#endif
                Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
                return VINF_SUCCESS; /* don't break! */
            }

            /*
             * If we're only trying, return already.
             */
            if (fTryOnly)
            {
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                return VERR_SEM_BUSY;
            }

#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
            if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                && ASMIntAreEnabled())
# endif
            {
                /*
                 * Add ourselves to the queue and wait for the direction to change.
                 */
                uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
                c++;
                Assert(c < RTCSRW_CNT_MASK / 2);

                uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                cWait++;
                Assert(cWait <= c);
                Assert(cWait < RTCSRW_CNT_MASK / 2);

                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);

                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    for (uint32_t iLoop = 0; ; iLoop++)
                    {
                        int rc;
# ifdef IN_RING3
#  if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                        rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                                   RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                        if (RT_SUCCESS(rc))
#  else
                        RTTHREAD hThreadSelf = RTThreadSelf();
                        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#  endif
# endif
                        {
                            for (;;)
                            {
                                rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                  (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
                                                                  RT_INDEFINITE_WAIT);
                                if (   rc != VERR_INTERRUPTED
                                    || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                    break;
# ifdef IN_RING0
                                pdmR0CritSectRwYieldToRing3(pThis);
# endif
                            }
# ifdef IN_RING3
                            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
# endif
                            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                return VERR_SEM_DESTROYED;
                        }
                        if (RT_FAILURE(rc))
                        {
                            /* Decrement the counts and return the error. */
                            for (;;)
                            {
                                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                                c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
                                c--;
                                cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                                cWait--;
                                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
                                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                                    break;
                            }
                            return rc;
                        }

                        Assert(pThis->s.Core.fNeedReset);
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
                            break;
                        AssertMsg(iLoop < 1, ("%u\n", iLoop));
                    }

                    /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                    for (;;)
                    {
                        u64OldState = u64State;

                        cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                        Assert(cWait > 0);
                        cWait--;
                        u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                        u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;

                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                        {
                            if (cWait == 0)
                            {
                                if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
                                {
                                    int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                   (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                                    AssertRCReturn(rc, rc);
                                }
                            }
                            break;
                        }
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                    }

# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                    if (!fNoVal)
                        RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
# endif
                    break;
                }
            }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
            else
# endif
            {
                /*
                 * We cannot call SUPSemEventMultiWaitNoResume in this context.  Go
                 * back to ring-3 and do it there or return rcBusy.
                 */
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                if (rcBusy == VINF_SUCCESS)
                {
                    PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                    PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                    /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                     *        back to ring-3.  Goes for both kinds of crit sects. */
                    return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
                }
                return rcBusy;
            }
#endif /* !IN_RING3 */
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /* got it! */
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
    return VINF_SUCCESS;

}


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and
 *                      the section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterShared.
 */
VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
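/*
 * A minimal usage sketch for the shared (read) API; pCritSectRw and the
 * VINF_IOM_R3_MMIO_READ busy status are assumptions of the example, not
 * taken from this file:
 *
 *     int rc = PDMCritSectRwEnterShared(pCritSectRw, VINF_IOM_R3_MMIO_READ);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         // ... read the state the section protects ...
 *         PDMCritSectRwLeaveShared(pCritSectRw);
 *     }
 *     else
 *         return rc; // in R0/RC a busy section defers the access to ring-3
 */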


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and
 *                      the section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterShared.
 */
VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with shared (read) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
static int pdmCritSectRwLeaveSharedWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(fNoVal);
#endif

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
    {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
        else
        {
            int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTCSRW_CNT_RD_MASK;
                u64State |= c << RTCSRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }
            else
            {
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
                if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                    && ASMIntAreEnabled())
# endif
                {
                    /* Reverse the direction and signal the writer threads. */
                    u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                        AssertRC(rc);
                        break;
                    }
                }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
                else
# endif
                {
                    /* Queue the exit request (ring-3). */
                    PVM      pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                    PVMCPU   pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                    uint32_t i     = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
                    LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
                    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
                    pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
                    STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                    break;
                }
#endif
            }

            ASMNopPause();
            u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
            if (RT_FAILURE(rc))
                return rc;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
    }

    return VINF_SUCCESS;
}

/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveSharedWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveSharedWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Worker that enters a read/write critical section with exclusive access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position.  (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(pSrcPos);
    NOREF(fNoVal);
#endif

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (!fTryOnly)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
        ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if (   (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
            || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if (fTryOnly)
        {
            /* Wrong direction and we're not supposed to wait, just return. */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
            return VERR_SEM_BUSY;
        }
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try grab the ownership.  Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
#if defined(IN_RING3)
              && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
                  || fTryOnly)
#endif
               ;
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    if (!fDone)
    {
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));

#if defined(IN_RING3) || defined(IN_RING0)
        if (   !fTryOnly
# ifdef IN_RING0
            && RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled()
# endif
           )
        {

            /*
             * Wait for our turn.
             */
            for (uint32_t iLoop = 0; ; iLoop++)
            {
                int rc;
# ifdef IN_RING3
#  ifdef PDMCRITSECTRW_STRICT
                if (hThreadSelf == NIL_RTTHREAD)
                    hThreadSelf = RTThreadSelfAutoAdopt();
                rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
                                                         RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
                if (RT_SUCCESS(rc))
#  else
                RTTHREAD hThreadSelf = RTThreadSelf();
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#  endif
# endif
                {
                    for (;;)
                    {
                        rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                     (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
                                                     RT_INDEFINITE_WAIT);
                        if (   rc != VERR_INTERRUPTED
                            || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                            break;
# ifdef IN_RING0
                        pdmR0CritSectRwYieldToRing3(pThis);
# endif
                    }
# ifdef IN_RING3
                    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
# endif
                    if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                        return VERR_SEM_DESTROYED;
                }
                if (RT_FAILURE(rc))
                {
                    /* Decrement the counts and return the error. */
                    for (;;)
                    {
                        u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                        c--;
                        u64State &= ~RTCSRW_CNT_WR_MASK;
                        u64State |= c << RTCSRW_CNT_WR_SHIFT;
                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                            break;
                    }
                    return rc;
                }

                u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
                {
                    ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                    if (fDone)
                        break;
                }
                AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
            }

        }
        else
#endif /* IN_RING3 || IN_RING0 */
        {
#ifdef IN_RING3
            /* TryEnter call - decrement the number of (waiting) writers. */
#else
            /* We cannot call SUPSemEventWaitNoResume in this context.  Go back to
               ring-3 and do it there or return rcBusy. */
#endif

            for (;;)
            {
                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                c--;
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }

#ifdef IN_RING3
            return VERR_SEM_BUSY;
#else
            if (rcBusy == VINF_SUCCESS)
            {
                Assert(!fTryOnly);
                PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                 *        back to ring-3.  Goes for both kinds of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
    Assert(pThis->s.Core.cWriterReads == 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fNoVal)
        RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
    STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);

    return VINF_SUCCESS;
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and
 *                      the section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExcl.
 */
VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
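/*
 * A minimal usage sketch for the exclusive (write) API, assuming an
 * illustrative pCritSectRw and VERR_SEM_BUSY as an acceptable busy outcome;
 * nested enters are counted and must each be paired with a leave:
 *
 *     int rc = PDMCritSectRwEnterExcl(pCritSectRw, VERR_SEM_BUSY);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... modify the state the section protects ...
 *         PDMCritSectRwLeaveExcl(pCritSectRw);
 *     }
 */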


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and
 *                      the section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
 *          RTCritSectRwTryEnterExcl.
 */
VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
 *          RTCritSectRwTryEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with exclusive (write) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif /* IN_RING3 */


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
static int pdmCritSectRwLeaveExclWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(fNoVal);
#endif

    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind one recursion. Is it the final one?
     */
    if (pThis->s.Core.cWriteRecursions == 1)
    {
        AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        /*
         * Update the state.
         */
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
        if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled())
# endif
        {
            ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
            STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
            ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);

            for (;;)
            {
                uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t u64OldState = u64State;

                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
                Assert(c > 0);
                c--;

                if (   c > 0
                    || (u64State & RTCSRW_CNT_RD_MASK) == 0)
                {
                    /* Don't change the direction, wake up the next writer if any. */
                    u64State &= ~RTCSRW_CNT_WR_MASK;
                    u64State |= c << RTCSRW_CNT_WR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        if (c > 0)
                        {
                            int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                            AssertRC(rc);
                        }
                        break;
                    }
                }
                else
                {
                    /* Reverse the direction and signal the reader threads. */
                    u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        Assert(!pThis->s.Core.fNeedReset);
                        ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
                        int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                        AssertRC(rc);
                        break;
                    }
                }

                ASMNopPause();
                if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                    return VERR_SEM_DESTROYED;
            }
        }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
        else
# endif
        {
            /*
             * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal
             * in this context, so queue the exit request (ring-3).
             */
            PVM      pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
            PVMCPU   pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
            uint32_t i     = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
            LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
            AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
            pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
            STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
        }
#endif
    }
    else
    {
        /*
         * Not the final recursion.
         */
        Assert(pThis->s.Core.cWriteRecursions != 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
    }

    return VINF_SUCCESS;
}


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveExclWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveExclWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Checks if the caller is the exclusive (write) owner of the critical section.
 *
 * @retval  @c true if owner.
 * @retval  @c false if not owner.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
 *          RTCritSectRwIsWriteOwner.
 */
VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Check ownership.
     */
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeWriter == NIL_RTNATIVETHREAD)
        return false;
    return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis);
}


/**
 * Checks if the caller is one of the read owners of the critical section.
 *
 * @note    !CAUTION!  This API doesn't work reliably if lock validation isn't
 *          enabled.  Meaning, the answer is not trustworthy unless
 *          RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
 *          Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
 *          creating the semaphore.  And finally, if you used a locking class,
 *          don't disable deadlock detection by setting cMsMinDeadlock to
 *          RT_INDEFINITE_WAIT.
 *
 *          In short, only use this for assertions.
 *
 * @returns @c true if reader, @c false if not.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fWannaHear  What you'd like to hear when lock validation is not
 *                      available.  (For avoiding asserting all over the place.)
 * @sa      PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
 */
VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Inspect the state.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
    {
        /*
         * It's in write mode, so we can only be a reader if we're also the
         * current writer.
         */
        RTNATIVETHREAD hWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
        if (hWriter == NIL_RTNATIVETHREAD)
            return false;
        return hWriter == pdmCritSectRwGetNativeSelf(pThis);
    }

    /*
     * Read mode.  If there are no current readers, then we cannot be a reader.
     */
    if (!(u64State & RTCSRW_CNT_RD_MASK))
        return false;

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    /*
     * Ask the lock validator.
     * Note! It doesn't know everything, let's deal with that if it becomes an issue...
     */
    return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
#else
    /*
     * Ok, we don't know, just tell the caller what they want to hear.
     */
    return fWannaHear;
#endif
}
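/*
 * Both ownership predicates are meant for assertions, e.g. (sketch with an
 * illustrative pCritSectRw):
 *
 *     Assert(PDMCritSectRwIsWriteOwner(pCritSectRw));
 *     Assert(PDMCritSectRwIsReadOwner(pCritSectRw, true /*fWannaHear*/));
 *
 * PDMCritSectRwIsReadOwner is only authoritative in strict builds, so the
 * fWannaHear value is what non-strict builds will report back.
 */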


/**
 * Gets the write recursion count.
 *
 * @returns The write recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriteRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriteRecursions;
}


/**
 * Gets the read recursion count of the current writer.
 *
 * @returns The read recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriterReadRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriterReads;
}


/**
 * Gets the current number of reads.
 *
 * This includes all read recursions, so it might be higher than the number of
 * read owners.  It does not include reads done by the current writer.
 *
 * @returns The read count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
 *          RTCritSectRwGetReadCount.
 */
VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        return 0;
    return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
}


/**
 * Checks if the read/write critical section is initialized or not.
 *
 * @retval  @c true if initialized.
 * @retval  @c false if not initialized.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
 */
VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
{
    AssertPtr(pThis);
    return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
}