VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@50590

Last change on this file since 50590 was 50001, checked in by vboxsync, 11 years ago

PDMCritSect: Ditto VERR_INTERRUPTED fix for shared critical sections.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 54.5 KB
/* $Id: PDMAllCritSectRw.cpp 50001 2013-12-24 21:28:55Z vboxsync $ */
/** @file
 * IPRT - Read/Write Critical Section, Generic.
 */

/*
 * Copyright (C) 2009-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for shared access in ring-3. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3    20
/** The number of loops to spin for shared access in ring-0. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0    128
/** The number of loops to spin for shared access in the raw-mode context. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC    128

/** The number of loops to spin for exclusive access in ring-3. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3    20
/** The number of loops to spin for exclusive access in ring-0. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0    256
/** The number of loops to spin for exclusive access in the raw-mode context. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC    256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectRwEnterExcl
#undef PDMCritSectRwTryEnterExcl
#undef PDMCritSectRwEnterShared
#undef PDMCritSectRwTryEnterShared


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pThis       The read/write critical section.  This is only used in
 *                      R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis)
{
#ifdef IN_RING3
    NOREF(pThis);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


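/*
 * Note on the u64State encoding used throughout this file (a sketch inferred
 * from the code below; see iprt/critsectrw.h for the authoritative field
 * layout): the 64-bit state word packs the active reader count, the waiting
 * reader count, the writer count and the current direction, and every
 * transition is performed with a compare-and-swap.
 *
 * @code
 *     uint64_t u64State   = ASMAtomicReadU64(&pThis->s.Core.u64State);
 *     uint64_t cReads     = (u64State & RTCSRW_CNT_RD_MASK)      >> RTCSRW_CNT_RD_SHIFT;
 *     uint64_t cReadWait  = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
 *     uint64_t cWrites    = (u64State & RTCSRW_CNT_WR_MASK)      >> RTCSRW_CNT_WR_SHIFT;
 *     bool     fWriteMode = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
 * @endcode
 */
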
#ifdef IN_RING3
/**
 * Changes the lock validator sub-class of the read/write critical section.
 *
 * It is recommended to try to make sure that nobody is using this critical
 * section while changing the value.
 *
 * @returns The old sub-class.  RTLOCKVAL_SUB_CLASS_INVALID is returned if the
 *          lock validator isn't compiled in or either of the parameters are
 *          invalid.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uSubClass   The new sub-class value.
 */
VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
{
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);

    RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
# else
    NOREF(uSubClass);
    return RTLOCKVAL_SUB_CLASS_INVALID;
# endif
}
#endif /* IN_RING3 */

#ifdef IN_RING0
/**
 * Go back to ring-3 so the kernel can do signals, APCs and other fun things.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
static void pdmR0CritSectRwYieldToRing3(PPDMCRITSECTRW pThis)
{
    PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
    int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
    AssertRC(rc);
}
#endif /* IN_RING0 */

/**
 * Worker that enters a read/write critical section with shared access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position. (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis))
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try to switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
                Assert(!pThis->s.Core.fNeedReset);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                {
                    int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
                    if (RT_FAILURE(rc9))
                        return rc9;
                }
#endif
                Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
                return VINF_SUCCESS; /* don't break! */
            }

            /*
             * If we're only trying, return already.
             */
            if (fTryOnly)
            {
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                return VERR_SEM_BUSY;
            }

#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
            if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                && ASMIntAreEnabled())
# endif
            {
                /*
                 * Add ourselves to the queue and wait for the direction to change.
                 */
                uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
                c++;
                Assert(c < RTCSRW_CNT_MASK / 2);

                uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                cWait++;
                Assert(cWait <= c);
                Assert(cWait < RTCSRW_CNT_MASK / 2);

                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);

                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    for (uint32_t iLoop = 0; ; iLoop++)
                    {
                        int rc;
# ifdef IN_RING3
#  if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                        rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                                   RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                        if (RT_SUCCESS(rc))
#  else
                        RTTHREAD hThreadSelf = RTThreadSelf();
                        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#  endif
# endif
                        {
                            for (;;)
                            {
                                rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                  (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
                                                                  RT_INDEFINITE_WAIT);
                                if (   rc != VERR_INTERRUPTED
                                    || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                    break;
# ifdef IN_RING0
                                pdmR0CritSectRwYieldToRing3(pThis);
# endif
                            }
# ifdef IN_RING3
                            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
# endif
                            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                return VERR_SEM_DESTROYED;
                        }
                        if (RT_FAILURE(rc))
                        {
                            /* Decrement the counts and return the error. */
                            for (;;)
                            {
                                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                                c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
                                c--;
                                cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                                cWait--;
                                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
                                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                                    break;
                            }
                            return rc;
                        }

                        Assert(pThis->s.Core.fNeedReset);
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
                            break;
                        AssertMsg(iLoop < 1, ("%u\n", iLoop));
                    }

                    /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                    for (;;)
                    {
                        u64OldState = u64State;

                        cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                        Assert(cWait > 0);
                        cWait--;
                        u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                        u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;

                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                        {
                            if (cWait == 0)
                            {
                                if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
                                {
                                    int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
                                                                   (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                                    AssertRCReturn(rc, rc);
                                }
                            }
                            break;
                        }
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                    }

# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                    if (!fNoVal)
                        RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
# endif
                    break;
                }
            }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
            else
# endif
            {
                /*
                 * We cannot call SUPSemEventMultiWaitNoResume in this context.  Go
                 * back to ring-3 and do it there or return rcBusy.
                 */
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                if (rcBusy == VINF_SUCCESS)
                {
                    PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
                    PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
                    /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                     *        back to ring-3.  Goes for both kinds of crit sects. */
                    return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
                }
                return rcBusy;
            }
#endif /* !IN_RING3 */
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /* got it! */
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
    return VINF_SUCCESS;
}

/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterShared.
 */
VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
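
/*
 * Usage sketch (illustrative only; pDevState and its CritSectRw member are
 * hypothetical): enter shared, read, leave.  Passing VERR_SEM_BUSY as rcBusy
 * makes contended RC/R0 callers fail fast instead of taking a ring-3 call.
 *
 * @code
 *     int rc = PDMCritSectRwEnterShared(&pDevState->CritSectRw, VERR_SEM_BUSY);
 *     if (RT_SUCCESS(rc))
 *     {
 *         uint32_t uValue = pDevState->uSharedValue; // read-only access
 *         PDMCritSectRwLeaveShared(&pDevState->CritSectRw);
 *     }
 * @endcode
 */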


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterShared.
 */
VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
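
/*
 * Try-enter sketch (hypothetical names): poll the section without blocking,
 * falling back to other work when it is contended.
 *
 * @code
 *     if (RT_SUCCESS(PDMCritSectRwTryEnterShared(&pDevState->CritSectRw)))
 *     {
 *         // ... read shared state ...
 *         PDMCritSectRwLeaveShared(&pDevState->CritSectRw);
 *     }
 *     else
 *     {
 *         // ... contended: defer the work rather than block ...
 *     }
 * @endcode
 */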


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with shared (read) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif

/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
static int pdmCritSectRwLeaveSharedWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
    {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
        else
        {
            int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTCSRW_CNT_RD_MASK;
                u64State |= c << RTCSRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }
            else
            {
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
                if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                    && ASMIntAreEnabled())
# endif
                {
                    /* Reverse the direction and signal the writer threads. */
                    u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                        AssertRC(rc);
                        break;
                    }
                }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
                else
# endif
                {
                    /* Queue the exit request (ring-3). */
                    PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
                    PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
                    uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
                    LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
                    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
                    pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
                    STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                    break;
                }
#endif
            }

            ASMNopPause();
            u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
            if (RT_FAILURE(rc))
                return rc;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
    }

    return VINF_SUCCESS;
}

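/*
 * In brief, the leave path above flips the direction and wakes a writer only
 * when the last reader leaves while writers are queued; decoded from u64State
 * (before the decrement) that condition is:
 *
 * @code
 *     uint64_t cReads = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
 *     bool fFlipToWrite =    cReads == 1                            // last reader leaving
 *                         && (u64State & RTCSRW_CNT_WR_MASK) != 0;  // writers are waiting
 * @endcode
 */
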
/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveSharedWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveSharedWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Worker that enters a read/write critical section with exclusive access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position. (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (!fTryOnly)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
        ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if (   (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
            || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try to switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if (fTryOnly)
        {
            /* Wrong direction and we're not supposed to wait, just return. */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
            return VERR_SEM_BUSY;
        }
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now, try to grab the ownership.  Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
#if defined(IN_RING3)
              && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
                  || fTryOnly)
#endif
               ;
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    if (!fDone)
    {
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));

#if defined(IN_RING3) || defined(IN_RING0)
        if (   !fTryOnly
# ifdef IN_RING0
            && RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled()
# endif
           )
        {
            /*
             * Wait for our turn.
             */
            for (uint32_t iLoop = 0; ; iLoop++)
            {
                int rc;
# ifdef IN_RING3
#  ifdef PDMCRITSECTRW_STRICT
                if (hThreadSelf == NIL_RTTHREAD)
                    hThreadSelf = RTThreadSelfAutoAdopt();
                rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
                                                         RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
                if (RT_SUCCESS(rc))
#  else
                RTTHREAD hThreadSelf = RTThreadSelf();
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#  endif
# endif
                {
                    for (;;)
                    {
                        rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                     (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
                                                     RT_INDEFINITE_WAIT);
                        if (   rc != VERR_INTERRUPTED
                            || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                            break;
# ifdef IN_RING0
                        pdmR0CritSectRwYieldToRing3(pThis);
# endif
                    }
# ifdef IN_RING3
                    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
# endif
                    if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                        return VERR_SEM_DESTROYED;
                }
                if (RT_FAILURE(rc))
                {
                    /* Decrement the counts and return the error. */
                    for (;;)
                    {
                        u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                        c--;
                        u64State &= ~RTCSRW_CNT_WR_MASK;
                        u64State |= c << RTCSRW_CNT_WR_SHIFT;
                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                            break;
                    }
                    return rc;
                }

                u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
                {
                    ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                    if (fDone)
                        break;
                }
                AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
            }
        }
        else
#endif /* IN_RING3 || IN_RING0 */
        {
#ifdef IN_RING3
            /* TryEnter call - decrement the number of (waiting) writers. */
#else
            /* We cannot call SUPSemEventWaitNoResume in this context.  Go back to
               ring-3 and do it there or return rcBusy. */
#endif

            for (;;)
            {
                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                c--;
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }

#ifdef IN_RING3
            return VERR_SEM_BUSY;
#else
            if (rcBusy == VINF_SUCCESS)
            {
                Assert(!fTryOnly);
                PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
                PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                 *        back to ring-3.  Goes for both kinds of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
    Assert(pThis->s.Core.cWriterReads == 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fNoVal)
        RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
    STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);

    return VINF_SUCCESS;
}


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExcl.
 */
VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}

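/*
 * Exclusive usage sketch (hypothetical names).  Passing VINF_SUCCESS as
 * rcBusy makes a contended RC/R0 caller acquire the section through a
 * ring-3 call instead of failing:
 *
 * @code
 *     int rc = PDMCritSectRwEnterExcl(&pDevState->CritSectRw, VINF_SUCCESS);
 *     AssertRCReturn(rc, rc);
 *     pDevState->uSharedValue++; // modify state under the write lock
 *     PDMCritSectRwLeaveExcl(&pDevState->CritSectRw);
 * @endcode
 */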

/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
 *          RTCritSectRwTryEnterExcl.
 */
VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
 *          RTCritSectRwTryEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with exclusive (write) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif /* IN_RING3 */


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
static int pdmCritSectRwLeaveExclWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind one recursion. Is it the final one?
     */
    if (pThis->s.Core.cWriteRecursions == 1)
    {
        AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        /*
         * Update the state.
         */
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
        if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled())
# endif
        {
            ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
            STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
            ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);

            for (;;)
            {
                uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t u64OldState = u64State;

                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
                Assert(c > 0);
                c--;

                if (   c > 0
                    || (u64State & RTCSRW_CNT_RD_MASK) == 0)
                {
                    /* Don't change the direction, wake up the next writer if any. */
                    u64State &= ~RTCSRW_CNT_WR_MASK;
                    u64State |= c << RTCSRW_CNT_WR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        if (c > 0)
                        {
                            int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                            AssertRC(rc);
                        }
                        break;
                    }
                }
                else
                {
                    /* Reverse the direction and signal the reader threads. */
                    u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        Assert(!pThis->s.Core.fNeedReset);
                        ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
                        int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                        AssertRC(rc);
                        break;
                    }
                }

                ASMNopPause();
                if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                    return VERR_SEM_DESTROYED;
            }
        }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
        else
# endif
        {
            /*
             * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal
             * in this context, so queue the exit request (ring-3).
             */
            PVM     pVM   = pThis->s.CTX_SUFF(pVM);  AssertPtr(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);          AssertPtr(pVCpu);
            uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
            LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
            AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
            pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
            STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
        }
#endif
    }
    else
    {
        /*
         * Not the final recursion.
         */
        Assert(pThis->s.Core.cWriteRecursions != 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
    }

    return VINF_SUCCESS;
}


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveExclWorker(pThis, false /*fNoVal*/);
}

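/*
 * Recursion sketch (hypothetical names): the writer may re-enter both
 * exclusively and in shared mode; leaves must pair with enters, and all
 * writer reads must be released before the final exclusive leave (see the
 * VERR_WRONG_ORDER check above):
 *
 * @code
 *     PDMCritSectRwEnterExcl(&pDevState->CritSectRw, VERR_SEM_BUSY);
 *     PDMCritSectRwEnterExcl(&pDevState->CritSectRw, VERR_SEM_BUSY);   // write recursion
 *     PDMCritSectRwEnterShared(&pDevState->CritSectRw, VERR_SEM_BUSY); // writer doing a read
 *     PDMCritSectRwLeaveShared(&pDevState->CritSectRw);
 *     PDMCritSectRwLeaveExcl(&pDevState->CritSectRw);
 *     PDMCritSectRwLeaveExcl(&pDevState->CritSectRw);
 * @endcode
 */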

#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveExclWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Checks whether the caller is the exclusive (write) owner of the critical
 * section.
 *
 * @retval  @c true if owner.
 * @retval  @c false if not owner.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
 *          RTCritSectRwIsWriteOwner.
 */
VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Check ownership.
     */
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeWriter == NIL_RTNATIVETHREAD)
        return false;
    return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis);
}

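/*
 * Ownership checks are primarily intended for assertions, e.g. (hypothetical
 * names; see the caution on PDMCritSectRwIsReadOwner below):
 *
 * @code
 *     Assert(PDMCritSectRwIsWriteOwner(&pDevState->CritSectRw));
 *     Assert(PDMCritSectRwIsReadOwner(&pDevState->CritSectRw, true /*fWannaHear*/));
 * @endcode
 */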

/**
 * Checks if the caller is one of the read owners of the critical section.
 *
 * @note    !CAUTION!  This API doesn't work reliably if lock validation isn't
 *          enabled.  Meaning, the answer is not trustworthy unless
 *          RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
 *          Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
 *          creating the semaphore.  And finally, if you used a locking class,
 *          don't disable deadlock detection by setting cMsMinDeadlock to
 *          RT_INDEFINITE_WAIT.
 *
 *          In short, only use this for assertions.
 *
 * @returns @c true if reader, @c false if not.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fWannaHear  What you'd like to hear when lock validation is not
 *                      available.  (For avoiding asserting all over the place.)
 * @sa      PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
 */
VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Inspect the state.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
    {
        /*
         * It's in write mode, so we can only be a reader if we're also the
         * current writer.
         */
        RTNATIVETHREAD hWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
        if (hWriter == NIL_RTNATIVETHREAD)
            return false;
        return hWriter == pdmCritSectRwGetNativeSelf(pThis);
    }

    /*
     * Read mode.  If there are no current readers, then we cannot be a reader.
     */
    if (!(u64State & RTCSRW_CNT_RD_MASK))
        return false;

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    /*
     * Ask the lock validator.
     * Note!  It doesn't know everything, let's deal with that if it becomes an issue...
     */
    return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
#else
    /*
     * Ok, we don't know, just tell the caller what they want to hear.
     */
    return fWannaHear;
#endif
}


/**
 * Gets the write recursion count.
 *
 * @returns The write recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriteRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriteRecursions;
}


/**
 * Gets the read recursion count of the current writer.
 *
 * @returns The read recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriterReadRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriterReads;
}


/**
 * Gets the current number of reads.
 *
 * This includes all read recursions, so it might be higher than the number of
 * read owners.  It does not include reads done by the current writer.
 *
 * @returns The read count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
 *          RTCritSectRwGetReadCount.
 */
VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        return 0;
    return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
}

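/*
 * Introspection sketch (hypothetical names): the recursion and read counters
 * above are mainly useful for logging and statistics.
 *
 * @code
 *     LogFlow(("critsect %p: %u readers, %u write recursions\n", pDevState,
 *              PDMCritSectRwGetReadCount(&pDevState->CritSectRw),
 *              PDMCritSectRwGetWriteRecursion(&pDevState->CritSectRw)));
 * @endcode
 */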

/**
 * Checks if the read/write critical section is initialized or not.
 *
 * @retval  @c true if initialized.
 * @retval  @c false if not initialized.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
 */
VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
{
    AssertPtr(pThis);
    return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
}