VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@ 45293

Last change on this file since 45293 was 45293, checked in by vboxsync, 12 years ago

PDMCritSectRw: Prep for ring-0 and raw-mode context operation.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 47.7 KB
1/* $Id: PDMAllCritSectRw.cpp 45293 2013-04-02 18:42:50Z vboxsync $ */
2/** @file
3 * PDM - Read/Write Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2009-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vm.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39#if defined(IN_RING3) || defined(IN_RING0)
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** The number of loops to spin for shared access in ring-3. */
48#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
49/** The number of loops to spin for shared access in ring-0. */
50#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
51/** The number of loops to spin for shared access in the raw-mode context. */
52#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
53
54/** The number of loops to spin for exclusive access in ring-3. */
55#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
56/** The number of loops to spin for exclusive access in ring-0. */
57#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
58/** The number of loops to spin for exclusive access in the raw-mode context. */
59#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
60
61
62/* Undefine the automatic VBOX_STRICT API mappings. */
63#undef PDMCritSectRwEnterExcl
64#undef PDMCritSectRwTryEnterExcl
65#undef PDMCritSectRwEnterShared
66#undef PDMCritSectRwTryEnterShared
67
68
69/**
70 * Gets the ring-3 native thread handle of the calling thread.
71 *
72 * @returns native thread handle (ring-3).
73 * @param pThis The read/write critical section. This is only used in
74 * R0 and RC.
75 */
76DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis)
77{
78#ifdef IN_RING3
79 NOREF(pThis);
80 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
81#else
82 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
83 NIL_RTNATIVETHREAD);
84 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
85 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
86 RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
87#endif
88 return hNativeSelf;
89}
90
91
92
93
94
95#ifdef IN_RING3
96/**
97 * Changes the lock validator sub-class of the read/write critical section.
98 *
99 * It is recommended to ensure that nobody is using this critical section
100 * while changing the value.
101 *
102 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
103 * lock validator isn't compiled in or either of the parameters is
104 * invalid.
105 * @param pThis Pointer to the read/write critical section.
106 * @param uSubClass The new sub-class value.
107 */
108VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
109{
110 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
111 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
112# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
113 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
114
115 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
116 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
117# else
118 NOREF(uSubClass);
119 return RTLOCKVAL_SUB_CLASS_INVALID;
120# endif
121}
122#endif /* IN_RING3 */
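
/** A minimal usage sketch for PDMR3CritSectRwSetSubClass (hypothetical device
 * code; pDevState and its CritSectRw member are assumptions, not part of this
 * file):
 * @code
 *     // During construction, before the section sees any concurrent use:
 *     uint32_t uOld = PDMR3CritSectRwSetSubClass(&pDevState->CritSectRw,
 *                                                RTLOCKVAL_SUB_CLASS_USER);
 *     NOREF(uOld); // RTLOCKVAL_SUB_CLASS_INVALID without the lock validator
 * @endcode
 */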
123
124
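/*
 * Note on the scheme used below: the whole section state lives in the single
 * 64-bit word u64State, which is only ever changed via compare-and-swap
 * (ASMAtomicCmpXchgU64). As the RTCSRW_* masks and shifts suggest, it packs
 * a direction flag (RTCSRW_DIR_READ / RTCSRW_DIR_WRITE), a reader count, a
 * writer count and a waiting-reader count into separate bit fields (the
 * exact layout is defined by the RTCSRW_* constants in the header, not
 * here). Each enter/leave path reads the word, computes the desired
 * successor state and retries the CmpXchg until it either wins or has to
 * block on one of the two event semaphores (hEvtRead / hEvtWrite).
 */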
125static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
126{
127 /*
128 * Validate input.
129 */
130 AssertPtr(pThis);
131 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
132
133#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
134 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
135 if (!fTryOnly)
136 {
137 int rc9;
138 RTNATIVETHREAD hNativeWriter;
139 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
140 if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis))
141 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
142 else
143 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
144 if (RT_FAILURE(rc9))
145 return rc9;
146 }
147#endif
148
149 /*
150 * Get cracking...
151 */
152 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
153 uint64_t u64OldState = u64State;
154
155 for (;;)
156 {
157 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
158 {
159 /* It flows in the right direction, try to follow it before it changes. */
160 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
161 c++;
162 Assert(c < RTCSRW_CNT_MASK / 2);
163 u64State &= ~RTCSRW_CNT_RD_MASK;
164 u64State |= c << RTCSRW_CNT_RD_SHIFT;
165 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
166 {
167#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
168 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
169#endif
170 break;
171 }
172 }
173 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
174 {
175 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
176 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
177 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
178 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
179 {
180 Assert(!pThis->s.Core.fNeedReset);
181#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
182 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
183#endif
184 break;
185 }
186 }
187 else
188 {
189 /* Is the writer perhaps doing a read recursion? */
190 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
191 RTNATIVETHREAD hNativeWriter;
192 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
193 if (hNativeSelf == hNativeWriter)
194 {
195#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
196 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
197 if (RT_FAILURE(rc9))
198 return rc9;
199#endif
200 Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
201 ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
202 return VINF_SUCCESS; /* don't break! */
203 }
204
205 /* If we're only trying, return straight away. */
206 if (fTryOnly)
207 return VERR_SEM_BUSY;
208
209#if defined(IN_RING3)
210 /*
211 * Add ourselves to the queue and wait for the direction to change.
212 */
213 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
214 c++;
215 Assert(c < RTCSRW_CNT_MASK / 2);
216
217 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
218 cWait++;
219 Assert(cWait <= c);
220 Assert(cWait < RTCSRW_CNT_MASK / 2);
221
222 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
223 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
224
225 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
226 {
227 for (uint32_t iLoop = 0; ; iLoop++)
228 {
229 int rc;
230# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
231 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
232 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
233 if (RT_SUCCESS(rc))
234# else
235 RTTHREAD hThreadSelf = RTThreadSelf();
236 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
237# endif
238 {
239 do
240 rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
241 (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
242 RT_INDEFINITE_WAIT);
243 while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
244 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
245 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
246 return VERR_SEM_DESTROYED;
247 }
248 if (RT_FAILURE(rc))
249 {
250 /* Decrement the counts and return the error. */
251 for (;;)
252 {
253 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
254 c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
255 c--;
256 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
257 cWait--;
258 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
259 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
260 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
261 break;
262 }
263 return rc;
264 }
265
266 Assert(pThis->s.Core.fNeedReset);
267 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
268 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
269 break;
270 AssertMsg(iLoop < 1, ("%u\n", iLoop));
271 }
272
273 /* Decrement the wait count and maybe reset the semaphore (if we're last). */
274 for (;;)
275 {
276 u64OldState = u64State;
277
278 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
279 Assert(cWait > 0);
280 cWait--;
281 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
282 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
283
284 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
285 {
286 if (cWait == 0)
287 {
288 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
289 {
290 int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
291 (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
292 AssertRCReturn(rc, rc);
293 }
294 }
295 break;
296 }
297 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
298 }
299
300# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
301 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
302# endif
303 break;
304 }
305
306#else
307 /*
308 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
309 * back to ring-3 and do it there or return rcBusy.
310 */
311 if (rcBusy == VINF_SUCCESS)
312 {
313 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
314 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
315 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
316 * back to ring-3. Goes for both kinds of crit sects. */
317 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
318 }
319 return rcBusy;
320#endif
321 }
322
323 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
324 return VERR_SEM_DESTROYED;
325
326 ASMNopPause();
327 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
328 u64OldState = u64State;
329 }
330
331 /* got it! */
332 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
333 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
334 return VINF_SUCCESS;
336}
337
338
339/**
340 * Enter a critical section with shared (read) access.
341 *
342 * @returns VBox status code.
343 * @retval VINF_SUCCESS on success.
344 * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy.
345 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
346 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
347 * during the operation.
348 *
349 * @param pThis Pointer to the read/write critical section.
350 * @param rcBusy The status code to return when we're in RC or R0 and the
351 * section is busy. Pass VINF_SUCCESS to acquire the
352 * critical section thru a ring-3 call if necessary.
357 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
358 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
359 * RTCritSectRwEnterShared.
360 */
361VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
362{
363#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
364 return pdmCritSectRwEnterShared(pThis, rcBusy, NULL, false /*fTryOnly*/);
365#else
366 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
367 return pdmCritSectRwEnterShared(pThis, rcBusy, &SrcPos, false /*fTryOnly*/);
368#endif
369}
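
/** A minimal usage sketch (hypothetical device code; pDevState and its
 * CritSectRw member are assumptions, not part of this file):
 * @code
 *     // Shared (read) access; in R0/RC a contended section fails with the
 *     // rcBusy value (VERR_SEM_BUSY here) instead of blocking.
 *     int rc = PDMCritSectRwEnterShared(&pDevState->CritSectRw, VERR_SEM_BUSY);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... read the data the section protects ...
 *         PDMCritSectRwLeaveShared(&pDevState->CritSectRw);
 *     }
 * @endcode
 */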
370
371
372/**
373 * Enter a critical section with shared (read) access.
374 *
375 * @returns VBox status code.
376 * @retval VINF_SUCCESS on success.
377 * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy.
378 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
379 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
380 * during the operation.
381 *
382 * @param pThis Pointer to the read/write critical section.
383 * @param rcBusy The status code to return when we're in RC or R0 and the
384 * section is busy. Pass VINF_SUCCESS to acquire the
385 * critical section thru a ring-3 call if necessary.
386 * @param uId Where we're entering the section.
387 * @param pszFile The source position - file.
388 * @param iLine The source position - line.
389 * @param pszFunction The source position - function.
390 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
391 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
392 * RTCritSectRwEnterSharedDebug.
393 */
394VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
395{
396#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
397 return pdmCritSectRwEnterShared(pThis, rcBusy, NULL, false /*fTryOnly*/);
398#else
399 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
400 return pdmCritSectRwEnterShared(pThis, rcBusy, &SrcPos, false /*fTryOnly*/);
401#endif
402}
403
404
405/**
406 * Try enter a critical section with shared (read) access.
407 *
408 * @returns VBox status code.
409 * @retval VINF_SUCCESS on success.
410 * @retval VERR_SEM_BUSY if the critsect was owned.
411 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
412 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
413 * during the operation.
414 *
415 * @param pThis Pointer to the read/write critical section.
420 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
421 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
422 * RTCritSectRwTryEnterShared.
423 */
424VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
425{
426#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
427 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, true /*fTryOnly*/);
428#else
429 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
430 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, &SrcPos, true /*fTryOnly*/);
431#endif
432}
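
/** A minimal usage sketch (hypothetical caller; pDevState->CritSectRw is an
 * assumption, not part of this file):
 * @code
 *     // Opportunistic read access; never blocks in any context.
 *     if (RT_SUCCESS(PDMCritSectRwTryEnterShared(&pDevState->CritSectRw)))
 *     {
 *         // ... fast path over the protected data ...
 *         PDMCritSectRwLeaveShared(&pDevState->CritSectRw);
 *     }
 *     // else: defer the work or take a slower path.
 * @endcode
 */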
433
434
435/**
436 * Try enter a critical section with shared (read) access.
437 *
438 * @returns VBox status code.
439 * @retval VINF_SUCCESS on success.
440 * @retval VERR_SEM_BUSY if the critsect was owned.
441 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
442 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
443 * during the operation.
444 *
445 * @param pThis Pointer to the read/write critical section.
446 * @param uId Where we're entering the section.
447 * @param pszFile The source position - file.
448 * @param iLine The source position - line.
449 * @param pszFunction The source position - function.
450 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
451 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
452 * RTCritSectRwTryEnterSharedDebug.
453 */
454VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
455{
456#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
457 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, true /*fTryOnly*/);
458#else
459 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
460 return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, &SrcPos, true /*fTryOnly*/);
461#endif
462}
463
464
465#ifdef IN_RING3
466/**
467 * Enters a PDM read/write critical section with shared (read) access.
468 *
469 * @returns VINF_SUCCESS if entered successfully.
470 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
471 * during the operation.
472 *
473 * @param pThis Pointer to the read/write critical section.
474 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
475 */
476VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
477{
478 int rc = pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, false /*fTryOnly*/);
479 if ( rc == VINF_SUCCESS
480 && fCallRing3
481 && pThis->s.Core.pValidatorRead)
482 {
483 Assert(pThis->s.Core.pValidatorWrite);
484 if (pThis->s.Core.hNativeWriter == NIL_RTNATIVETHREAD)
485 RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
486 else
487 RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
488 }
489 return rc;
490}
491#endif /* IN_RING3 */
492
493
494/**
495 * Leave a critical section held with shared access.
496 *
497 * @returns VBox status code.
498 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
499 * during the operation.
500 * @param pThis Pointer to the read/write critical section.
501 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
502 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
503 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
504 */
505VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis)
506{
507 /*
508 * Validate handle.
509 */
510 AssertPtr(pThis);
511 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
512
513 /*
514 * Check the direction and take action accordingly.
515 */
516 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
517 uint64_t u64OldState = u64State;
518 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
519 {
520#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
521 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
522 if (RT_FAILURE(rc9))
523 return rc9;
524#endif
525 for (;;)
526 {
527 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
528 AssertReturn(c > 0, VERR_NOT_OWNER);
529 c--;
530
531 if ( c > 0
532 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
533 {
534 /* Don't change the direction. */
535 u64State &= ~RTCSRW_CNT_RD_MASK;
536 u64State |= c << RTCSRW_CNT_RD_SHIFT;
537 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
538 break;
539 }
540 else
541 {
542#if defined(IN_RING3)
543 /* Reverse the direction and signal the writer threads. */
544 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
545 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
546 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
547 {
548 int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
549 AssertRC(rc);
550 break;
551 }
552#else
553 /* Queue the exit request (ring-3). */
554 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
555 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
556 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
557 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
558 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
559 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
560 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
561 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
562 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
563 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
564#endif
565 }
566
567 ASMNopPause();
568 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
569 u64OldState = u64State;
570 }
571 }
572 else
573 {
574 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
575 RTNATIVETHREAD hNativeWriter;
576 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
577 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
578 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
579#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
580 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
581 if (RT_FAILURE(rc))
582 return rc;
583#endif
584 ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
585 }
586
587 return VINF_SUCCESS;
588}
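
/** Sketch of the writer read-recursion case handled above (hypothetical
 * caller; pDevState->CritSectRw is an assumption, not part of this file):
 * @code
 *     PDMCritSectRwEnterExcl(&pDevState->CritSectRw, VERR_SEM_BUSY);
 *     // A shared enter by the current writer only bumps cWriterReads:
 *     PDMCritSectRwEnterShared(&pDevState->CritSectRw, VERR_SEM_BUSY);
 *     // ...
 *     PDMCritSectRwLeaveShared(&pDevState->CritSectRw); // drops cWriterReads
 *     PDMCritSectRwLeaveExcl(&pDevState->CritSectRw);   // the write lock last
 * @endcode
 */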
589
590
591static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
592{
593 /*
594 * Validate input.
595 */
596 AssertPtr(pThis);
597 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
598
599#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
600 RTTHREAD hThreadSelf = NIL_RTTHREAD;
601 if (!fTryOnly)
602 {
603 hThreadSelf = RTThreadSelfAutoAdopt();
604 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
605 if (RT_FAILURE(rc9))
606 return rc9;
607 }
608#endif
609
610 /*
611 * Check if we're already the owner and just recursing.
612 */
613 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
614 RTNATIVETHREAD hNativeWriter;
615 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
616 if (hNativeSelf == hNativeWriter)
617 {
618 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
619#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
620 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
621 if (RT_FAILURE(rc9))
622 return rc9;
623#endif
624 Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
625 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
626 ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
627 return VINF_SUCCESS;
628 }
629
630 /*
631 * Get cracking.
632 */
633 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
634 uint64_t u64OldState = u64State;
635
636 for (;;)
637 {
638 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
639 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
640 {
641 /* It flows in the right direction, try to follow it before it changes. */
642 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
643 c++;
644 Assert(c < RTCSRW_CNT_MASK / 2);
645 u64State &= ~RTCSRW_CNT_WR_MASK;
646 u64State |= c << RTCSRW_CNT_WR_SHIFT;
647 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
648 break;
649 }
650 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
651 {
652 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
653 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
654 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
655 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
656 break;
657 }
658 else if (fTryOnly)
659 /* Wrong direction and we're not supposed to wait, just return. */
660 return VERR_SEM_BUSY;
661 else
662 {
663 /* Add ourselves to the write count and break out to do the wait. */
664 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
665 c++;
666 Assert(c < RTCSRW_CNT_MASK / 2);
667 u64State &= ~RTCSRW_CNT_WR_MASK;
668 u64State |= c << RTCSRW_CNT_WR_SHIFT;
669 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
670 break;
671 }
672
673 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
674 return VERR_SEM_DESTROYED;
675
676 ASMNopPause();
677 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
678 u64OldState = u64State;
679 }
680
681 /*
682 * If we're in write mode now try grab the ownership. Play fair if there
683 * are threads already waiting.
684 */
685 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
686#if defined(IN_RING3)
687 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
688 || fTryOnly)
689#endif
690 ;
691 if (fDone)
692 ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
693 if (!fDone)
694 {
695#if defined(IN_RING3)
696 /*
697 * Wait for our turn.
698 */
699 for (uint32_t iLoop = 0; ; iLoop++)
700 {
701 int rc;
702# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
703 if (!fTryOnly)
704 {
705 if (hThreadSelf == NIL_RTTHREAD)
706 hThreadSelf = RTThreadSelfAutoAdopt();
707 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
708 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
709 }
710 else
711 rc = VINF_SUCCESS;
712 if (RT_SUCCESS(rc))
713# else
714 RTTHREAD hThreadSelf = RTThreadSelf();
715 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
716# endif
717 {
718 do
719 rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
720 (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
721 RT_INDEFINITE_WAIT);
722 while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
723 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
724 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
725 return VERR_SEM_DESTROYED;
726 }
727 if (RT_FAILURE(rc))
728 {
729 /* Decrement the counts and return the error. */
730 for (;;)
731 {
732 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
733 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
734 c--;
735 u64State &= ~RTCSRW_CNT_WR_MASK;
736 u64State |= c << RTCSRW_CNT_WR_SHIFT;
737 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
738 break;
739 }
740 return rc;
741 }
742
743 u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
744 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
745 {
746 ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
747 if (fDone)
748 break;
749 }
750 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
751 }
752
753#else
754 /* We cannot call SUPSemEventWaitNoResume in this context. Go back to
755 ring-3 and do it there or return rcBusy. */
756 for (;;)
757 {
758 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
759 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
760 c--;
761 u64State &= ~RTCSRW_CNT_WR_MASK;
762 u64State |= c << RTCSRW_CNT_WR_SHIFT;
763 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
764 break;
765 }
766
767 if (rcBusy == VINF_SUCCESS)
768 {
769 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
770 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
771 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
772 * back to ring-3. Goes for both kinds of crit sects. */
773 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
774 }
775 return rcBusy;
776
777#endif
778 }
779
780 /*
781 * Got it!
782 */
783 Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
784 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
785 Assert(pThis->s.Core.cWriterReads == 0);
786#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
787 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
788#endif
789 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
790 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
791
792 return VINF_SUCCESS;
793}
794
795
796/**
798 * Enter a critical section with exclusive (write) access.
798 *
799 * @returns VBox status code.
800 * @retval VINF_SUCCESS on success.
801 * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy.
802 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
803 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
804 * during the operation.
805 *
806 * @param pThis Pointer to the read/write critical section.
807 * @param rcBusy The status code to return when we're in RC or R0 and the
808 * section is busy. Pass VINF_SUCCESS to acquire the
809 * critical section thru a ring-3 call if necessary.
810 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
811 * PDMCritSectRwTryEnterExclDebug,
812 * PDMCritSectEnterDebug, PDMCritSectEnter,
813 * RTCritSectRwEnterExcl.
814 */
815VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
816{
817#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
818 return pdmCritSectRwEnterExcl(pThis, rcBusy, NULL, false /*fTryOnly*/);
819#else
820 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
821 return pdmCritSectRwEnterExcl(pThis, rcBusy, &SrcPos, false /*fTryOnly*/);
822#endif
823}
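
/** A sketch of the rcBusy convention (hypothetical ring-0 MMIO handler;
 * everything outside this file is an assumption):
 * @code
 *     // In R0/RC, hand contention to ring-3 instead of blocking:
 *     int rc = PDMCritSectRwEnterExcl(&pDevState->CritSectRw, VINF_IOM_R3_MMIO_WRITE);
 *     if (rc == VINF_IOM_R3_MMIO_WRITE)
 *         return rc; // ring-3 retries the access and enters the section there
 *     AssertRCReturn(rc, rc);
 *     // ... modify the protected state ...
 *     PDMCritSectRwLeaveExcl(&pDevState->CritSectRw);
 * @endcode
 */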
824
825
826/**
827 * Enter a critical section with exclusive (write) access.
828 *
829 * @returns VBox status code.
830 * @retval VINF_SUCCESS on success.
831 * @retval @a rcBusy if in ring-0 or raw-mode context and it is busy.
832 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
833 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
834 * during the operation.
835 *
836 * @param pThis Pointer to the read/write critical section.
837 * @param rcBusy The status code to return when we're in RC or R0 and the
838 * section is busy. Pass VINF_SUCCESS to acquire the
839 * critical section thru a ring-3 call if necessary.
840 * @param uId Where we're entering the section.
841 * @param pszFile The source position - file.
842 * @param iLine The source position - line.
843 * @param pszFunction The source position - function.
844 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
845 * PDMCritSectRwTryEnterExclDebug,
846 * PDMCritSectEnterDebug, PDMCritSectEnter,
847 * RTCritSectRwEnterExclDebug.
848 */
849VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
850{
851#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
852 return pdmCritSectRwEnterExcl(pThis, rcBusy, NULL, false /*fTryOnly*/);
853#else
854 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
855 return pdmCritSectRwEnterExcl(pThis, rcBusy, &SrcPos, false /*fTryOnly*/);
856#endif
857}
858
859
860/**
861 * Try enter a critical section with exclusive (write) access.
862 *
863 * @retval VINF_SUCCESS on success.
864 * @retval VERR_SEM_BUSY if the critsect was owned.
865 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
866 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
867 * during the operation.
868 *
869 * @param pThis Pointer to the read/write critical section.
870 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
871 * PDMCritSectRwEnterExclDebug,
872 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
873 * RTCritSectRwTryEnterExcl.
874 */
875VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
876{
877#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
878 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, NULL, true /*fTryOnly*/);
879#else
880 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
881 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, &SrcPos, true /*fTryOnly*/);
882#endif
883}
884
885
886/**
887 * Try enter a critical section with exclusive (write) access.
888 *
889 * @retval VINF_SUCCESS on success.
890 * @retval VERR_SEM_BUSY if the critsect was owned.
891 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
892 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
893 * during the operation.
894 *
895 * @param pThis Pointer to the read/write critical section.
896 * @param uId Where we're entering the section.
897 * @param pszFile The source position - file.
898 * @param iLine The source position - line.
899 * @param pszFunction The source position - function.
900 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
901 * PDMCritSectRwEnterExclDebug,
902 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
903 * RTCritSectRwTryEnterExclDebug.
904 */
905VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
906{
907#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
908 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, NULL, true /*fTryOnly*/);
909#else
910 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
911 return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, &SrcPos, true /*fTryOnly*/);
912#endif
913}
914
915
916#ifdef IN_RING3
917/**
918 * Enters a PDM read/write critical section with exclusive (write) access.
919 *
920 * @returns VINF_SUCCESS if entered successfully.
921 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
922 * during the operation.
923 *
924 * @param pThis Pointer to the read/write critical section.
925 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
926 */
927VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
928{
929 int rc = pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, NULL, false /*fTryOnly*/);
930 if ( rc == VINF_SUCCESS
931 && fCallRing3
932 && pThis->s.Core.pValidatorWrite
933 && pThis->s.Core.pValidatorWrite->hThread != NIL_RTTHREAD)
934 RTLockValidatorRecExclReleaseOwnerUnchecked(pThis->s.Core.pValidatorWrite);
935 return rc;
936}
937#endif /* IN_RING3 */
938
939
940/**
941 * Leave a critical section held exclusively.
942 *
943 * @returns VBox status code.
944 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
945 * during the operation.
946 * @param pThis Pointer to the read/write critical section.
947 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
948 */
949VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis)
950{
951 /*
952 * Validate handle.
953 */
954 AssertPtr(pThis);
955 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
956
957 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
958 RTNATIVETHREAD hNativeWriter;
959 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
960 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
961
962 /*
963 * Unwind one recursion. Is it the final one?
964 */
965 if (pThis->s.Core.cWriteRecursions == 1)
966 {
967 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
968#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
969 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
970 if (RT_FAILURE(rc9))
971 return rc9;
972#endif
973 /*
974 * Update the state.
975 */
976#if defined(IN_RING3)
977 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
978 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
979 ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);
980
981 for (;;)
982 {
983 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
984 uint64_t u64OldState = u64State;
985
986 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
987 Assert(c > 0);
988 c--;
989
990 if ( c > 0
991 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
992 {
993 /* Don't change the direction, wake up the next writer if any. */
994 u64State &= ~RTCSRW_CNT_WR_MASK;
995 u64State |= c << RTCSRW_CNT_WR_SHIFT;
996 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
997 {
998 if (c > 0)
999 {
1000 int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1001 AssertRC(rc);
1002 }
1003 break;
1004 }
1005 }
1006 else
1007 {
1008 /* Reverse the direction and signal the reader threads. */
1009 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1010 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1011 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
1012 {
1013 Assert(!pThis->s.Core.fNeedReset);
1014 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1015 int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1016 AssertRC(rc);
1017 break;
1018 }
1019 }
1020
1021 ASMNopPause();
1022 if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
1023 return VERR_SEM_DESTROYED;
1024 }
1025#else
1026 /*
1027 * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal here,
1028 * so queue the exit request (ring-3).
1029 */
1030 PVM pVM = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
1031 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1032 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1033 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
1034 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
1035 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
1036 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1037 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1038 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1039 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1040#endif
1041 }
1042 else
1043 {
1044 /*
1045 * Not the final recursion.
1046 */
1047 Assert(pThis->s.Core.cWriteRecursions != 0);
1048#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1049 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1050 if (RT_FAILURE(rc9))
1051 return rc9;
1052#endif
1053 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1054 }
1055
1056 return VINF_SUCCESS;
1057}
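
/** Recursion sketch (hypothetical caller; pDevState->CritSectRw is an
 * assumption): every exclusive enter by the owner must be paired with a
 * leave, and only the final leave releases the section.
 * @code
 *     PDMCritSectRwEnterExcl(&pDevState->CritSectRw, VERR_SEM_BUSY); // recursion 1
 *     PDMCritSectRwEnterExcl(&pDevState->CritSectRw, VERR_SEM_BUSY); // recursion 2
 *     PDMCritSectRwLeaveExcl(&pDevState->CritSectRw); // back to 1, still owner
 *     PDMCritSectRwLeaveExcl(&pDevState->CritSectRw); // released
 * @endcode
 */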
1058
1059
1060/**
1061 * Checks the caller is the exclusive (write) owner of the critical section.
1062 *
1063 * @retval @c true if owner.
1064 * @retval @c false if not owner.
1065 * @param pThis Pointer to the read/write critical section.
1066 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1067 * RTCritSectRwIsWriteOwner.
1068 */
1069VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis)
1070{
1071 /*
1072 * Validate handle.
1073 */
1074 AssertPtr(pThis);
1075 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1076
1077 /*
1078 * Check ownership.
1079 */
1080 RTNATIVETHREAD hNativeWriter;
1081 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
1082 if (hNativeWriter == NIL_RTNATIVETHREAD)
1083 return false;
1084 return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis);
1085}
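
/** Typical assertion use (hypothetical caller; pDevState->CritSectRw is an
 * assumption):
 * @code
 *     Assert(PDMCritSectRwIsWriteOwner(&pDevState->CritSectRw));
 *     // ... code that relies on holding the write lock ...
 * @endcode
 */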
1086
1087
1088/**
1089 * Checks if the caller is one of the read owners of the critical section.
1090 *
1091 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1092 * enabled. Meaning, the answer is not trustworthy unless
1093 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1094 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1095 * creating the semaphore. And finally, if you used a locking class,
1096 * don't disable deadlock detection by setting cMsMinDeadlock to
1097 * RT_INDEFINITE_WAIT.
1098 *
1099 * In short, only use this for assertions.
1100 *
1101 * @returns @c true if reader, @c false if not.
1102 * @param pThis Pointer to the read/write critical section.
1103 * @param fWannaHear What you'd like to hear when lock validation is not
1104 * available. (For avoiding asserting all over the place.)
1105 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1106 */
1107VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear)
1108{
1109 /*
1110 * Validate handle.
1111 */
1112 AssertPtr(pThis);
1113 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1114
1115 /*
1116 * Inspect the state.
1117 */
1118 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1119 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1120 {
1121 /*
1122 * It's in write mode, so we can only be a reader if we're also the
1123 * current writer.
1124 */
1125 RTNATIVETHREAD hWriter;
1126 ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
1127 if (hWriter == NIL_RTNATIVETHREAD)
1128 return false;
1129 return hWriter == pdmCritSectRwGetNativeSelf(pThis);
1130 }
1131
1132 /*
1133 * Read mode. If there are no current readers, then we cannot be a reader.
1134 */
1135 if (!(u64State & RTCSRW_CNT_RD_MASK))
1136 return false;
1137
1138#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1139 /*
1140 * Ask the lock validator.
1141 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
1142 */
1143 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
1144#else
1145 /*
1146 * Ok, we don't know, just tell the caller what he wants to hear.
1147 */
1148 return fWannaHear;
1149#endif
1150}
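
/** Per the caution above, an assertion is the only sensible use (hypothetical
 * caller; pDevState->CritSectRw is an assumption):
 * @code
 *     Assert(PDMCritSectRwIsReadOwner(&pDevState->CritSectRw, true)); // fWannaHear=true
 * @endcode
 */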
1151
1152
1153/**
1154 * Gets the write recursion count.
1155 *
1156 * @returns The write recursion count (0 if bad critsect).
1157 * @param pThis Pointer to the read/write critical section.
1158 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
1159 * RTCritSectRwGetWriteRecursion.
1160 */
1161VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
1162{
1163 /*
1164 * Validate handle.
1165 */
1166 AssertPtr(pThis);
1167 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1168
1169 /*
1170 * Return the requested data.
1171 */
1172 return pThis->s.Core.cWriteRecursions;
1173}
1174
1175
1176/**
1177 * Gets the read recursion count of the current writer.
1178 *
1179 * @returns The read recursion count (0 if bad critsect).
1180 * @param pThis Pointer to the read/write critical section.
1181 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
1182 * RTCritSectRwGetWriterReadRecursion.
1183 */
1184VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
1185{
1186 /*
1187 * Validate handle.
1188 */
1189 AssertPtr(pThis);
1190 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1191
1192 /*
1193 * Return the requested data.
1194 */
1195 return pThis->s.Core.cWriterReads;
1196}
1197
1198
1199/**
1200 * Gets the current number of reads.
1201 *
1202 * This includes all read recursions, so it might be higher than the number of
1203 * read owners. It does not include reads done by the current writer.
1204 *
1205 * @returns The read count (0 if bad critsect).
1206 * @param pThis Pointer to the read/write critical section.
1207 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
1208 * RTCritSectRwGetReadCount.
1209 */
1210VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
1211{
1212 /*
1213 * Validate input.
1214 */
1215 AssertPtr(pThis);
1216 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
1217
1218 /*
1219 * Return the requested data.
1220 */
1221 uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
1222 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
1223 return 0;
1224 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
1225}
1226
1227
1228/**
1229 * Checks if the read/write critical section is initialized or not.
1230 *
1231 * @retval @c true if initialized.
1232 * @retval @c false if not initialized.
1233 * @param pThis Pointer to the read/write critical section.
1234 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
1235 */
1236VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
1237{
1238 AssertPtr(pThis);
1239 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
1240}
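
/** Typical use in teardown paths (hypothetical caller; pDevState->CritSectRw
 * is an assumption):
 * @code
 *     if (PDMCritSectRwIsInitialized(&pDevState->CritSectRw))
 *     {
 *         // ... safe to take and then clean up the section ...
 *     }
 * @endcode
 */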
1241