VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@45299

Last change on this file since 45299 was 45299, checked in by vboxsync, 12 years ago

PDMCritSectRw: Fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 51.5 KB
/* $Id: PDMAllCritSectRw.cpp 45299 2013-04-03 09:47:49Z vboxsync $ */
/** @file
 * PDM - Read/Write Critical Section, Generic.
 */

/*
 * Copyright (C) 2009-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for shared access in ring-3. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3    20
/** The number of loops to spin for shared access in ring-0. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0    128
/** The number of loops to spin for shared access in the raw-mode context. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC    128

/** The number of loops to spin for exclusive access in ring-3. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3    20
/** The number of loops to spin for exclusive access in ring-0. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0    256
/** The number of loops to spin for exclusive access in the raw-mode context. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC    256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectRwEnterExcl
#undef PDMCritSectRwTryEnterExcl
#undef PDMCritSectRwEnterShared
#undef PDMCritSectRwTryEnterShared


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pThis       The read/write critical section.  This is only used in
 *                      R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PCPDMCRITSECTRW pThis)
{
#ifdef IN_RING3
    NOREF(pThis);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM            pVM         = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU         pVCpu       = VMMGetCpu(pVM);         AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread;   Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


#ifdef IN_RING3
/**
 * Changes the lock validator sub-class of the read/write critical section.
 *
 * It is recommended to try to make sure that nobody is using this critical
 * section while changing the value.
 *
 * @returns The old sub-class.  RTLOCKVAL_SUB_CLASS_INVALID is returned if the
 *          lock validator isn't compiled in or either of the parameters are
 *          invalid.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uSubClass   The new sub-class value.
 */
VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
{
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);

    RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
#else
    NOREF(uSubClass);
    return RTLOCKVAL_SUB_CLASS_INVALID;
#endif
}
#endif /* IN_RING3 */

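/*
 * Illustrative sketch (not part of the original file): a sub-class value can
 * teach the lock validator a fixed acquisition order between two related
 * sections of the same class.  The function and lock names are made up;
 * RTLOCKVAL_SUB_CLASS_USER is the first user-defined sub-class value.
 */
#if 0 /* example only */
static void exSetupLockOrder(PPDMCRITSECTRW pOuterLock, PPDMCRITSECTRW pInnerLock)
{
    /* Within one class, higher sub-class values must be taken after lower
       ones, so an inner->outer acquisition would be flagged. */
    PDMR3CritSectRwSetSubClass(pOuterLock, RTLOCKVAL_SUB_CLASS_USER);
    PDMR3CritSectRwSetSubClass(pInnerLock, RTLOCKVAL_SUB_CLASS_USER + 1);
}
#endif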

/**
 * Worker that enters a read/write critical section with shared access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position. (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pThis))
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
                Assert(!pThis->s.Core.fNeedReset);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                {
                    int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
                    if (RT_FAILURE(rc9))
                        return rc9;
                }
#endif
                Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
                return VINF_SUCCESS; /* don't break! */
            }

            /*
             * If we're only trying, return already.
             */
            if (fTryOnly)
            {
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                return VERR_SEM_BUSY;
            }

#if defined(IN_RING3)
            /*
             * Add ourselves to the queue and wait for the direction to change.
             */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);

            uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
            cWait++;
            Assert(cWait <= c);
            Assert(cWait < RTCSRW_CNT_MASK / 2);

            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);

            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
                for (uint32_t iLoop = 0; ; iLoop++)
                {
                    int rc;
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                    rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                               RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                    if (RT_SUCCESS(rc))
# else
                    RTTHREAD hThreadSelf = RTThreadSelf();
                    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
# endif
                    {
                        do
                            rc = SUPSemEventMultiWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                              (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
                                                              RT_INDEFINITE_WAIT);
                        while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
                        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
                        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                            return VERR_SEM_DESTROYED;
                    }
                    if (RT_FAILURE(rc))
                    {
                        /* Decrement the counts and return the error. */
                        for (;;)
                        {
                            u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                            c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
                            c--;
                            cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                            cWait--;
                            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
                            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                                break;
                        }
                        return rc;
                    }

                    Assert(pThis->s.Core.fNeedReset);
                    u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
                        break;
                    AssertMsg(iLoop < 1, ("%u\n", iLoop));
                }

                /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                for (;;)
                {
                    u64OldState = u64State;

                    cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                    Assert(cWait > 0);
                    cWait--;
                    u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                    u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;

                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        if (cWait == 0)
                        {
                            if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
                            {
                                int rc = SUPSemEventMultiReset(pThis->s.CTX_SUFF(pVM)->pSession,
                                                               (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                                AssertRCReturn(rc, rc);
                            }
                        }
                        break;
                    }
                    u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                }

# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
# endif
                break;
            }

#else
            /*
             * We cannot call SUPSemEventMultiWaitNoResume in this context.  Go
             * back to ring-3 and do it there or return rcBusy.
             */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
            if (rcBusy == VINF_SUCCESS)
            {
                PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                 *        back to ring-3.  Goes for both kinds of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /* Got it! */
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
    return VINF_SUCCESS;
}

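/*
 * Illustrative, self-contained sketch (not part of the original file) of the
 * lock-free reader admission performed by the loop above: a single 64-bit
 * word packs the reader count and the current direction, and a reader joins
 * by bumping its count field with a compare-and-swap, retrying on contention.
 * The EX_* field layout is invented for the example (the real layout is the
 * RTCSRW_* masks in iprt/critsectrw.h); compile as C11 to get <stdatomic.h>.
 */
#if 0 /* example only */
# include <stdatomic.h>
# include <stdbool.h>
# include <stdint.h>

# define EX_CNT_RD_MASK  UINT64_C(0x0000000000007fff)   /* hypothetical, shift 0 */
# define EX_DIR_MASK     UINT64_C(0x8000000000000000)   /* hypothetical */
# define EX_DIR_READ     UINT64_C(0)                    /* hypothetical */

static bool exTryEnterRead(_Atomic uint64_t *puState)
{
    uint64_t uOld = atomic_load(puState);
    for (;;)
    {
        /* Only join while the lock flows in the read direction; the real
           code instead queues as a waiting reader (ring-3) at this point. */
        if ((uOld & EX_DIR_MASK) != EX_DIR_READ)
            return false;
        /* The count is assumed to stay well below the mask, as the real
           code asserts (c < RTCSRW_CNT_MASK / 2). */
        uint64_t uNew = (uOld & ~EX_CNT_RD_MASK) | ((uOld & EX_CNT_RD_MASK) + 1);
        /* On failure the CAS reloads uOld with the fresh value and we retry. */
        if (atomic_compare_exchange_weak(puState, &uOld, uNew))
            return true;
    }
}
#endif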


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterShared.
 */
VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}

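/*
 * Hypothetical usage sketch (not part of the original file): a device that
 * protects a shared table with a read/write section, taking shared access on
 * the hot lookup path.  The EXDEVSTATE layout and helper names are made up.
 */
#if 0 /* example only */
static int exDevLookup(PEXDEVSTATE pDevState, uint32_t iEntry, uint32_t *puValue)
{
    /* In R0/RC, VERR_SEM_BUSY tells the caller to retry in ring-3 instead of
       blocking; pass VINF_SUCCESS to let PDM do the ring-3 fallback itself. */
    int rc = PDMCritSectRwEnterShared(&pDevState->CritSectRw, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        *puValue = pDevState->au32Table[iEntry % RT_ELEMENTS(pDevState->au32Table)];
        PDMCritSectRwLeaveShared(&pDevState->CritSectRw);
    }
    return rc;
}
#endif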


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}



/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterShared.
 */
VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}



/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}

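/*
 * Hypothetical usage sketch (not part of the original file): the try-enter
 * variant suits code that must never block, e.g. a statistics sampler that
 * simply skips a reading while the section is write-owned.  Names are made up.
 */
#if 0 /* example only */
static void exDevSampleStats(PEXDEVSTATE pDevState, uint64_t *pcHits)
{
    if (RT_SUCCESS(PDMCritSectRwTryEnterShared(&pDevState->CritSectRw)))
    {
        *pcHits = pDevState->cHits;
        PDMCritSectRwLeaveShared(&pDevState->CritSectRw);
    }
    /* else: busy - keep the previous sample rather than blocking. */
}
#endif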

#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with shared (read) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
static int pdmCritSectRwLeaveSharedWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
    {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
        else
        {
            int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTCSRW_CNT_RD_MASK;
                u64State |= c << RTCSRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }
            else
            {
#if defined(IN_RING3)
                /* Reverse the direction and signal the writer threads. */
                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                    AssertRC(rc);
                    break;
                }
#else
                /* Queue the exit request (ring-3). */
                PVM      pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                PVMCPU   pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                uint32_t i     = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
                LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
                AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
                pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
                STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                break; /* The ring-3 replay of the leave does the real state update. */
#endif
            }

            ASMNopPause();
            u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
            if (RT_FAILURE(rc))
                return rc;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
    }

    return VINF_SUCCESS;
}

/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
VMMDECL(int) PDMCritSectRwLeaveShared(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveSharedWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveSharedQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveSharedWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Worker that enters a read/write critical section with exclusive access.
 *
 * @returns VBox status code.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position. (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (!fTryOnly)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
        ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if (   (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
            || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if (fTryOnly)
        {
            /* Wrong direction and we're not supposed to wait, just return. */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
            return VERR_SEM_BUSY;
        }
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try grab the ownership.  Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
#if defined(IN_RING3)
              && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
                  || fTryOnly)
#endif
               ;
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    if (!fDone)
    {
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));

#if defined(IN_RING3)
        if (!fTryOnly)
        {
            /*
             * Wait for our turn.
             */
            for (uint32_t iLoop = 0; ; iLoop++)
            {
                int rc;
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (hThreadSelf == NIL_RTTHREAD)
                    hThreadSelf = RTThreadSelfAutoAdopt();
                rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
                                                         RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
                if (RT_SUCCESS(rc))
# else
                RTTHREAD hThreadSelf = RTThreadSelf();
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
# endif
                {
                    do
                        rc = SUPSemEventWaitNoResume(pThis->s.CTX_SUFF(pVM)->pSession,
                                                     (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
                                                     RT_INDEFINITE_WAIT);
                    while (rc == VERR_INTERRUPTED && pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC);
                    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
                    if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                        return VERR_SEM_DESTROYED;
                }
                if (RT_FAILURE(rc))
                {
                    /* Decrement the counts and return the error. */
                    for (;;)
                    {
                        u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                        c--;
                        u64State &= ~RTCSRW_CNT_WR_MASK;
                        u64State |= c << RTCSRW_CNT_WR_SHIFT;
                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                            break;
                    }
                    return rc;
                }

                u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
                {
                    ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                    if (fDone)
                        break;
                }
                AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
            }
        }
        else
#endif /* IN_RING3 */
        {
#ifdef IN_RING3
            /* TryEnter call - decrement the number of (waiting) writers. */
#else
            /* We cannot call SUPSemEventWaitNoResume in this context.  Go back
               to ring-3 and do it there or return rcBusy. */
#endif

            for (;;)
            {
                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                c--;
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }

#ifdef IN_RING3
            return VERR_SEM_BUSY;
#else
            if (rcBusy == VINF_SUCCESS)
            {
                PVM    pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
                PVMCPU pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
                 *        back to ring-3.  Goes for both kinds of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
    Assert(pThis->s.Core.cWriterReads == 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fNoVal)
        RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
    STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);

    return VINF_SUCCESS;
}

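/*
 * Illustrative, self-contained sketch (not part of the original file) of the
 * ownership hand-off above: once the state word says "write direction", the
 * actual winner among the competing writers is decided by a single CAS on
 * the owner handle, swapping a nil value for the caller's thread handle.
 * The nil constant is invented for the example; compile as C11.
 */
#if 0 /* example only */
# include <stdatomic.h>
# include <stdbool.h>
# include <stdint.h>

# define EX_NIL_THREAD UINT64_C(0)   /* hypothetical nil owner value */

static bool exTryClaimWriter(_Atomic uint64_t *phOwner, uint64_t hSelf)
{
    uint64_t hExpected = EX_NIL_THREAD;
    /* Strong CAS: succeeds for exactly one competing writer; the losers go
       back to waiting on the write-event semaphore. */
    return atomic_compare_exchange_strong(phOwner, &hExpected, hSelf);
}
#endif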


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExcl.
 */
VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}

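/*
 * Hypothetical usage sketch (not part of the original file): the update side
 * of the earlier lookup example takes exclusive access before modifying the
 * shared table.  The EXDEVSTATE layout and helper names are made up.
 */
#if 0 /* example only */
static int exDevUpdate(PEXDEVSTATE pDevState, uint32_t iEntry, uint32_t uValue)
{
    int rc = PDMCritSectRwEnterExcl(&pDevState->CritSectRw, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        pDevState->au32Table[iEntry % RT_ELEMENTS(pDevState->au32Table)] = uValue;
        pDevState->cHits = 0; /* invalidate the sampled statistics */
        PDMCritSectRwLeaveExcl(&pDevState->CritSectRw);
    }
    return rc;
}
#endif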


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  @a rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
 *          RTCritSectRwTryEnterExcl.
 */
VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   pszFile     The source position - file.
 * @param   iLine       The source position - line.
 * @param   pszFunction The source position - function.
 * @sa      PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
 *          RTCritSectRwTryEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with exclusive (write) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif /* IN_RING3 */


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
static int pdmCritSectRwLeaveExclWorker(PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind one recursion. Is it the final one?
     */
    if (pThis->s.Core.cWriteRecursions == 1)
    {
        AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        /*
         * Update the state.
         */
#if defined(IN_RING3)
        ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
        STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
        ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);

        for (;;)
        {
            uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
            uint64_t u64OldState = u64State;

            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            Assert(c > 0);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_RD_MASK) == 0)
            {
                /* Don't change the direction, wake up the next writer if any. */
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    if (c > 0)
                    {
                        int rc = SUPSemEventSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                        AssertRC(rc);
                    }
                    break;
                }
            }
            else
            {
                /* Reverse the direction and signal the reader threads. */
                u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    Assert(!pThis->s.Core.fNeedReset);
                    ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
                    int rc = SUPSemEventMultiSignal(pThis->s.CTX_SUFF(pVM)->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                    AssertRC(rc);
                    break;
                }
            }

            ASMNopPause();
            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                return VERR_SEM_DESTROYED;
        }
#else
        /*
         * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal in
         * this context, so queue the exit request (ring-3).
         */
        PVM      pVM   = pThis->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU   pVCpu = VMMGetCpu(pVM);         AssertPtr(pVCpu);
        uint32_t i     = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
        LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
        pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
#endif
    }
    else
    {
        /*
         * Not the final recursion.
         */
        Assert(pThis->s.Core.cWriteRecursions != 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
    }

    return VINF_SUCCESS;
}

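/*
 * Hypothetical usage sketch (not part of the original file): write -> read
 * recursion.  A thread holding the section exclusively may also enter it
 * shared; such enters only bump cWriterReads and must all be undone before
 * the final exclusive leave (VERR_WRONG_ORDER otherwise).  Names are made up.
 */
#if 0 /* example only */
static int exDevReconfigure(PEXDEVSTATE pDevState)
{
    int rc = PDMCritSectRwEnterExcl(&pDevState->CritSectRw, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        rc = PDMCritSectRwEnterShared(&pDevState->CritSectRw, VERR_SEM_BUSY); /* cheap recursion */
        /* ... read while planning the update, then apply it ... */
        PDMCritSectRwLeaveShared(&pDevState->CritSectRw); /* undo the read recursion first */
        PDMCritSectRwLeaveExcl(&pDevState->CritSectRw);   /* then the final write release */
    }
    return rc;
}
#endif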

/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
VMMDECL(int) PDMCritSectRwLeaveExcl(PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveExclWorker(pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveExclQueued(PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveExclWorker(pThis, true /*fNoVal*/);
}
#endif


/**
 * Checks if the caller is the exclusive (write) owner of the critical section.
 *
 * @retval  @c true if owner.
 * @retval  @c false if not owner.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
 *          RTCritSectRwIsWriteOwner.
 */
VMMDECL(bool) PDMCritSectRwIsWriteOwner(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Check ownership.
     */
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeWriter == NIL_RTNATIVETHREAD)
        return false;
    return hNativeWriter == pdmCritSectRwGetNativeSelf(pThis);
}


/**
 * Checks if the caller is one of the read owners of the critical section.
 *
 * @note    !CAUTION!  This API doesn't work reliably if lock validation isn't
 *          enabled.  Meaning, the answer is not trustworthy unless
 *          RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
 *          Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
 *          creating the semaphore.  And finally, if you used a locking class,
 *          don't disable deadlock detection by setting cMsMinDeadlock to
 *          RT_INDEFINITE_WAIT.
 *
 *          In short, only use this for assertions.
 *
 * @returns @c true if reader, @c false if not.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fWannaHear  What you'd like to hear when lock validation is not
 *                      available.  (For avoiding asserting all over the place.)
 * @sa      PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
 */
VMMDECL(bool) PDMCritSectRwIsReadOwner(PPDMCRITSECTRW pThis, bool fWannaHear)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Inspect the state.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
    {
        /*
         * It's in write mode, so we can only be a reader if we're also the
         * current writer.
         */
        RTNATIVETHREAD hWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
        if (hWriter == NIL_RTNATIVETHREAD)
            return false;
        return hWriter == pdmCritSectRwGetNativeSelf(pThis);
    }

    /*
     * Read mode.  If there are no current readers, then we cannot be a reader.
     */
    if (!(u64State & RTCSRW_CNT_RD_MASK))
        return false;

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    /*
     * Ask the lock validator.
     * Note! It doesn't know everything, let's deal with that if it becomes an issue...
     */
    return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
#else
    /*
     * Ok, we don't know, just tell the caller what he wants to hear.
     */
    return fWannaHear;
#endif
}

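/*
 * Hypothetical usage sketch (not part of the original file): the ownership
 * queries are meant for assertions - the read-owner one explicitly so, since
 * it is only reliable when the lock validator is compiled in.  Names made up.
 */
#if 0 /* example only */
static uint32_t exDevGetCachedValue(PEXDEVSTATE pDevState)
{
    /* Caller contract: must hold the section (shared access is enough). */
    Assert(PDMCritSectRwIsReadOwner(&pDevState->CritSectRw, true /*fWannaHear*/));
    return pDevState->au32Table[0];
}

static void exDevPatchTable(PEXDEVSTATE pDevState, uint32_t uValue)
{
    /* Caller contract: must hold the section exclusively. */
    Assert(PDMCritSectRwIsWriteOwner(&pDevState->CritSectRw));
    pDevState->au32Table[0] = uValue;
}
#endif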

/**
 * Gets the write recursion count.
 *
 * @returns The write recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriteRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriteRecursions;
}


/**
 * Gets the read recursion count of the current writer.
 *
 * @returns The read recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriterReadRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriterReads;
}


/**
 * Gets the current number of reads.
 *
 * This includes all read recursions, so it might be higher than the number of
 * read owners.  It does not include reads done by the current writer.
 *
 * @returns The read count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
 *          RTCritSectRwGetReadCount.
 */
VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        return 0;
    return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
}


/**
 * Checks if the read/write critical section is initialized or not.
 *
 * @retval  @c true if initialized.
 * @retval  @c false if not initialized.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
 */
VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
{
    AssertPtr(pThis);
    return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
}