VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@23011

Last change on this file since 23011 was 22890, checked in by vboxsync, 15 years ago

VM::cCPUs -> VM::cCpus so it matches all the other cCpus and aCpus members.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.8 KB
/* $Id: PDMAllCritSect.cpp 22890 2009-09-09 23:11:31Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256

/** @def PDMCRITSECT_STRICT
 * Enables/disables PDM critsect strictness like deadlock detection. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define PDMCRITSECT_STRICT
#endif


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


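/*
 * A short note on the helper above: in ring-3 any thread may use a critical
 * section, so the handle comes straight from RTThreadNativeSelf().  In ring-0
 * and raw-mode context the caller is expected to be an EMT, so the handle is
 * taken from the VMCPU structure via VMMGetCpu() instead, which is also why
 * pCritSect is only dereferenced (and validated) in those contexts.
 */
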
/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
    pCritSect->s.Core.Strict.pszEnterFile = NULL;
    pCritSect->s.Core.Strict.u32EnterLine = 0;
    pCritSect->s.Core.Strict.uEnterId     = 0;
    RTTHREAD hSelf = RTThreadSelf();
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf);
    RTThreadWriteLockInc(hSelf);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


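/*
 * A quick sketch of the lock state encoding used throughout this file:
 * cLockers is -1 when the section is free, 0 when owned with no waiters, and
 * N > 0 when there are N additional lockers (waiters or nested entries).  The
 * uncontended enter is therefore a single atomic compare-exchange of cLockers
 * from -1 to 0, while cNestings tracks the owner's recursion depth separately.
 */
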
#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD hSelf = RTThreadSelf();
    if (hSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef PDMCRITSECT_STRICT
        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
# endif
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


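/*
 * Note that the wait above is interruptible (SUPSemEventWaitNoResume): a
 * VERR_INTERRUPTED wake-up simply loops around and waits again.  The magic
 * value is re-checked after every wake-up, so a section destroyed while we
 * slept is reported as VERR_SEM_DESTROYED rather than being re-entered.
 */
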
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
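    /* CTX_SUFF appends the current context suffix (R3, R0 or RC) to the
       identifier, so this picks the matching PDMCRITSECT_SPIN_COUNT_* define
       for whichever context this file is being compiled in. */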
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
         *        cli'ed pendingpreemption check up front using sti w/ instruction fusing
         *        for avoiding races. Hmm ... This is assuming the other party is actually
         *        executing code on another CPU ... which we could keep track of if we
         *        wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}


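/*
 * To summarise the enter logic above: an uncontended section is taken with a
 * single compare-exchange, recursion by the owner just bumps the counters, and
 * a short spin is tried before giving up.  Only ring-3 can block on the event
 * semaphore; ring-0 and raw-mode context hand the problem back to the caller
 * via rcBusy, which typically forces a trip to ring-3.
 */
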
/**
 * Tries to enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a section that doesn't allow nesting. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


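/*
 * Unlike PDMCritSectEnter, the try-enter variant above neither spins nor
 * blocks: if the first compare-exchange fails and the section isn't already
 * owned by this thread, it immediately reports VERR_SEM_BUSY in all contexts.
 */
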
#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
    {
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
        ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
    }
    return rc;
}
#endif /* IN_RING3 */


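/*
 * The fCallRing3 case above drops the strict-build lock-validator ownership
 * record right after entering.  This appears to be done because the section
 * was entered on behalf of a ring-0/raw-mode caller, so the ring-3 thread
 * must not keep a validator record that another context will release later.
 */
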
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
            RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
        ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
#  endif
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif  /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);              AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}


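/*
 * In ring-0 with interrupts disabled, and always in raw-mode context, the
 * leave path above cannot safely signal the event semaphore.  If nobody is
 * waiting it still attempts an atomic hand-back of the lock (cLockers 0 -> -1);
 * otherwise it marks the section with PDMCRITSECT_FLAGS_PENDING_UNLOCK, queues
 * it on the VCPU and raises VMCPU_FF_PDM_CRITSECT plus VMCPU_FF_TO_R3 so that
 * PDMCritSectFF below can complete the leave once execution is back in ring-3.
 */
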
#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


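/*
 * The leave queue stores ring-3 pointers (see the MMHyperCCToR3() call in
 * PDMCritSectLeave), which is why the ring-0 build of PDMCritSectFF has to
 * translate them back with MMHyperR3ToCC() before calling PDMCritSectLeave().
 */
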
/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


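/*
 * Note that in ring-0 and raw-mode context a section with a queued unlock
 * pending (PDMCRITSECT_FLAGS_PENDING_UNLOCK) is deliberately not reported as
 * owned here or in the Ex variant below: the owner has logically left it even
 * though the hand-over to ring-3 hasn't completed yet.
 */
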
/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}