VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 19240

Last change on this file since 19240 was 19141, checked in by vboxsync, 16 years ago

Action flags breakup.
Fixed PGM saved state loading of 2.2.2 images.
Reduced hacks in PATM state loading (fixups).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 7.6 KB
Line 
1/* $Id: PDMAllCritSect.cpp 19141 2009-04-23 13:52:18Z vboxsync $ */
2/** @file
3 * PDM - Critical Sections, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
27#include "PDMInternal.h"
28#include <VBox/pdm.h>
29#include <VBox/mm.h>
30#include <VBox/vm.h>
31#include <VBox/err.h>
32#include <VBox/hwaccm.h>
33
34#include <VBox/log.h>
35#include <iprt/asm.h>
36#include <iprt/assert.h>
37#ifdef IN_RING3
38# include <iprt/semaphore.h>
39#endif
40
41
/**
 * Enters a PDM critical section.
 *
 * In ring-3 this may block; in raw-mode/ring-0 it never blocks and instead
 * returns @a rcBusy so the caller can retry the operation in ring-3.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
    Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
#ifdef IN_RING3
    /* Ring-3 can block, so simply defer to the IPRT critical section;
       rcBusy is only meaningful in the GC/R0 path below. */
    NOREF(rcBusy);

    /* Contention accounting: cLockers >= 0 means somebody holds or waits on
       the section; don't count re-entry by the current owner. */
    STAM_REL_STATS({if (pCritSect->s.Core.cLockers >= 0 && !RTCritSectIsOwner(&pCritSect->s.Core))
                        STAM_COUNTER_INC(&pCritSect->s.StatContentionR3); });
    int rc = RTCritSectEnter(&pCritSect->s.Core);
    /* Start the hold-time profile only on the outermost (first) entry. */
    STAM_STATS({ if (pCritSect->s.Core.cNestings == 1) STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l); });
    return rc;

#else  /* !IN_RING3 */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    Assert(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Try to take the lock.
     */
    /* cLockers is -1 when the section is free; winning the cmpxchg from -1
       to 0 makes us the owner. */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
    {
        pCritSect->s.Core.cNestings = 1;
        Assert(pVCpu->hNativeThread);
        ASMAtomicXchgSize(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
        return VINF_SUCCESS;
    }

    /*
     * Nested?
     */
    if (pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread)
    {
        pCritSect->s.Core.cNestings++;
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        return VINF_SUCCESS;
    }

    /*
     * Failed.
     */
    /* We cannot wait here in GC/R0; hand rcBusy back so the caller can
       redo the operation in ring-3. */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    return rcBusy;
#endif /* !IN_RING3 */
}
102
103
#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallHost   Whether this is a VMMGCCallHost() or VMMR0CallHost() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallHost)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (rc != VINF_SUCCESS || !fCallHost)
        return rc;

    /* For call-host requests, drop the strict-build lock bookkeeping entry
       (presumably so the validator doesn't trip when the request is
       serviced — confirm against the VMM call-host path). */
    RTTHREAD ThreadOwner = pCritSect->s.Core.Strict.ThreadOwner;
    if (ThreadOwner != NIL_RTTHREAD)
    {
        RTThreadWriteLockDec(ThreadOwner);
        ASMAtomicUoWriteSize(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
    }
    return rc;
}
#endif /* IN_RING3 */
128
129
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * In GC/R0 the final (non-nested) leave may have to be queued for ring-3
 * processing when there are waiters, since the underlying semaphore cannot
 * be signalled from those contexts.
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
# ifdef VBOX_WITH_STATISTICS
    /* Stop hold-time profiling when leaving the outermost nesting. */
    if (pCritSect->s.Core.cNestings == 1)
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
# endif
    /* Snapshot the event-to-signal request before leaving; another thread
       can enter the section the instant we have left it. */
    RTSEMEVENT EventToSignal = pCritSect->s.EventToSignal;
    if (RT_LIKELY(EventToSignal == NIL_RTSEMEVENT))
    {
        int rc = RTCritSectLeave(&pCritSect->s.Core);
        AssertRC(rc);
    }
    else
    {
        /* Clear the request while still inside the section, then signal
           the event after the actual leave. */
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
        int rc = RTCritSectLeave(&pCritSect->s.Core);
        AssertRC(rc);
        LogBird(("signalling %#x\n", EventToSignal));
        rc = RTSemEventSignal(EventToSignal);
        AssertRC(rc);
    }

#else  /* !IN_RING3 */
    Assert(VALID_PTR(pCritSect));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.cNestings > 0);
    Assert(pCritSect->s.Core.cLockers >= 0);
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    Assert(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu);
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread, ("Owner %RX64 emt=%RX64\n", pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread));

    /*
     * Deal with nested attempts first.
     * (We're exploiting nesting to avoid queuing multiple R3 leaves for the same section.)
     */
    pCritSect->s.Core.cNestings--;
    if (pCritSect->s.Core.cNestings > 0)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

    /*
     * Try leave it.
     */
    if (pCritSect->s.Core.cLockers == 0)
    {
        /* No waiters visible: release ownership first, then attempt to flip
           cLockers back to the free state (-1). */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMAtomicXchgSize(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
            return;

        /* darn, someone raced in on us. */
        /* Re-assert ownership and fall through to queue a ring-3 leave. */
        Assert(pVCpu->hNativeThread);
        ASMAtomicXchgSize(&pCritSect->s.Core.NativeThreadOwner, pVCpu->hNativeThread);
        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    }
    /* Keep the section formally owned (nesting 1) until ring-3 processes
       the queued leave. */
    pCritSect->s.Core.cNestings = 1;

    /*
     * Queue the request.
     */
    /* Record the section in the per-VM queue and force a switch to ring-3,
       where the queued leaves are performed. */
    RTUINT i = pVM->pdm.s.cQueuedCritSectLeaves++;
    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    AssertFatal(i < RT_ELEMENTS(pVM->pdm.s.apQueuedCritSectsLeaves));
    pVM->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
    VM_FF_SET(pVM, VM_FF_PDM_CRITSECT);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
#endif /* !IN_RING3 */
}
210
211
212/**
213 * Checks the caller is the owner of the critical section.
214 *
215 * @returns true if owner.
216 * @returns false if not owner.
217 * @param pCritSect The critical section.
218 */
219VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
220{
221#ifdef IN_RING3
222 return RTCritSectIsOwner(&pCritSect->s.Core);
223#else
224 PVM pVM = pCritSect->s.CTX_SUFF(pVM);
225 Assert(pVM);
226 return pCritSect->s.Core.NativeThreadOwner == VMMGetCpu(pVM)->hNativeThread;
227#endif
228}
229
230
231/**
232 * Checks if a critical section is initialized or not.
233 *
234 * @returns true if initialized.
235 * @returns false if not initialized.
236 * @param pCritSect The critical section.
237 */
238VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
239{
240 return pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC;
241}
242
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette