VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMMAll.cpp@48580

Last change on this file since 48580 was 48441, checked in by vboxsync, 11 years ago

VMM/VMMAll: Fix nasty wake up with SMP VMs and thread-context hooks.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 10.2 KB
/* $Id: VMMAll.cpp 48441 2013-09-11 17:35:55Z vboxsync $ */
/** @file
 * VMM All Contexts.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm/vmm.h>
#include "VMMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmcpuset.h>
#include <VBox/param.h>
#include <iprt/thread.h>
#include <iprt/mp.h>


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** User counter for the vmmInitFormatTypes function (pro forma). */
static volatile uint32_t g_cFormatTypeUsers = 0;

/**
 * Helper that formats a decimal number in the range 0..9999.
 *
 * @returns The length of the formatted number.
 * @param   pszBuf      Output buffer with sufficient space.
 * @param   uNumber     The number to format.
 */
static unsigned vmmFormatTypeShortNumber(char *pszBuf, uint32_t uNumber)
{
    unsigned off = 0;
    if (uNumber >= 10)
    {
        if (uNumber >= 100)
        {
            if (uNumber >= 1000)
                pszBuf[off++] = ((uNumber / 1000) % 10) + '0';
            pszBuf[off++] = ((uNumber / 100) % 10) + '0';
        }
        pszBuf[off++] = ((uNumber / 10) % 10) + '0';
    }
    pszBuf[off++] = (uNumber % 10) + '0';
    pszBuf[off] = '\0';
    return off;
}
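#if 0 /* Illustrative sketch, preprocessed out: how the helper behaves as it would be called
         from a caller's body.  It writes the decimal digits plus a terminator and returns the
         digit count, e.g. 7 -> "7" (returns 1), 4095 -> "4095" (returns 4). */
    char     szBuf[8];
    unsigned cchNum = vmmFormatTypeShortNumber(szBuf, 4095);   /* szBuf = "4095", cchNum = 4 */
    NOREF(cchNum);
#endif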


/**
 * @callback_method_impl{FNRTSTRFORMATTYPE, vmcpuset}
 */
static DECLCALLBACK(size_t) vmmFormatTypeVmCpuSet(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                                  const char *pszType, void const *pvValue,
                                                  int cchWidth, int cchPrecision, unsigned fFlags,
                                                  void *pvUser)
{
    PCVMCPUSET pSet  = (PCVMCPUSET)pvValue;
    uint32_t   cCpus = 0;
    uint32_t   iCpu  = RT_ELEMENTS(pSet->au32Bitmap) * 32;
    while (iCpu--)
        if (VMCPUSET_IS_PRESENT(pSet, iCpu))
            cCpus++;

    char szTmp[32];
    AssertCompile(RT_ELEMENTS(pSet->au32Bitmap) * 32 < 999);
    if (cCpus == 1)
    {
        iCpu = RT_ELEMENTS(pSet->au32Bitmap) * 32;
        while (iCpu--)
            if (VMCPUSET_IS_PRESENT(pSet, iCpu))
            {
                szTmp[0] = 'c';
                szTmp[1] = 'p';
                szTmp[2] = 'u';
                return pfnOutput(pvArgOutput, szTmp, 3 + vmmFormatTypeShortNumber(&szTmp[3], iCpu));
            }
        cCpus = 0;
    }
    if (cCpus == 0)
        return pfnOutput(pvArgOutput, RT_STR_TUPLE("<empty>"));
    if (cCpus == RT_ELEMENTS(pSet->au32Bitmap) * 32)
        return pfnOutput(pvArgOutput, RT_STR_TUPLE("<full>"));

    /*
     * Print cpus that are present: {1,2,7,9 ... }
     */
    size_t cchRet = pfnOutput(pvArgOutput, "{", 1);

    cCpus = 0;
    iCpu  = 0;
    while (iCpu < RT_ELEMENTS(pSet->au32Bitmap) * 32)
    {
        if (VMCPUSET_IS_PRESENT(pSet, iCpu))
        {
            /* Output the first cpu number, prefixing it with a comma for all but the first group. */
            int off = 0;
            if (cCpus != 0)
                szTmp[off++] = ',';
            cCpus++;
            off += vmmFormatTypeShortNumber(&szTmp[off], iCpu);

            /* Check for sequence. */
            uint32_t const iStart = ++iCpu;
            while (   iCpu < RT_ELEMENTS(pSet->au32Bitmap) * 32
                   && VMCPUSET_IS_PRESENT(pSet, iCpu))
                iCpu++;
            if (iCpu != iStart)
            {
                szTmp[off++] = '-';
                off += vmmFormatTypeShortNumber(&szTmp[off], iCpu - 1); /* last present CPU in the run */
            }

            /* Terminate and output. */
            szTmp[off] = '\0';
            cchRet += pfnOutput(pvArgOutput, szTmp, off);
        }
        iCpu++;
    }

    cchRet += pfnOutput(pvArgOutput, "}", 1);
    NOREF(pvUser);
    return cchRet;
}
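/* Rendering summary (informative): an empty set formats as "<empty>", a set with every
   possible CPU present as "<full>", a set with exactly one CPU as e.g. "cpu3", and any
   other set as a brace-enclosed list of the present CPU numbers with consecutive runs
   collapsed into ranges, e.g. "{1-2,7,9}". */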


/**
 * Registers the VMM wide format types.
 *
 * Called by VMMR3Init, VMMR0Init and VMMRCInit.
 */
int vmmInitFormatTypes(void)
{
    int rc = VINF_SUCCESS;
    if (ASMAtomicIncU32(&g_cFormatTypeUsers) == 1)
        rc = RTStrFormatTypeRegister("vmcpuset", vmmFormatTypeVmCpuSet, NULL);
    return rc;
}
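#if 0 /* Illustrative sketch, preprocessed out: once registered, the type is used through
         IPRT's "%R[type]" format convention.  The set built here and the log message are
         purely hypothetical. */
    VMCPUSET CpuSet;
    VMCPUSET_EMPTY(&CpuSet);
    VMCPUSET_ADD(&CpuSet, 0);
    VMCPUSET_ADD(&CpuSet, 3);
    LogRel(("Poking VCPUs: %R[vmcpuset]\n", &CpuSet));   /* e.g. "Poking VCPUs: {0,3}" */
#endif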


#ifndef IN_RC
/**
 * Counterpart to vmmInitFormatTypes, called by VMMR3Term and VMMR0Term.
 */
void vmmTermFormatTypes(void)
{
    if (ASMAtomicDecU32(&g_cFormatTypeUsers) == 0)
        RTStrFormatTypeDeregister("vmcpuset");
}
#endif


/**
 * Gets the bottom of the hypervisor stack - RC Ptr.
 *
 * (The returned address is not actually writable, only after it's decremented
 * by a push/ret/whatever does it become writable.)
 *
 * @returns bottom of the stack.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMM_INT_DECL(RTRCPTR) VMMGetStackRC(PVMCPU pVCpu)
{
    return (RTRCPTR)pVCpu->vmm.s.pbEMTStackBottomRC;
}


/**
 * Gets the ID of the virtual CPU associated with the calling thread.
 *
 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
 *
 * @param   pVM         Pointer to the VM.
 * @internal
 */
VMMDECL(VMCPUID) VMMGetCpuId(PVM pVM)
{
#if defined(IN_RING3)
    return VMR3GetVMCPUId(pVM);

#elif defined(IN_RING0)
    if (pVM->cCpus == 1)
        return 0;

    /* Search first by host cpu id (most common case)
     * and then by native thread id (page fusion case).
     */
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        /** @todo r=ramshankar: This doesn't buy us anything in terms of performance
         *        leaving it here for hysterical raisins and as a reference if we
         *        implemented a hashing approach in the future. */
        RTCPUID idHostCpu = RTMpCpuId();

        /** @todo optimize for large number of VCPUs when that becomes more common. */
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (pVCpu->idHostCpu == idHostCpu)
                return pVCpu->idCpu;
        }
    }

    /* RTThreadNativeSelf had better be cheap. */
    RTNATIVETHREAD hThread = RTThreadNativeSelf();

    /** @todo optimize for large number of VCPUs when that becomes more common. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        if (pVCpu->hNativeThreadR0 == hThread)
            return pVCpu->idCpu;
    }
    return NIL_VMCPUID;

#else /* RC: Always EMT(0) */
    NOREF(pVM);
    return 0;
#endif
}
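#if 0 /* Illustrative sketch, preprocessed out: a typical ring-0 caller pattern.  The status
         code returned on failure (VERR_VM_THREAD_NOT_EMT) is an assumption about what such a
         caller would pick. */
    VMCPUID idCpu = VMMGetCpuId(pVM);
    AssertReturn(idCpu != NIL_VMCPUID, VERR_VM_THREAD_NOT_EMT); /* not called on an EMT */
    PVMCPU  pVCpu = &pVM->aCpus[idCpu];
#endif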


/**
 * Returns the VMCPU of the calling EMT.
 *
 * @returns The VMCPU pointer. NULL if not an EMT.
 *
 * @param   pVM         Pointer to the VM.
 * @internal
 */
VMMDECL(PVMCPU) VMMGetCpu(PVM pVM)
{
#ifdef IN_RING3
    VMCPUID idCpu = VMR3GetVMCPUId(pVM);
    if (idCpu == NIL_VMCPUID)
        return NULL;
    Assert(idCpu < pVM->cCpus);
    return &pVM->aCpus[idCpu];

#elif defined(IN_RING0)
    if (pVM->cCpus == 1)
        return &pVM->aCpus[0];

    /*
     * Search first by host cpu id (most common case)
     * and then by native thread id (page fusion case).
     */
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        /** @todo r=ramshankar: This doesn't buy us anything in terms of performance
         *        leaving it here for hysterical raisins and as a reference if we
         *        implemented a hashing approach in the future. */
        RTCPUID idHostCpu = RTMpCpuId();

        /** @todo optimize for large number of VCPUs when that becomes more common. */
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (pVCpu->idHostCpu == idHostCpu)
                return pVCpu;
        }
    }

    /* RTThreadNativeSelf had better be cheap. */
    RTNATIVETHREAD hThread = RTThreadNativeSelf();

    /** @todo optimize for large number of VCPUs when that becomes more common. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        if (pVCpu->hNativeThreadR0 == hThread)
            return pVCpu;
    }
    return NULL;

#else /* RC: Always EMT(0) */
    return &pVM->aCpus[0];
#endif /* IN_RING0 */
}
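#if 0 /* Illustrative sketch, preprocessed out: resolving the calling EMT's VMCPU.  As above,
         the failure status used here is an assumption made for the example. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);   /* NULL when the caller is not an EMT */
    VMCPU_ASSERT_EMT(pVCpu);                       /* paranoia in strict builds */
#endif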


/**
 * Returns the VMCPU of the first EMT thread.
 *
 * @returns The VMCPU pointer.
 * @param   pVM         Pointer to the VM.
 * @internal
 */
VMMDECL(PVMCPU) VMMGetCpu0(PVM pVM)
{
    Assert(pVM->cCpus == 1);
    return &pVM->aCpus[0];
}


/**
 * Returns the VMCPU of the specified virtual CPU.
 *
 * @returns The VMCPU pointer. NULL if idCpu is invalid.
 *
 * @param   pVM         Pointer to the VM.
 * @param   idCpu       The ID of the virtual CPU.
 * @internal
 */
VMMDECL(PVMCPU) VMMGetCpuById(PVM pVM, RTCPUID idCpu)
{
    AssertReturn(idCpu < pVM->cCpus, NULL);
    return &pVM->aCpus[idCpu];
}


/**
 * Gets the VBOX_SVN_REV.
 *
 * This is just to avoid having to compile a bunch of big files
 * and to keep the Makefile mess smaller.
 *
 * @returns VBOX_SVN_REV.
 */
VMM_INT_DECL(uint32_t) VMMGetSvnRev(void)
{
    return VBOX_SVN_REV;
}


/**
 * Queries the current switcher.
 *
 * @returns The active switcher.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(VMMSWITCHER) VMMGetSwitcher(PVM pVM)
{
    return pVM->vmm.s.enmSwitcher;
}


/**
 * Checks whether we're in a ring-3 call or not.
 *
 * @returns true / false.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @thread  EMT
 */
VMM_INT_DECL(bool) VMMIsInRing3Call(PVMCPU pVCpu)
{
#ifdef RT_ARCH_X86
    return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
#else
    return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
#endif
}


/**
 * Returns the build type for matching components.
 *
 * @returns Build type value.
 */
uint32_t vmmGetBuildType(void)
{
    uint32_t uRet = 0xbeef0000;
#ifdef DEBUG
    uRet |= RT_BIT_32(0);
#endif
#ifdef VBOX_WITH_STATISTICS
    uRet |= RT_BIT_32(1);
#endif
    return uRet;
}

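#if 0 /* Illustrative sketch, preprocessed out: decoding the value when comparing build types
         between components.  The local variable names are made up for the example. */
    uint32_t const uBuildType  = vmmGetBuildType();
    Assert((uBuildType & UINT32_C(0xffff0000)) == UINT32_C(0xbeef0000)); /* fixed marker */
    bool const     fDebugBuild = RT_BOOL(uBuildType & RT_BIT_32(0));     /* DEBUG */
    bool const     fWithStats  = RT_BOOL(uBuildType & RT_BIT_32(1));     /* VBOX_WITH_STATISTICS */
#endif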