VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMInternal.h@ 38838

Last change on this file since 38838 was 38838, checked in by vboxsync, 13 years ago

VMM,++: Try to fix the async reset, suspend and power-off problems in PDM wrt conflicting VMM requests. Split them into priority requests and normal requests. The priority requests can safely be processed when PDM is doing async state change waits, the normal ones cannot. (The problem I bumped into was an unmap-chunk request from PGM being processed during PDMR3Reset, causing a recursive VMMR3EmtRendezvous deadlock.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.0 KB
Line 
1/* $Id: VMInternal.h 38838 2011-09-23 11:21:55Z vboxsync $ */
2/** @file
3 * VM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___VMInternal_h
19#define ___VMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/vmm/vmapi.h>
23#include <iprt/assert.h>
24#include <iprt/critsect.h>
25#include <setjmp.h>
26
27
28
29/** @defgroup grp_vm_int Internals
30 * @ingroup grp_vm
31 * @internal
32 * @{
33 */
34
35
/**
 * VM state change callback registration record.
 *
 * Kept in a singly linked list; see VMINTUSERPERVM::pAtState.
 */
typedef struct VMATSTATE
{
    /** Pointer to the next one. */
    struct VMATSTATE *pNext;
    /** Pointer to the callback. */
    PFNVMATSTATE pfnAtState;
    /** The user argument passed to the callback. */
    void *pvUser;
} VMATSTATE;
/** Pointer to a VM state change callback. */
typedef VMATSTATE *PVMATSTATE;
50
51
/**
 * VM error callback registration record.
 *
 * Kept in a singly linked list; see VMINTUSERPERVM::pAtError.
 */
typedef struct VMATERROR
{
    /** Pointer to the next one. */
    struct VMATERROR *pNext;
    /** Pointer to the callback. */
    PFNVMATERROR pfnAtError;
    /** The user argument passed to the callback. */
    void *pvUser;
} VMATERROR;
/** Pointer to a VM error callback. */
typedef VMATERROR *PVMATERROR;
66
67
/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the error details.
 *
 * The variable-length strings (file name, function name, message) are stored
 * after this header inside the same allocation and addressed via offsets
 * relative to the start of the structure, so the whole error record can be
 * copied or mapped as one flat blob.
 */
typedef struct VMERROR
{
    /** The size of the chunk (the whole allocation, header included). */
    uint32_t cbAllocated;
    /** The current offset into the chunk.
     * We start by putting the filename and function immediately
     * after the end of the buffer. */
    uint32_t off;
    /** Offset from the start of this structure to the file name. */
    uint32_t offFile;
    /** The line number. */
    uint32_t iLine;
    /** Offset from the start of this structure to the function name. */
    uint32_t offFunction;
    /** Offset from the start of this structure to the formatted message text. */
    uint32_t offMessage;
    /** The VBox status code. */
    int32_t rc;
} VMERROR, *PVMERROR;
91
92
/**
 * VM runtime error callback registration record.
 *
 * Kept in a singly linked list; see VMINTUSERPERVM::pAtRuntimeError.
 */
typedef struct VMATRUNTIMEERROR
{
    /** Pointer to the next one. */
    struct VMATRUNTIMEERROR *pNext;
    /** Pointer to the callback. */
    PFNVMATRUNTIMEERROR pfnAtRuntimeError;
    /** The user argument passed to the callback. */
    void *pvUser;
} VMATRUNTIMEERROR;
/** Pointer to a VM runtime error callback. */
typedef VMATRUNTIMEERROR *PVMATRUNTIMEERROR;
107
108
/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the runtime error details.
 *
 * Same flat-blob layout idea as VMERROR: the variable-length strings live
 * after this header in the same allocation and are addressed via offsets.
 */
typedef struct VMRUNTIMEERROR
{
    /** The size of the chunk (the whole allocation, header included). */
    uint32_t cbAllocated;
    /** The current offset into the chunk.
     * We start by putting the error ID immediately
     * after the end of the buffer. */
    uint32_t off;
    /** Offset from the start of this structure to the error ID. */
    uint32_t offErrorId;
    /** Offset from the start of this structure to the formatted message text. */
    uint32_t offMessage;
    /** Error flags (VMSETRTERR_FLAGS_*, presumably -- confirm against vmapi.h). */
    uint32_t fFlags;
} VMRUNTIMEERROR, *PVMRUNTIMEERROR;
128
/** The halt method.
 * Selects the algorithm EMT uses for halting/blocking; see the
 * Halt union members in VMINTUSERPERVM / VMINTUSERPERVMCPU. */
typedef enum
{
    /** The usual invalid value. */
    VMHALTMETHOD_INVALID = 0,
    /** Use the method used during bootstrapping. */
    VMHALTMETHOD_BOOTSTRAP,
    /** Use the default method. */
    VMHALTMETHOD_DEFAULT,
    /** The old spin/yield/block method. */
    VMHALTMETHOD_OLD,
    /** The first go at a block/spin method. */
    VMHALTMETHOD_1,
    /** The first go at a more global approach. */
    VMHALTMETHOD_GLOBAL_1,
    /** The end of valid methods. (not inclusive of course) */
    VMHALTMETHOD_END,
    /** The usual 32-bit max value, forcing a 32-bit enum size. */
    VMHALTMETHOD_32BIT_HACK = 0x7fffffff
} VMHALTMETHOD;
149
150
/**
 * VM Internal Data (part of the VM structure).
 *
 * @todo Move this and all related things to VMM. The VM component was, to some
 *       extent at least, a bad ad hoc design which should all have been put in
 *       VMM. @see pg_vm.
 */
typedef struct VMINT
{
    /** VM Error Message. (Ring-3 pointer; only valid in ring-3 context.) */
    R3PTRTYPE(PVMERROR) pErrorR3;
    /** VM Runtime Error Message. (Ring-3 pointer.) */
    R3PTRTYPE(PVMRUNTIMEERROR) pRuntimeErrorR3;
    /** The VM was/is-being teleported and has not yet been fully resumed. */
    bool fTeleportedAndNotFullyResumedYet;
} VMINT;
/** Pointer to the VM Internal Data (part of the VM structure). */
typedef VMINT *PVMINT;
169
170
/**
 * VM internal data kept in the UVM (the ring-3 user-mode VM handle).
 */
typedef struct VMINTUSERPERVM
{
    /** Head of the standard request queue. Atomic. */
    volatile PVMREQ pNormalReqs;
    /** Head of the priority request queue. Atomic.
     * Priority requests may be processed even while PDM is doing async state
     * change waits; normal ones may not (see r38838 commit message). */
    volatile PVMREQ pPriorityReqs;
    /** The last index used during alloc/free. */
    volatile uint32_t iReqFree;
    /** Number of free request packets. */
    volatile uint32_t cReqFree;
    /** Array of pointers to lists of free request packets. Atomic. */
    volatile PVMREQ apReqFree[16-4];

    /** The reference count of the UVM handle. */
    volatile uint32_t cUvmRefs;

#ifdef VBOX_WITH_STATISTICS
    /** Number of VMR3ReqAlloc returning a new packet. */
    STAMCOUNTER StatReqAllocNew;
    /** Number of VMR3ReqAlloc causing races. */
    STAMCOUNTER StatReqAllocRaces;
    /** Number of VMR3ReqAlloc returning a recycled packet. */
    STAMCOUNTER StatReqAllocRecycled;
    /** Number of VMR3ReqFree calls. */
    STAMCOUNTER StatReqFree;
    /** Number of times the request was actually freed. */
    STAMCOUNTER StatReqFreeOverflow;
    /** Number of requests served. */
    STAMCOUNTER StatReqProcessed;
    /** Number of times there are more than one request and the others needed to be
     * pushed back onto the list. */
    STAMCOUNTER StatReqMoreThan1;
    /** Number of times we've raced someone when pushing the other requests back
     * onto the list. */
    STAMCOUNTER StatReqPushBackRaces;
#endif

    /** Pointer to the support library session.
     * Mainly for creation and destruction. */
    PSUPDRVSESSION pSession;

    /** Force EMT to terminate. */
    bool volatile fTerminateEMT;

    /** Critical section for pAtState and enmPrevVMState. */
    RTCRITSECT AtStateCritSect;
    /** List of registered state change callbacks. */
    PVMATSTATE pAtState;
    /** Where to link in the next state change callback (tail insertion point). */
    PVMATSTATE *ppAtStateNext;
    /** The previous VM state.
     * This is mainly used for the 'Resetting' state, but may come in handy later
     * and when debugging. */
    VMSTATE enmPrevVMState;

    /** Critical section for pAtError and pAtRuntimeError. */
    RTCRITSECT AtErrorCritSect;

    /** List of registered error callbacks. */
    PVMATERROR pAtError;
    /** Where to link in the next error callback (tail insertion point). */
    PVMATERROR *ppAtErrorNext;
    /** The error message count.
     * This is incremented every time an error is raised. */
    uint32_t volatile cErrors;

    /** The runtime error message count.
     * This is incremented every time a runtime error is raised. */
    uint32_t volatile cRuntimeErrors;
    /** List of registered runtime error callbacks. */
    PVMATRUNTIMEERROR pAtRuntimeError;
    /** Where to link in the next runtime error callback (tail insertion point). */
    PVMATRUNTIMEERROR *ppAtRuntimeErrorNext;

    /** @name Generic Halt data
     * @{
     */
    /** The current halt method.
     * Can be selected by CFGM option 'VM/HaltMethod'. */
    VMHALTMETHOD enmHaltMethod;
    /** The index into g_aHaltMethods of the current halt method. */
    uint32_t volatile iHaltMethod;
    /** @} */

    /** @todo Do NOT add new members here or reuse the current, we need to store the config for
     *        each halt method separately because we're racing on SMP guest rigs. */
    union
    {
        /**
         * Method 1 & 2 - Block whenever possible, and when lagging behind
         * switch to spinning with regular blocking every 5-200ms (defaults)
         * depending on the accumulated lag. The blocking interval is adjusted
         * with the average oversleeping of the last 64 times.
         *
         * The difference between 1 and 2 is that we use native absolute
         * time APIs for the blocking instead of the millisecond based IPRT
         * interface.
         */
        struct
        {
            /** The max interval without blocking (when spinning).
             * NOTE(review): the min/max wording on these two members looks
             * swapped relative to the member names -- confirm against vm.cpp. */
            uint32_t u32MinBlockIntervalCfg;
            /** The minimum interval between blocking (when spinning). */
            uint32_t u32MaxBlockIntervalCfg;
            /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
            uint32_t u32LagBlockIntervalDivisorCfg;
            /** When to start spinning (lag / nano secs). */
            uint32_t u32StartSpinningCfg;
            /** When to stop spinning (lag / nano secs). */
            uint32_t u32StopSpinningCfg;
        } Method12;

        /**
         * The GVMM manages halted and waiting EMTs.
         */
        struct
        {
            /** The threshold between spinning and blocking. */
            uint32_t cNsSpinBlockThresholdCfg;
        } Global1;
    } Halt;

    /** Pointer to the DBGC instance data. */
    void *pvDBGC;

    /** TLS index for the VMINTUSERPERVMCPU pointer. */
    RTTLS idxTLS;

    /** The VM name. (Set after the config constructor has been called.) */
    char *pszName;
    /** The VM UUID. (Set after the config constructor has been called.) */
    RTUUID Uuid;
} VMINTUSERPERVM;

/** Pointer to the VM internal data kept in the UVM. */
typedef VMINTUSERPERVM *PVMINTUSERPERVM;
310
311
/**
 * VMCPU internal data kept in the UVM (one instance per EMT/virtual CPU).
 *
 * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
 */
typedef struct VMINTUSERPERVMCPU
{
    /** Head of the normal request queue. Atomic. */
    volatile PVMREQ pNormalReqs;
    /** Head of the priority request queue. Atomic. */
    volatile PVMREQ pPriorityReqs;

    /** The handle to the EMT thread. */
    RTTHREAD ThreadEMT;
    /** The native thread handle of the EMT thread. */
    RTNATIVETHREAD NativeThreadEMT;
    /** Wait event semaphore. */
    RTSEMEVENT EventSemWait;
    /** Wait/Idle indicator. */
    bool volatile fWait;
    /** Padding to align the next member on an 8-byte boundary. */
    bool afAlignment[7];

    /** @name Generic Halt data
     * @{
     */
    /** The average time (ns) between two halts in the last second. (updated once per second) */
    uint32_t HaltInterval;
    /** The average halt frequency for the last second. (updated once per second) */
    uint32_t HaltFrequency;
    /** The number of halts in the current period. */
    uint32_t cHalts;
    uint32_t padding; /**< alignment padding. */
    /** When we started counting halts in cHalts (RTTimeNanoTS). */
    uint64_t u64HaltsStartTS;
    /** @} */

    /** Union containing data and config for the different halt algorithms. */
    union
    {
        /**
         * Method 1 & 2 - Block whenever possible, and when lagging behind
         * switch to spinning with regular blocking every 5-200ms (defaults)
         * depending on the accumulated lag. The blocking interval is adjusted
         * with the average oversleeping of the last 64 times.
         *
         * The difference between 1 and 2 is that we use native absolute
         * time APIs for the blocking instead of the millisecond based IPRT
         * interface.
         */
        struct
        {
            /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */
            uint32_t cBlocks;
            /** Align the next member. */
            uint32_t u32Alignment;
            /** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t cNSBlockedTooLongAvg;
            /** Total time spend oversleeping when blocking. */
            uint64_t cNSBlockedTooLong;
            /** Total time spent blocking. */
            uint64_t cNSBlocked;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t u64LastBlockTS;

            /** When we started spinning relentlessly in order to catch up some of the oversleeping.
             * This is 0 when we're not spinning. */
            uint64_t u64StartSpinTS;
        } Method12;

#if 0
        /**
         * Method 3 & 4 - Same as method 1 & 2 respectively, except that we
         * sprinkle it with yields.
         */
        struct
        {
            /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */
            uint32_t cBlocks;
            /** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t cBlockedTooLongNSAvg;
            /** Total time spend oversleeping when blocking. */
            uint64_t cBlockedTooLongNS;
            /** Total time spent blocking. */
            uint64_t cBlockedNS;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t u64LastBlockTS;

            /** How many times we've yielded while cBlockedNS and cBlockedTooLongNS has been accumulating. */
            uint32_t cYields;
            /** Avg. time spend oversleeping when yielding. */
            uint32_t cYieldTooLongNSAvg;
            /** Total time spend oversleeping when yielding. */
            uint64_t cYieldTooLongNS;
            /** Total time spent yielding. */
            uint64_t cYieldedNS;
            /** The timestamp (RTTimeNanoTS) of the last yield. */
            uint64_t u64LastYieldTS;

            /** When we started spinning relentlessly in order to catch up some of the oversleeping. */
            uint64_t u64StartSpinTS;
        } Method34;
#endif
    } Halt;

    /** Profiling the halted state; yielding vs blocking.
     * @{ */
    STAMPROFILE StatHaltYield;
    STAMPROFILE StatHaltBlock;
    STAMPROFILE StatHaltBlockOverslept;
    STAMPROFILE StatHaltBlockInsomnia;
    STAMPROFILE StatHaltBlockOnTime;
    STAMPROFILE StatHaltTimers;
    STAMPROFILE StatHaltPoll;
    /** @} */
} VMINTUSERPERVMCPU;
#ifdef IN_RING3
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, u64HaltsStartTS, 8);
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, Halt.Method12.cNSBlockedTooLongAvg, 8);
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, StatHaltYield, 8);
#endif

/** Pointer to the VMCPU internal data kept in the UVM. */
typedef VMINTUSERPERVMCPU *PVMINTUSERPERVMCPU;
436
RT_C_DECLS_BEGIN

/** The emulation thread (EMT) procedure. */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArg);
/** Selects and installs the halt method for the UVM. */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod);
/** Destruction worker (EMT callback). */
DECLCALLBACK(int) vmR3Destroy(PVM pVM);
/** Worker raising an error on the UVM handle (va_list pointer variant). */
DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *args);
/** Copies the error details into the VMERROR heap chunk. */
void vmSetErrorCopy(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list args);
/** Worker raising a runtime error (pre-formatted message variant). */
DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage);
/** Worker raising a runtime error (format string + va_list pointer variant). */
DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa);
/** Copies the runtime error details into the VMRUNTIMEERROR heap chunk. */
void vmSetRuntimeErrorCopy(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va);
/** Transitions the VM into the guru meditation state. */
void vmR3SetGuruMeditation(PVM pVM);
/** Marks the VM as terminated. */
void vmR3SetTerminated(PVM pVM);

RT_C_DECLS_END
451
452
453/** @} */
454
455#endif
456
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette