VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMInternal.h@74442

Last change on this file since 74442 was 73351, checked in by vboxsync, 6 years ago

VBoxGuest,VMMDev,DBGF,VM: Added bug check report to VBoxGuest/VMMDev and hooked it up to DBGF. Made DBGF remember the last reported bug check, adding an info handler for displaying it. Added VM reset counters w/ getters for use in bug check reporting.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 18.0 KB
/* $Id: VMInternal.h 73351 2018-07-25 13:02:11Z vboxsync $ */
/** @file
 * VM - Internal header file.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___VMInternal_h
#define ___VMInternal_h

#include <VBox/cdefs.h>
#include <VBox/vmm/vmapi.h>
#include <iprt/assert.h>
#include <iprt/critsect.h>
#include <setjmp.h>



/** @defgroup grp_vm_int Internals
 * @ingroup grp_vm
 * @internal
 * @{
 */


/**
 * VM state change callback.
 */
typedef struct VMATSTATE
{
    /** Pointer to the next one. */
    struct VMATSTATE *pNext;
    /** Pointer to the callback. */
    PFNVMATSTATE pfnAtState;
    /** The user argument. */
    void *pvUser;
} VMATSTATE;
/** Pointer to a VM state change callback. */
typedef VMATSTATE *PVMATSTATE;
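
/* Illustrative sketch (not part of the original header): state change callbacks are kept
 * in a singly linked list headed by VMINTUSERPERVM::pAtState (appended via ppAtStateNext,
 * see below). Invoking them looks roughly like the helper here; the helper name is invented
 * and the PFNVMATSTATE signature (new state, old state, user argument) is assumed from
 * VBox/vmm/vmapi.h. */
#if 0
static void vmDemoCallAtStateCallbacks(PUVM pUVM, VMSTATE enmState, VMSTATE enmOldState)
{
    /* Walk the list and hand the new/old state pair to every registered callback. */
    for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
        pCur->pfnAtState(pUVM, enmState, enmOldState, pCur->pvUser);
}
#endif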


/**
 * VM error callback.
 */
typedef struct VMATERROR
{
    /** Pointer to the next one. */
    struct VMATERROR *pNext;
    /** Pointer to the callback. */
    PFNVMATERROR pfnAtError;
    /** The user argument. */
    void *pvUser;
} VMATERROR;
/** Pointer to a VM error callback. */
typedef VMATERROR *PVMATERROR;


/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the error details.
 */
typedef struct VMERROR
{
    /** The size of the chunk. */
    uint32_t cbAllocated;
    /** The current offset into the chunk.
     * We start by putting the filename and function immediately
     * after the end of the buffer. */
    uint32_t off;
    /** Offset from the start of this structure to the file name. */
    uint32_t offFile;
    /** The line number. */
    uint32_t iLine;
    /** Offset from the start of this structure to the function name. */
    uint32_t offFunction;
    /** Offset from the start of this structure to the formatted message text. */
    uint32_t offMessage;
    /** The VBox status code. */
    int32_t rc;
} VMERROR, *PVMERROR;
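
/* Illustrative sketch (not part of the original header): VMERROR is a single variable-sized
 * allocation; the file name, function name and message text live inside the chunk and are
 * located via byte offsets from the start of the structure. Something along these lines
 * recovers them (the helper name is invented; a zero offset is assumed to mean "not set"). */
#if 0
static void vmDemoPrintError(PVMERROR pErr)
{
    const char *pszFile     = pErr->offFile     ? (const char *)pErr + pErr->offFile     : "<none>";
    const char *pszFunction = pErr->offFunction ? (const char *)pErr + pErr->offFunction : "<none>";
    const char *pszMessage  = pErr->offMessage  ? (const char *)pErr + pErr->offMessage  : "<none>";
    LogRel(("VM error %Rrc at %s(%u) %s: %s\n", pErr->rc, pszFile, pErr->iLine, pszFunction, pszMessage));
}
#endif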


/**
 * VM runtime error callback.
 */
typedef struct VMATRUNTIMEERROR
{
    /** Pointer to the next one. */
    struct VMATRUNTIMEERROR *pNext;
    /** Pointer to the callback. */
    PFNVMATRUNTIMEERROR pfnAtRuntimeError;
    /** The user argument. */
    void *pvUser;
} VMATRUNTIMEERROR;
/** Pointer to a VM runtime error callback. */
typedef VMATRUNTIMEERROR *PVMATRUNTIMEERROR;


/**
 * Chunk of memory allocated off the hypervisor heap in which
 * we copy the runtime error details.
 */
typedef struct VMRUNTIMEERROR
{
    /** The size of the chunk. */
    uint32_t cbAllocated;
    /** The current offset into the chunk.
     * We start by putting the error ID immediately
     * after the end of the buffer. */
    uint32_t off;
    /** Offset from the start of this structure to the error ID. */
    uint32_t offErrorId;
    /** Offset from the start of this structure to the formatted message text. */
    uint32_t offMessage;
    /** Error flags. */
    uint32_t fFlags;
} VMRUNTIMEERROR, *PVMRUNTIMEERROR;
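
/* Illustrative sketch (not part of the original header): VMRUNTIMEERROR follows the same
 * offset-into-the-chunk layout as VMERROR, with the error ID string and the formatted
 * message stored after the fixed fields. The flag check below assumes the
 * VMSETRTERR_FLAGS_FATAL bit from VBox/vmm/vmapi.h; the helper name is invented. */
#if 0
static bool vmDemoIsFatalRuntimeError(PVMRUNTIMEERROR pErr)
{
    const char *pszErrorId = pErr->offErrorId ? (const char *)pErr + pErr->offErrorId : "<unknown>";
    LogRel(("Runtime error '%s'%s\n", pszErrorId, pErr->fFlags & VMSETRTERR_FLAGS_FATAL ? " (fatal)" : ""));
    return RT_BOOL(pErr->fFlags & VMSETRTERR_FLAGS_FATAL);
}
#endif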

/** The halt method. */
typedef enum
{
    /** The usual invalid value. */
    VMHALTMETHOD_INVALID = 0,
    /** Use the method used during bootstrapping. */
    VMHALTMETHOD_BOOTSTRAP,
    /** Use the default method. */
    VMHALTMETHOD_DEFAULT,
    /** The old spin/yield/block method. */
    VMHALTMETHOD_OLD,
    /** The first go at a block/spin method. */
    VMHALTMETHOD_1,
    /** The first go at a more global approach. */
    VMHALTMETHOD_GLOBAL_1,
    /** The end of valid methods. (not inclusive of course) */
    VMHALTMETHOD_END,
    /** The usual 32-bit max value. */
    VMHALTMETHOD_32BIT_HACK = 0x7fffffff
} VMHALTMETHOD;
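
/* Illustrative sketch (not part of the original header): the active halt method is picked
 * from the CFGM key 'VM/HaltMethod' (see VMINTUSERPERVM::enmHaltMethod below) and applied
 * with vmR3SetHaltMethodU(), declared at the end of this file. The exact config handling
 * lives in the VM/EMT code; the helper below, its name, and the defaulting are assumptions
 * made for the example (requires VBox/vmm/cfgm.h). */
#if 0
static int vmDemoApplyHaltMethodFromConfig(PUVM pUVM, PVM pVM)
{
    uint32_t u32Method = VMHALTMETHOD_DEFAULT;
    int rc = CFGMR3QueryU32Def(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod",
                               &u32Method, VMHALTMETHOD_DEFAULT);
    if (RT_SUCCESS(rc))
        rc = vmR3SetHaltMethodU(pUVM, (VMHALTMETHOD)u32Method);
    return rc;
}
#endif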


/**
 * VM Internal Data (part of the VM structure).
 *
 * @todo Move this and all related things to VMM. The VM component was, to some
 *       extent at least, a bad ad hoc design which should all have been put in
 *       VMM. @see pg_vm.
 */
typedef struct VMINT
{
    /** VM Error Message. */
    R3PTRTYPE(PVMERROR) pErrorR3;
    /** VM Runtime Error Message. */
    R3PTRTYPE(PVMRUNTIMEERROR) pRuntimeErrorR3;
    /** The VM was or is being teleported and has not yet been fully resumed. */
    bool fTeleportedAndNotFullyResumedYet;
    /** The VM should power off instead of reset. */
    bool fPowerOffInsteadOfReset;
    /** Reset counter (soft + hard). */
    uint32_t cResets;
    /** Hard reset counter. */
    uint32_t cHardResets;
    /** Soft reset counter. */
    uint32_t cSoftResets;
} VMINT;
/** Pointer to the VM Internal Data (part of the VM structure). */
typedef VMINT *PVMINT;


#ifdef IN_RING3

/**
 * VM internal data kept in the UVM.
 */
typedef struct VMINTUSERPERVM
{
    /** Head of the standard request queue. Atomic. */
    volatile PVMREQ pNormalReqs;
    /** Head of the priority request queue. Atomic. */
    volatile PVMREQ pPriorityReqs;
    /** The last index used during alloc/free. */
    volatile uint32_t iReqFree;
    /** Number of free request packets. */
    volatile uint32_t cReqFree;
    /** Array of pointers to lists of free request packets. Atomic. */
    volatile PVMREQ apReqFree[16 - (HC_ARCH_BITS == 32 ? 5 : 4)];

    /** The reference count of the UVM handle. */
    volatile uint32_t cUvmRefs;

    /** Number of active EMTs. */
    volatile uint32_t cActiveEmts;

# ifdef VBOX_WITH_STATISTICS
#  if HC_ARCH_BITS == 32
    uint32_t uPadding;
#  endif
    /** Number of VMR3ReqAlloc returning a new packet. */
    STAMCOUNTER StatReqAllocNew;
    /** Number of VMR3ReqAlloc causing races. */
    STAMCOUNTER StatReqAllocRaces;
    /** Number of VMR3ReqAlloc returning a recycled packet. */
    STAMCOUNTER StatReqAllocRecycled;
    /** Number of VMR3ReqFree calls. */
    STAMCOUNTER StatReqFree;
    /** Number of times the request was actually freed (free list overflow). */
    STAMCOUNTER StatReqFreeOverflow;
    /** Number of requests served. */
    STAMCOUNTER StatReqProcessed;
    /** Number of times there was more than one request and the others needed to be
     * pushed back onto the list. */
    STAMCOUNTER StatReqMoreThan1;
    /** Number of times we've raced someone when pushing the other requests back
     * onto the list. */
    STAMCOUNTER StatReqPushBackRaces;
# endif

    /** Pointer to the support library session.
     * Mainly for creation and destruction. */
    PSUPDRVSESSION pSession;

    /** Force EMT to terminate. */
    bool volatile fTerminateEMT;

    /** Critical section for pAtState and enmPrevVMState. */
    RTCRITSECT AtStateCritSect;
    /** List of registered state change callbacks. */
    PVMATSTATE pAtState;
    /** Tail of the state change callback list (where the next one is linked in). */
    PVMATSTATE *ppAtStateNext;
    /** The previous VM state.
     * This is mainly used for the 'Resetting' state, but may come in handy later
     * and when debugging. */
    VMSTATE enmPrevVMState;

    /** Reason for the most recent suspend operation. */
    VMSUSPENDREASON enmSuspendReason;
    /** Reason for the most recent resume operation. */
    VMRESUMEREASON enmResumeReason;

    /** Critical section for pAtError and pAtRuntimeError. */
    RTCRITSECT AtErrorCritSect;

    /** List of registered error callbacks. */
    PVMATERROR pAtError;
    /** Tail of the error callback list (where the next one is linked in). */
    PVMATERROR *ppAtErrorNext;
    /** The error message count.
     * This is incremented every time an error is raised. */
    uint32_t volatile cErrors;

    /** The runtime error message count.
     * This is incremented every time a runtime error is raised. */
    uint32_t volatile cRuntimeErrors;
    /** List of registered runtime error callbacks. */
    PVMATRUNTIMEERROR pAtRuntimeError;
    /** Tail of the runtime error callback list (where the next one is linked in). */
    PVMATRUNTIMEERROR *ppAtRuntimeErrorNext;

    /** @name Generic Halt data
     * @{
     */
    /** The current halt method.
     * Can be selected by CFGM option 'VM/HaltMethod'. */
    VMHALTMETHOD enmHaltMethod;
    /** The index into g_aHaltMethods of the current halt method. */
    uint32_t volatile iHaltMethod;
    /** @} */

    /** @todo Do NOT add new members here or reuse the current ones; we need to store
     *        the config for each halt method separately because we're racing on SMP guest rigs. */
    union
    {
        /**
         * Method 1 & 2 - Block whenever possible, and when lagging behind
         * switch to spinning with regular blocking every 5-200ms (defaults)
         * depending on the accumulated lag. The blocking interval is adjusted
         * with the average oversleeping of the last 64 times.
         *
         * The difference between 1 and 2 is that we use native absolute
         * time APIs for the blocking instead of the millisecond based IPRT
         * interface.
         */
        struct
        {
            /** The max interval without blocking (when spinning). */
            uint32_t u32MinBlockIntervalCfg;
            /** The minimum interval between blocking (when spinning). */
            uint32_t u32MaxBlockIntervalCfg;
            /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
            uint32_t u32LagBlockIntervalDivisorCfg;
            /** When to start spinning (lag / nano secs). */
            uint32_t u32StartSpinningCfg;
            /** When to stop spinning (lag / nano secs). */
            uint32_t u32StopSpinningCfg;
        } Method12;

        /**
         * The GVMM manages halted and waiting EMTs.
         */
        struct
        {
            /** The threshold between spinning and blocking. */
            uint32_t cNsSpinBlockThresholdCfg;
        } Global1;
    } Halt;

    /** Pointer to the DBGC instance data. */
    void *pvDBGC;

    /** TLS index for the VMINTUSERPERVMCPU pointer. */
    RTTLS idxTLS;

    /** The VM name. (Set after the config constructor has been called.) */
    char *pszName;
    /** The VM UUID. (Set after the config constructor has been called.) */
    RTUUID Uuid;
} VMINTUSERPERVM;
# ifdef VBOX_WITH_STATISTICS
AssertCompileMemberAlignment(VMINTUSERPERVM, StatReqAllocNew, 8);
# endif

/** Pointer to the VM internal data kept in the UVM. */
typedef VMINTUSERPERVM *PVMINTUSERPERVM;
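
/* Illustrative sketch (not part of the original header): the apReqFree entries are small
 * lock-free LIFO lists of recycled VMREQ packets; allocation typically detaches a whole
 * list with an atomic exchange. The real logic lives in the request-handling code, and the
 * helper below is an invented, simplified example using the iprt/asm.h atomics. */
#if 0
static PVMREQ vmDemoAllocRecycledReq(PUVM pUVM)
{
    /* Pick one of the free lists (round-robin over the array) and detach it atomically. */
    uint32_t i     = ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree);
    PVMREQ   pHead = ASMAtomicXchgPtrT(&pUVM->vm.s.apReqFree[i], NULL, PVMREQ);
    if (!pHead)
        return NULL; /* nothing recycled in this slot; the caller allocates a fresh packet */

    /* Keep the first packet and put the rest back for other threads to reuse.
     * (A real implementation must merge with packets freed in the meantime; omitted here.) */
    PVMREQ pRest = pHead->pNext;
    if (pRest)
        ASMAtomicWritePtr(&pUVM->vm.s.apReqFree[i], pRest);
    return pHead;
}
#endif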


/**
 * VMCPU internal data kept in the UVM.
 *
 * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
 */
typedef struct VMINTUSERPERVMCPU
{
    /** Head of the normal request queue. Atomic. */
    volatile PVMREQ pNormalReqs;
    /** Head of the priority request queue. Atomic. */
    volatile PVMREQ pPriorityReqs;

    /** The handle to the EMT thread. */
    RTTHREAD ThreadEMT;
    /** The native handle of the EMT thread. */
    RTNATIVETHREAD NativeThreadEMT;
    /** Wait event semaphore. */
    RTSEMEVENT EventSemWait;
    /** Wait/Idle indicator. */
    bool volatile fWait;
    /** Set if we've been through vmR3Destroy and decremented the active EMT count
     * already. */
    bool volatile fBeenThruVmDestroy;
    /** Align the next member. */
    bool afAlignment[HC_ARCH_BITS == 32 ? 2 : 6];

    /** @name Generic Halt data
     * @{
     */
    /** The average time (ns) between two halts in the last second. (updated once per second) */
    uint32_t HaltInterval;
    /** The average halt frequency for the last second. (updated once per second) */
    uint32_t HaltFrequency;
    /** The number of halts in the current period. */
    uint32_t cHalts;
    uint32_t padding; /**< alignment padding. */
    /** When we started counting halts in cHalts (RTTimeNanoTS). */
    uint64_t u64HaltsStartTS;
    /** @} */

    /** Union containing data and config for the different halt algorithms. */
    union
    {
        /**
         * Method 1 & 2 - Block whenever possible, and when lagging behind
         * switch to spinning with regular blocking every 5-200ms (defaults)
         * depending on the accumulated lag. The blocking interval is adjusted
         * with the average oversleeping of the last 64 times.
         *
         * The difference between 1 and 2 is that we use native absolute
         * time APIs for the blocking instead of the millisecond based IPRT
         * interface.
         */
        struct
        {
            /** How many times we've blocked while cNSBlocked and cNSBlockedTooLong have been accumulating. */
            uint32_t cBlocks;
            /** Align the next member. */
            uint32_t u32Alignment;
            /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t cNSBlockedTooLongAvg;
            /** Total time spent oversleeping when blocking. */
            uint64_t cNSBlockedTooLong;
            /** Total time spent blocking. */
            uint64_t cNSBlocked;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t u64LastBlockTS;

            /** When we started spinning relentlessly in order to catch up on some of the oversleeping.
             * This is 0 when we're not spinning. */
            uint64_t u64StartSpinTS;
        } Method12;

# if 0
        /**
         * Method 3 & 4 - Same as method 1 & 2 respectively, except that we
         * sprinkle it with yields.
         */
        struct
        {
            /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS have been accumulating. */
            uint32_t cBlocks;
            /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
            uint64_t cBlockedTooLongNSAvg;
            /** Total time spent oversleeping when blocking. */
            uint64_t cBlockedTooLongNS;
            /** Total time spent blocking. */
            uint64_t cBlockedNS;
            /** The timestamp (RTTimeNanoTS) of the last block. */
            uint64_t u64LastBlockTS;

            /** How many times we've yielded while cBlockedNS and cBlockedTooLongNS have been accumulating. */
            uint32_t cYields;
            /** Avg. time spent oversleeping when yielding. */
            uint32_t cYieldTooLongNSAvg;
            /** Total time spent oversleeping when yielding. */
            uint64_t cYieldTooLongNS;
            /** Total time spent yielding. */
            uint64_t cYieldedNS;
            /** The timestamp (RTTimeNanoTS) of the last yield. */
            uint64_t u64LastYieldTS;

            /** When we started spinning relentlessly in order to catch up on some of the oversleeping. */
            uint64_t u64StartSpinTS;
        } Method34;
# endif
    } Halt;

    /** Profiling the halted state; yielding vs blocking.
     * @{ */
    STAMPROFILE StatHaltYield;
    STAMPROFILE StatHaltBlock;
    STAMPROFILE StatHaltBlockOverslept;
    STAMPROFILE StatHaltBlockInsomnia;
    STAMPROFILE StatHaltBlockOnTime;
    STAMPROFILE StatHaltTimers;
    STAMPROFILE StatHaltPoll;
    /** @} */
} VMINTUSERPERVMCPU;
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, u64HaltsStartTS, 8);
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, Halt.Method12.cNSBlockedTooLongAvg, 8);
AssertCompileMemberAlignment(VMINTUSERPERVMCPU, StatHaltYield, 8);

/** Pointer to the VMCPU internal data kept in the UVM. */
typedef VMINTUSERPERVMCPU *PVMINTUSERPERVMCPU;
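
/* Illustrative sketch (not part of the original header): roughly how the Method12
 * configuration (per VM) and the per-VCPU oversleep average combine into a blocking
 * interval, per the doc comments above. The real algorithm lives in the EMT halt code;
 * this helper, its name, and the parameter cNsLag (accumulated lag in nanoseconds) are
 * invented for the example, and the intervals are assumed to be in nanoseconds. */
#if 0
static uint64_t vmDemoMethod12BlockInterval(PUVM pUVM, PVMINTUSERPERVMCPU pVCpuUvm, uint64_t cNsLag)
{
    /* Raw interval: divide the current lag by the configured divisor. */
    uint64_t cNsInterval = cNsLag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg;

    /* Clamp to the configured min/max blocking intervals. */
    if (cNsInterval < pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg)
        cNsInterval = pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg;
    else if (cNsInterval > pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg)
        cNsInterval = pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg;

    /* Compensate for the average oversleep observed on previous blocks. */
    if (cNsInterval > pVCpuUvm->Halt.Method12.cNSBlockedTooLongAvg)
        cNsInterval -= pVCpuUvm->Halt.Method12.cNSBlockedTooLongAvg;
    return cNsInterval;
}
#endif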

#endif /* IN_RING3 */

RT_C_DECLS_BEGIN

DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArg);
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod);
DECLCALLBACK(int) vmR3Destroy(PVM pVM);
DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *args);
void vmSetErrorCopy(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list args);
DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage);
DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa);
void vmSetRuntimeErrorCopy(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va);
void vmR3SetTerminated(PVM pVM);

RT_C_DECLS_END


/** @} */

#endif /* !___VMInternal_h */