VirtualBox

source: vbox/trunk/include/VBox/vm.h@19176

Last change on this file since 19176 was 19141, checked in by vboxsync, 16 years ago

Action flags breakup.
Fixed PGM saved state loading of 2.2.2 images.
Reduced hacks in PATM state loading (fixups).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 28.1 KB
/** @file
 * VM - The Virtual Machine, data.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>


/** @defgroup grp_vm The Virtual Machine
 * @{
 */

/** Maximum number of virtual CPUs per VM. */
#define VMCPU_MAX_CPU_COUNT 255

/**
 * The state of a virtual CPU.
 *
 * The VM running states are sub-states of the VMSTATE_RUNNING state, while
 * VMCPUSTATE_NOT_RUNNING is a placeholder for the other VM states.
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Running guest code (VM running). */
    VMCPUSTATE_RUN_EXEC,
    /** Running guest code in the recompiler (VM running). */
    VMCPUSTATE_RUN_EXEC_REM,
    /** Halted (VM running). */
    VMCPUSTATE_RUN_HALTED,
    /** All the other bits we do while running a VM (VM running). */
    VMCPUSTATE_RUN_MISC,
    /** VM not running, we're servicing requests or whatever. */
    VMCPUSTATE_NOT_RUNNING,
    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;


/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fLocalForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpus array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;

    /** Align the next bit on a 64-byte boundary.
     *
     * @remarks The alignments of the members that are larger than 48 bytes should
     *          be 64 bytes for cache-line reasons. Structs containing small
     *          amounts of data could be lumped together at the end with a < 64
     *          byte padding following it (to grow into and align the struct size).
     */
    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 9 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        char                padding[4096];      /* multiple of 64 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        char                padding[64];        /* multiple of 64 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        char                padding[32*1024];   /* multiple of 64 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        char                padding[5120];      /* multiple of 64 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        char                padding[2048];      /* multiple of 64 */
    } em;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPMCPU      s;
#endif
        char                padding[64];        /* multiple of 64 */
    } trpm;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        char                padding[64];        /* multiple of 64 */
    } tm;
} VMCPU;
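
/* Illustrative sketch (not part of this API): since enmState is read by other
 * threads, an emulation thread would publish state transitions atomically.
 * The helper name vmCpuSetState is hypothetical; ASMAtomicWriteU32 comes from
 * iprt/asm.h.
 * @code
 *  DECLINLINE(void) vmCpuSetState(struct VMCPU *pVCpu, VMCPUSTATE enmNewState)
 *  {
 *      // Publish the new state with a single atomic 32-bit store.
 *      ASMAtomicWriteU32((uint32_t volatile *)&pVCpu->enmState, (uint32_t)enmNewState);
 *  }
 * @endcode
 */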

/** Pointer to a VMCPU. */
#ifndef ___VBox_types_h
typedef struct VMCPU *PVMCPU;
#endif

/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** This action forces the VM to schedule and run pending timers (TM). */
#define VM_FF_TIMER                         RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES                    RT_BIT_32(3)
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA                       RT_BIT_32(4)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VM_FF_PDM_CRITSECT                  RT_BIT_32(5)
/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted until DBGF clears it. */
#define VM_FF_DBGF                          RT_BIT_32(8)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VM_FF_REQUEST                       RT_BIT_32(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE                     RT_BIT_32(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET                         RT_BIT_32(11)
/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES          RT_BIT_32(18)
/** PGM is out of memory.
 * Abandon all loops and code paths which can be resumed and get up to the EM
 * loops. */
#define VM_FF_PGM_NO_MEMORY                 RT_BIT_32(19)
/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY            RT_BIT_32(29)
/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND                 RT_BIT_32(31)


/** This action forces the VM to check for and service pending interrupts on the APIC. */
#define VMCPU_FF_INTERRUPT_APIC             RT_BIT_32(0)
/** This action forces the VM to check for and service pending interrupts on the PIC. */
#define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timers (TM).
 * (Bogus for now; needed for PATM backwards compatibility.) */
#define VMCPU_FF_TIMER                      RT_BIT_32(2)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VMCPU_FF_REQUEST                    RT_BIT_32(9)
/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3               RT_BIT_32(16)
/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL    RT_BIT_32(17)
/** Check the interrupt and trap gates. */
#define VMCPU_FF_TRPM_SYNC_IDT              RT_BIT_32(20)
/** Check the guest's TSS ring-0 stack. */
#define VMCPU_FF_SELM_SYNC_TSS              RT_BIT_32(21)
/** Check the guest's GDT table. */
#define VMCPU_FF_SELM_SYNC_GDT              RT_BIT_32(22)
/** Check the guest's LDT table. */
#define VMCPU_FF_SELM_SYNC_LDT              RT_BIT_32(23)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
/** CSAM needs to scan the page that's being executed. */
#define VMCPU_FF_CSAM_SCAN_PAGE             RT_BIT_32(26)
/** CSAM needs to do some homework. */
#define VMCPU_FF_CSAM_PENDING_ACTION        RT_BIT_32(27)
/** Force return to Ring-3. */
#define VMCPU_FF_TO_R3                      RT_BIT_32(28)

/** External VM forced actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK           (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
/** External VMCPU forced actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK        (VMCPU_FF_REQUEST)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_HALTED_MASK              (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_HALTED_MASK           (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST)

/** High priority VM pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK            (  VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
                                                 | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (  VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
                                                 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT)

/** High priority VM pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre raw-mode execution mask. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT \
                                                 | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK           (VM_FF_PDM_CRITSECT | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU post-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_POST_MASK        (VMCPU_FF_CSAM_PENDING_ACTION)

/** Normal priority VM post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK         (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)
/** Normal priority VMCPU post-execution actions. */
#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK      (VMCPU_FF_CSAM_SCAN_PAGE)

/** Normal priority VM actions. */
#define VM_FF_NORMAL_PRIORITY_MASK              (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)

/** Flags to clear before resuming guest execution. */
#define VMCPU_FF_RESUME_GUEST_MASK              (VMCPU_FF_TO_R3)

/** VM flags that cause the HWACCM loops to go back to ring-3. */
#define VM_FF_HWACCM_TO_R3_MASK                 (VM_FF_TIMER | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** VMCPU flags that cause the HWACCM loops to go back to ring-3. */
#define VMCPU_FF_HWACCM_TO_R3_MASK              (VMCPU_FF_TO_R3)

/** All the forced flags. */
#define VM_FF_ALL_MASK                          (~0U)
/** All the forced VM flags except those only relevant to raw-mode. */
#define VM_FF_ALL_BUT_RAW_MASK                  (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PDM_CRITSECT) | VM_FF_PGM_NO_MEMORY)
/** All the forced VMCPU flags except those only relevant to raw-mode. */
#define VMCPU_FF_ALL_BUT_RAW_MASK               (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION))

/** @} */

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif
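
/* Usage sketch (illustrative only): a timer subsystem might raise the timer
 * forced action so the emulation loop schedules and runs pending timers on
 * its next pass.
 * @code
 *  VM_FF_SET(pVM, VM_FF_TIMER);    // picked up by the EM loop
 * @endcode
 */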

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to set.
 */
#if 1 //def VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_SET(pVCpu, fFlag)         ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))
#else
# define VMCPU_FF_SET(pVCpu, fFlag)         ASMAtomicOrU32(&(pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions, (fFlag))
#endif

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1 //def VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_CLEAR(pVCpu, fFlag)       ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))
#else
# define VMCPU_FF_CLEAR(pVCpu, fFlag)       ASMAtomicAndU32(&(pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions, ~(fFlag))
#endif
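
/* Usage sketch (illustrative only): an interrupt controller asserts an
 * interrupt forced action on the target VCPU, and the emulation loop clears
 * it once the interrupt has been dispatched to the guest.
 * @code
 *  VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);    // device/PIC side
 *  ...
 *  VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);  // EM, after dispatching
 * @endcode
 */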

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to check.
 */
#if 1 //def VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_ISSET(pVCpu, fFlag)       (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
#else
# define VMCPU_FF_ISSET(pVCpu, fFlag)       (((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
#endif

/** @def VM_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 */
#if 1 //def VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_ISPENDING(pVCpu, fFlags)  ((pVCpu)->fLocalForcedActions & (fFlags))
#else
# define VMCPU_FF_ISPENDING(pVCpu, fFlags)  ((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlags))
#endif

/** @def VM_FF_IS_PENDING_EXCEPT
 * Checks if one or more force actions in the specified set are pending while
 * one or more other ones are not.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)        ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )

/** @def VMCPU_FF_IS_PENDING_EXCEPT
 * Checks if one or more force actions in the specified set are pending for the
 * given VCPU while one or more other ones are not.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#if 1 //def VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt)  ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
#else
# define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt)  ( ((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlags)) && !((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fExcpt)) )
#endif
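
/* Usage sketch (illustrative only): process the normal priority actions
 * unless PGM has run out of memory, in which case that condition takes
 * precedence. The helper name is hypothetical.
 * @code
 *  if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
 *      vmProcessNormalPriorityActions(pVM);    // hypothetical helper
 * @endcode
 */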

/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#elif defined(IN_RING0)
# define VM_IS_EMT(pVM)                     true
#else
/** @todo need to rework this macro for the case of multiple emulation threads for SMP */
# define VM_IS_EMT(pVM)                     (VMR3GetVMCPUNativeThread(pVM) == RTThreadNativeSelf())
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
                    (rc))
#endif
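
/* Usage sketch (illustrative only): a ring-3 API that must only be entered on
 * the emulation thread can guard itself like this. The function name is
 * hypothetical and the status code is one plausible choice.
 * @code
 *  VMR3DECL(int) vmR3DoSomething(PVM pVM)  // hypothetical API
 *  {
 *      VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
 *      ...
 *  }
 * @endcode
 */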


/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
    AssertMsg((pVM)->enmVMState == (_enmState), \
              ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
    AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                    ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
                    (rc))
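
/* Usage sketch (illustrative only): reject calls made in the wrong VM state;
 * the status code shown is one plausible choice.
 * @code
 *  VM_ASSERT_STATE_RETURN(pVM, VMSTATE_RUNNING, VERR_VM_INVALID_VM_STATE);
 * @endcode
 */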



/** This is the VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to use APIs
 * rather than poking at the members directly from all over the place; that
 * is why we make use of unions to hide everything which isn't local to the
 * current source module. This means we'll have to pay a little bit of
 * attention when adding new members to structures in the unions, and make
 * sure to keep the padding sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE                     enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically. */
    volatile uint32_t           fGlobalForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCPUs;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[6];

    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param   eax         The return code, register.
     * @param   Ctx         The guest core context.
     * @remark  Assumes interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when abandoning the hypervisor because of a trap
     * and we want the trap state to be saved.
     *
     * @param   eax         The return code, register.
     * @param   ecx         Pointer to the hypervisor core context, register.
     * @remark  Assumes interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context
     * has already been saved, or that it's just a brief return to HC and that
     * the caller intends to resume whatever it is doing upon 'return' from
     * this call.
     *
     * @param   eax         The return code, register.
     * @remark  Assumes interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD                    uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD              uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool                        fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                        fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fCSAMEnabled;
    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                        fHWACCMEnabled;
    /** Hardware VM support is required and non-optional.
     * This is initialized together with the rest of the VM structure. */
    bool                        fHwVirtExtForced;
    /** PARAV enabled flag. */
    bool                        fPARAVEnabled;
    /** @} */


    /* Padding to make GCC put StatTotalQemuToGC where MSC does. */
#if HC_ARCH_BITS == 32
    uint32_t                    padding0;
#endif

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV              StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV              StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV              StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV              StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV              StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV              StatSwitcherToHC;
    STAMPROFILEADV              StatSwitcherSaveRegs;
    STAMPROFILEADV              StatSwitcherSysEnter;
    STAMPROFILEADV              StatSwitcherDebug;
    STAMPROFILEADV              StatSwitcherCR0;
    STAMPROFILEADV              StatSwitcherCR4;
    STAMPROFILEADV              StatSwitcherJmpCR3;
    STAMPROFILEADV              StatSwitcherRstrRegs;
    STAMPROFILEADV              StatSwitcherLgdt;
    STAMPROFILEADV              StatSwitcherLidt;
    STAMPROFILEADV              StatSwitcherLldt;
    STAMPROFILEADV              StatSwitcherTSS;

/** @todo Realign everything on 64 byte boundaries to better match the
 *        cache-line size. */
    /* Padding - the unions must be aligned on 32 byte boundaries. */
    uint32_t                    padding[HC_ARCH_BITS == 32 ? 4+8 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
        char        padding[2048];          /* multiple of 32 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM  s;
#endif
        char        padding[1600];          /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM  s;
#endif
        char        padding[16*1024];       /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        char        padding[512];           /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        char        padding[5344];          /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        char        padding[544];           /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM   s;
#endif
        char        padding[192];           /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        char        padding[32];            /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM  s;
#endif
        char        padding[1824];          /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM  s;
#endif
        char        padding[4544];          /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        char        padding[768];           /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        char        padding[3328];          /* multiple of 32 */
    } csam;

    /** PARAV part. */
    union
    {
#ifdef ___PARAVInternal_h
        struct PARAV s;
#endif
        char        padding[128];
    } parav;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM   s;
#endif
        char        padding[64];            /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM   s;
#endif
        char        padding[1536];          /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        char        padding[2368];          /* multiple of 32 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM  s;
#endif
        char        padding[32];            /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT s;
#endif
        char        padding[768];           /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM  s;
#endif

/** @def VM_REM_SIZE
 * Must be a multiple of 32 and kept coherent with REM_ENV_SIZE from REMInternal.h. */
#if GC_ARCH_BITS == 32
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10800 : 0x10800)
#else
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10900 : 0x10900)
#endif
        char        padding[VM_REM_SIZE];   /* multiple of 32 */
    } rem;

    /** Padding for aligning the cpu array on a 64-byte boundary. */
    uint32_t        u32Reserved2[8];

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a 64-byte boundary. */
    VMCPU           aCpus[1];
} VM;
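
/* Illustrative sketch (not part of this API): walking the embedded VMCPU
 * array; cCPUs gives the configured count and aCpus holds the first element.
 * Raising VMCPU_FF_REQUEST on every VCPU is just an arbitrary example action.
 * @code
 *  for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
 *  {
 *      PVMCPU pVCpu = &pVM->aCpus[idCpu];
 *      VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
 *  }
 * @endcode
 */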

/** Pointer to a VM. */
#ifndef ___VBox_types_h
typedef struct VM *PVM;
#endif


#ifdef IN_RC
__BEGIN_DECLS

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one of those
 * magic globals which we should avoid using.
 */
extern DECLIMPORT(VM)   g_VM;

__END_DECLS
#endif

/** @} */

#endif