VirtualBox

source: vbox/trunk/include/VBox/vm.h@19315

Last change on this file since 19315 was 19286, checked in by vboxsync, 16 years ago

VMM,VBoxDbg: SMP refactoring, part 1.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.7 KB
/** @file
 * VM - The Virtual Machine, data.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>


/** @defgroup grp_vm The Virtual Machine
 * @{
 */

/** Maximum number of virtual CPUs per VM. */
#define VMCPU_MAX_CPU_COUNT     255

/**
 * The state of a virtual CPU.
 *
 * The VM running states are sub-states of the VMSTATE_RUNNING state, while
 * VMCPUSTATE_NOT_RUNNING is a placeholder for the other VM states.
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Running guest code (VM running). */
    VMCPUSTATE_RUN_EXEC,
    /** Running guest code in the recompiler (VM running). */
    VMCPUSTATE_RUN_EXEC_REM,
    /** Halted (VM running). */
    VMCPUSTATE_RUN_HALTED,
    /** All the other bits we do while running a VM (VM running). */
    VMCPUSTATE_RUN_MISC,
    /** VM not running, we're servicing requests or whatever. */
    VMCPUSTATE_NOT_RUNNING,
    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;
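
/* Example (editor's sketch, not part of the original header): VMCPU::enmState
 * below is declared volatile and is shared between contexts, so transitions
 * should go through the IPRT atomic helpers from iprt/asm.h rather than plain
 * assignment. Assuming a valid pVCpu owned by the calling EMT, entering and
 * leaving guest execution might look like:
 *
 * @code
 *      Assert(pVCpu->enmState == VMCPUSTATE_RUN_MISC);
 *      ASMAtomicWriteU32((uint32_t volatile *)&pVCpu->enmState, VMCPUSTATE_RUN_EXEC);
 *      // ... run guest code ...
 *      ASMAtomicWriteU32((uint32_t volatile *)&pVCpu->enmState, VMCPUSTATE_RUN_MISC);
 * @endcode
 */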


/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fLocalForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Pointer to the ring-3 UVMCPU structure. */
    PUVMCPU                 pUVCpu;
    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpus array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;

    /** Align the next bit on a 64-byte boundary.
     *
     * @remarks The alignments of the members that are larger than 48 bytes
     *          should be 64 bytes for cache-line reasons. Structs containing
     *          small amounts of data could be lumped together at the end with
     *          a < 64 byte padding following it (to grow into and align the
     *          struct size). */
    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 8 : 4];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        char                padding[4096];      /* multiple of 64 */
    } cpum;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        char                padding[32*1024];   /* multiple of 64 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        char                padding[5120];      /* multiple of 64 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        char                padding[2048];      /* multiple of 64 */
    } em;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPMCPU      s;
#endif
        char                padding[128];       /* multiple of 64 */
    } trpm;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        char                padding[64];        /* multiple of 64 */
    } tm;

    /** VMM part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        char                padding[64];        /* multiple of 64 */
    } vmm;

    /** DBGF part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGFCPU      s;
#endif
        uint8_t             padding[64];        /* multiple of 64 */
    } dbgf;

} VMCPU;
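
/* Example (editor's sketch): the padding unions above only stay valid as long
 * as the internal structures fit and the total layout keeps its alignment;
 * the tree verifies this with tstVMStructSize (see the remark above). An
 * equivalent compile-time check, assuming the IPRT assertion macros from
 * iprt/assert.h, could read:
 *
 * @code
 *      AssertCompile(sizeof(((VMCPU *)0)->cpum) % 64 == 0);  // each union a multiple of 64
 *      AssertCompileSizeAlignment(VMCPU, 64);                // whole struct cache-line sized
 * @endcode
 */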

/** Pointer to a VMCPU. */
#ifndef ___VBox_types_h
typedef struct VMCPU *PVMCPU;
#endif

/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME          "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME          "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** This action forces the VM to schedule and run pending timers (TM). */
#define VM_FF_TIMER                         RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES                    RT_BIT_32(3)
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA                       RT_BIT_32(4)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VM_FF_PDM_CRITSECT                  RT_BIT_32(5)
/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted till DBGF clears it. */
#define VM_FF_DBGF                          RT_BIT_32(8)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VM_FF_REQUEST                       RT_BIT_32(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE                     RT_BIT_32(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET                         RT_BIT_32(11)
/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES          RT_BIT_32(18)
/** PGM is out of memory.
 * Abandon all loops and code paths which can be resumed and get up to the EM
 * loops. */
#define VM_FF_PGM_NO_MEMORY                 RT_BIT_32(19)
/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY            RT_BIT_32(29)
/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND                 RT_BIT_32(31)


/** This action forces the VM to check and service pending interrupts on the APIC. */
#define VMCPU_FF_INTERRUPT_APIC             RT_BIT_32(0)
/** This action forces the VM to check and service pending interrupts on the PIC. */
#define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timers (TM).
 * (bogus for now; needed for PATM backwards compatibility) */
#define VMCPU_FF_TIMER                      RT_BIT_32(2)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VMCPU_FF_REQUEST                    RT_BIT_32(9)
/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3               RT_BIT_32(16)
/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL    RT_BIT_32(17)
/** Check the interrupt and trap gates. */
#define VMCPU_FF_TRPM_SYNC_IDT              RT_BIT_32(20)
/** Check the guest's TSS ring-0 stack. */
#define VMCPU_FF_SELM_SYNC_TSS              RT_BIT_32(21)
/** Check the guest's GDT table. */
#define VMCPU_FF_SELM_SYNC_GDT              RT_BIT_32(22)
/** Check the guest's LDT table. */
#define VMCPU_FF_SELM_SYNC_LDT              RT_BIT_32(23)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
/** CSAM needs to scan the page that's being executed. */
#define VMCPU_FF_CSAM_SCAN_PAGE             RT_BIT_32(26)
/** CSAM needs to do some homework. */
#define VMCPU_FF_CSAM_PENDING_ACTION        RT_BIT_32(27)
/** Force return to Ring-3. */
#define VMCPU_FF_TO_R3                      RT_BIT_32(28)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK       (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK    (VMCPU_FF_REQUEST)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_HALTED_MASK          (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_HALTED_MASK       (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST)

/** High priority VM pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK        (  VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
                                             | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK     (  VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
                                             | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS \
                                             | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT)

/** High priority VM pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK    (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre raw-mode execution mask. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS \
                                             | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT \
                                             | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK       (VM_FF_PDM_CRITSECT | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU post-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_POST_MASK    (VMCPU_FF_CSAM_PENDING_ACTION)

/** Normal priority VM post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK     (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)
/** Normal priority VMCPU post-execution actions. */
#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK  (VMCPU_FF_CSAM_SCAN_PAGE)

/** Normal priority VM actions. */
#define VM_FF_NORMAL_PRIORITY_MASK          (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
/** Normal priority VMCPU actions. */
#define VMCPU_FF_NORMAL_PRIORITY_MASK       (VMCPU_FF_REQUEST)

/** Flags to clear before resuming guest execution. */
#define VMCPU_FF_RESUME_GUEST_MASK          (VMCPU_FF_TO_R3)

/** VM flags that cause the HWACCM loops to go back to ring-3. */
#define VM_FF_HWACCM_TO_R3_MASK             (VM_FF_TIMER | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** VMCPU flags that cause the HWACCM loops to go back to ring-3. */
#define VMCPU_FF_HWACCM_TO_R3_MASK          (VMCPU_FF_TO_R3)

/** All the forced flags. */
#define VM_FF_ALL_MASK                      (~0U)
/** All the forced VM flags. */
#define VM_FF_ALL_BUT_RAW_MASK              (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PDM_CRITSECT) | VM_FF_PGM_NO_MEMORY)
/** All the forced VMCPU flags. */
#define VMCPU_FF_ALL_BUT_RAW_MASK           (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION))

/** @} */
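
/* Example (editor's sketch): how the halted masks above are typically
 * consumed. An emulation loop that has halted the virtual CPU keeps waiting
 * until some external action becomes pending; pVM and pVCpu are assumed
 * valid, and the actual blocking wait is elided:
 *
 * @code
 *      while (   !VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
 *             && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_HALTED_MASK))
 *      {
 *          // block on the halt event semaphore ...
 *      }
 * @endcode
 */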

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to set.
 */
#define VMCPU_FF_SET(pVCpu, fFlag)          ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to clear.
 */
#define VMCPU_FF_CLEAR(pVCpu, fFlag)        ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to check.
 */
#define VMCPU_FF_ISSET(pVCpu, fFlag)        (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))

/** @def VM_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 */
#define VMCPU_FF_ISPENDING(pVCpu, fFlags)   ((pVCpu)->fLocalForcedActions & (fFlags))
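
/* Example (editor's sketch): note the difference between the ISSET and
 * ISPENDING checks. ISSET requires every bit in fFlag to be set, while
 * ISPENDING yields non-zero if any bit in fFlags is set:
 *
 * @code
 *      VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
 *      VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC);                               // true
 *      VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);  // non-zero
 *      VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);      // false, PIC bit clear
 * @endcode
 */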

/** @def VM_FF_IS_PENDING_EXCEPT
 * Checks if one or more force actions in the specified set are pending while one
 * or more other ones are not.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)        ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )

/** @def VMCPU_FF_IS_PENDING_EXCEPT
 * Checks if one or more force actions in the specified set are pending for the given
 * VCPU while one or more other ones are not.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt)   ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
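
/* Example (editor's sketch): a typical use of the EXCEPT variants is to act
 * on pending high priority actions unless the out-of-memory condition is
 * set, since VM_FF_PGM_NO_MEMORY overrides everything else. The handler name
 * here is hypothetical:
 *
 * @code
 *      if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK, VM_FF_PGM_NO_MEMORY))
 *          rc = emR3DoForcedActions(pVM, pVCpu, rc);   // hypothetical handler
 * @endcode
 */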

/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#elif defined(IN_RING0)
# define VM_IS_EMT(pVM)                     true
#else
/** @todo need to rework this macro for the case of multiple emulation threads for SMP */
# define VM_IS_EMT(pVM)                     (VMR3GetVMCPUNativeThread(pVM) == RTThreadNativeSelf())
#endif

/** @def VMCPU_IS_EMT
 * Checks if the current thread is the emulation thread (EMT) for the specified
 * virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_IS_EMT(pVCpu)                true
#elif defined(IN_RING0)
# define VMCPU_IS_EMT(pVCpu)                fixme - need to call HWACCM I think... /** @todo SMP */
#else
/** @todo need to rework this macro for the case of multiple emulation threads for SMP */
# define VMCPU_IS_EMT(pVCpu)                ((pVCpu)->hNativeThread == RTThreadNativeSelf())
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VMCPU_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT) of the
 * specified virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#else
# define VMCPU_ASSERT_EMT(pVCpu) \
    AssertMsg(VMCPU_IS_EMT(pVCpu), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
               RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
                    (rc))
#endif

/** @def VMCPU_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#else
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
    AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
                     RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
                    (rc))
#endif
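
/* Example (editor's sketch): the assertion macros above guard code that must
 * only run on the emulation thread. A ring-3 helper touching per-CPU state
 * would typically open like this; the function itself is hypothetical:
 *
 * @code
 *      static int vmR3TouchPerCpuState(PVM pVM, PVMCPU pVCpu)  // hypothetical helper
 *      {
 *          VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
 *          VMCPU_ASSERT_EMT(pVCpu);
 *          // ... safe to touch pVCpu members without further locking ...
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */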


/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
    AssertMsg((pVM)->enmVMState == (_enmState), \
              ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
    AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                    ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)), \
                    (rc))

/** @def VM_ASSERT_VALID_EXT_RETURN
 * Asserts that the VM handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
        AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                        &&  (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                        ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                         ? VMGetStateName(pVM->enmVMState) : ""), \
                        (rc))

/** @def VMCPU_ASSERT_VALID_EXT_RETURN
 * Asserts that the VMCPU handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
        AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVCpu, 64) \
                        &&  RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                        &&  (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                        ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
                         RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                         ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
                        (rc))
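
/* Example (editor's sketch): the VALID_EXT macros above are intended for
 * external entry points that may be handed a stale or half-destroyed handle,
 * so they validate alignment and VM state before anything is dereferenced.
 * The API name is hypothetical:
 *
 * @code
 *      VMMR3DECL(int) VMR3DoSomething(PVM pVM)     // hypothetical API
 *      {
 *          VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
 *          // ... pVM is aligned, valid and not (being) destroyed here ...
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */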


/** This is the VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to access it
 * via APIs rather than modifying members all around the place. Therefore we
 * make use of unions to hide everything which isn't local to the current
 * source module. This means we'll have to pay a little bit of attention when
 * adding new members to structures in the unions and make sure to keep the
 * padding sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE                     enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically. */
    volatile uint32_t           fGlobalForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCPUs;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[6];
600
601 /** @name Public VMM Switcher APIs
602 * @{ */
603 /**
604 * Assembly switch entry point for returning to host context.
605 * This function will clean up the stack frame.
606 *
607 * @param eax The return code, register.
608 * @param Ctx The guest core context.
609 * @remark Assume interrupts disabled.
610 */
611 RTRCPTR pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;
612
613 /**
614 * Assembly switch entry point for returning to host context.
615 *
616 * This is an alternative entry point which we'll be using when the we have the
617 * hypervisor context and need to save that before going to the host.
618 *
619 * This is typically useful when abandoning the hypervisor because of a trap
620 * and want the trap state to be saved.
621 *
622 * @param eax The return code, register.
623 * @param ecx Pointer to the hypervisor core context, register.
624 * @remark Assume interrupts disabled.
625 */
626 RTRCPTR pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;
627
628 /**
629 * Assembly switch entry point for returning to host context.
630 *
631 * This is an alternative to the two *Ctx APIs and implies that the context has already
632 * been saved, or that it's just a brief return to HC and that the caller intends to resume
633 * whatever it is doing upon 'return' from this call.
634 *
635 * @param eax The return code, register.
636 * @remark Assume interrupts disabled.
637 */
638 RTRCPTR pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
639 /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD                    uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD              uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool                        fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                        fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fCSAMEnabled;
    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                        fHWACCMEnabled;
    /** Hardware VM support is required and non-optional.
     * This is initialized together with the rest of the VM structure. */
    bool                        fHwVirtExtForced;
    /** PARAV enabled flag. */
    bool                        fPARAVEnabled;
    /** @} */


    /* Padding to make gcc put StatTotalQemuToGC where MSC does. */
#if HC_ARCH_BITS == 32
    uint32_t                    padding0;
#endif

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV              StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV              StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV              StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV              StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV              StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV              StatSwitcherToHC;
    STAMPROFILEADV              StatSwitcherSaveRegs;
    STAMPROFILEADV              StatSwitcherSysEnter;
    STAMPROFILEADV              StatSwitcherDebug;
    STAMPROFILEADV              StatSwitcherCR0;
    STAMPROFILEADV              StatSwitcherCR4;
    STAMPROFILEADV              StatSwitcherJmpCR3;
    STAMPROFILEADV              StatSwitcherRstrRegs;
    STAMPROFILEADV              StatSwitcherLgdt;
    STAMPROFILEADV              StatSwitcherLidt;
    STAMPROFILEADV              StatSwitcherLldt;
    STAMPROFILEADV              StatSwitcherTSS;

/** @todo Realign everything on 64 byte boundaries to better match the
 *        cache-line size. */
    /* Padding - the unions must be aligned on 32 byte boundaries. */
    uint32_t                    padding[HC_ARCH_BITS == 32 ? 4+8 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
        char        padding[2048];          /* multiple of 32 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM  s;
#endif
        char        padding[1600];          /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM  s;
#endif
        char        padding[16*1024];       /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        char        padding[512];           /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        char        padding[5344];          /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        char        padding[544];           /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM   s;
#endif
        char        padding[192];           /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        char        padding[32];            /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM  s;
#endif
        char        padding[1824];          /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM  s;
#endif
        char        padding[4544];          /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        char        padding[768];           /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        char        padding[3328];          /* multiple of 32 */
    } csam;

    /** PARAV part. */
    union
    {
#ifdef ___PARAVInternal_h
        struct PARAV s;
#endif
        char        padding[128];
    } parav;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM   s;
#endif
        char        padding[256];           /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM   s;
#endif
        char        padding[1536];          /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        char        padding[2368];          /* multiple of 32 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM  s;
#endif
        char        padding[32];            /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT s;
#endif
        char        padding[768];           /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM  s;
#endif

/** @def VM_REM_SIZE
 * Must be multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h. */
#if GC_ARCH_BITS == 32
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10800 : 0x10800)
#else
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10900 : 0x10900)
#endif
        char        padding[VM_REM_SIZE];   /* multiple of 32 */
    } rem;

    /** Padding for aligning the CPU array on a 64 byte boundary. */
    uint32_t    u32Reserved2[8];

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a 64-byte boundary. */
    VMCPU       aCpus[1];
} VM;
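
/* Example (editor's sketch): VM::aCpus is a variable-length trailing array;
 * cCPUs holds the configured count and offVMCPU the array's offset from the
 * start of the structure. Iterating the virtual CPUs is therefore simply:
 *
 * @code
 *      for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
 *      {
 *          PVMCPU pVCpu = &pVM->aCpus[idCpu];
 *          Assert(pVCpu->idCpu == idCpu);
 *          // ... per-CPU work ...
 *      }
 * @endcode
 */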

/** Pointer to a VM. */
#ifndef ___VBox_types_h
typedef struct VM *PVM;
#endif


#ifdef IN_RC
__BEGIN_DECLS

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one of those
 * magic globals which we should avoid using.
 */
extern DECLIMPORT(VM) g_VM;

__END_DECLS
#endif

/** @} */

#endif