VirtualBox

source: vbox/trunk/include/VBox/vm.h@ 19403

Last change on this file since 19403 was 19403, checked in by vboxsync, 16 years ago

VBox/param.h, VMM: VMCPU_MAX_CPU_COUNT & VMM_MAX_CPUS => VMM_MAX_CPU_COUNT; added VMM_MIN_CPU_COUNT for future schema replacement, dropping a hint about these constants to Main.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.3 KB
/** @file
 * VM - The Virtual Machine, data.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>
#include <VBox/vmm.h>


/** @defgroup grp_vm    The Virtual Machine
 * @{
 */
/**
 * The state of a virtual CPU.
 *
 * The VM running states are sub-states of the VMSTATE_RUNNING state, while
 * VMCPUSTATE_NOT_RUNNING is a placeholder for the other VM states.
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Running guest code (VM running). */
    VMCPUSTATE_RUN_EXEC,
    /** Running guest code in the recompiler (VM running). */
    VMCPUSTATE_RUN_EXEC_REM,
    /** Halted (VM running). */
    VMCPUSTATE_RUN_HALTED,
    /** All the other bits we do while running a VM (VM running). */
    VMCPUSTATE_RUN_MISC,
    /** VM not running, we're servicing requests or whatever. */
    VMCPUSTATE_NOT_RUNNING,
    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;

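/* Usage sketch (illustrative, assuming a valid pVCpu): the running sub-states
 * are contiguous in the enum, so testing whether a virtual CPU is currently
 * in one of the VMSTATE_RUNNING sub-states can be done with a range check:
 * @code
 *      VMCPUSTATE enmState = pVCpu->enmState;
 *      bool fRunning =    enmState >= VMCPUSTATE_RUN_EXEC
 *                      && enmState <= VMCPUSTATE_RUN_MISC;
 * @endcode
 */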

/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fLocalForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Pointer to the ring-3 UVMCPU structure. */
    PUVMCPU                 pUVCpu;
    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpus array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;

    /** Align the next bit on a 64-byte boundary.
     *
     * @remarks The alignments of the members that are larger than 48 bytes
     *          should be 64-byte for cache line reasons. Structs containing
     *          small amounts of data could be lumped together at the end with
     *          a < 64 byte padding following it (to grow into and align the
     *          struct size). */
    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 8 : 4];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        char                padding[4096];      /* multiple of 64 */
    } cpum;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        char                padding[32*1024];   /* multiple of 64 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        char                padding[5120];      /* multiple of 64 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        char                padding[2048];      /* multiple of 64 */
    } em;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPMCPU      s;
#endif
        char                padding[128];       /* multiple of 64 */
    } trpm;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        char                padding[64];        /* multiple of 64 */
    } tm;

    /** VMM part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        char                padding[64];        /* multiple of 64 */
    } vmm;

    /** DBGF part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGFCPU      s;
#endif
        uint8_t             padding[64];        /* multiple of 64 */
    } dbgf;

} VMCPU;

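/* Usage sketch (illustrative): the padding sizes and 64-byte alignment
 * promises above can be checked at compile time with the IPRT compile-time
 * assertion macros from iprt/assert.h, e.g. in a module that also includes
 * CPUMInternal.h:
 * @code
 *      AssertCompile(sizeof(VMCPU) % 64 == 0);
 *      AssertCompileMemberAlignment(VMCPU, cpum, 64);
 *      AssertCompile(sizeof(struct CPUMCPU) <= RT_SIZEOFMEMB(VMCPU, cpum.padding));
 * @endcode
 */
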
/** Pointer to a VMCPU. */
#ifndef ___VBox_types_h
typedef struct VMCPU *PVMCPU;
#endif

/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME          "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME          "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** This action forces the VM to schedule and run pending timers (TM). */
#define VM_FF_TIMER                     RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES                RT_BIT_32(3)
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA                   RT_BIT_32(4)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VM_FF_PDM_CRITSECT              RT_BIT_32(5)
/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted till DBGF clears it. */
#define VM_FF_DBGF                      RT_BIT_32(8)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VM_FF_REQUEST                   RT_BIT_32(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE                 RT_BIT_32(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET                     RT_BIT_32(11)
/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES      RT_BIT_32(18)
/** PGM is out of memory.
 * Abandon all loops and code paths which can be resumed and get up to the EM
 * loops. */
#define VM_FF_PGM_NO_MEMORY             RT_BIT_32(19)
/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY        RT_BIT_32(29)
/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND             RT_BIT_32(31)


/** This action forces the VM to check for and service pending interrupts on the APIC. */
#define VMCPU_FF_INTERRUPT_APIC         RT_BIT_32(0)
/** This action forces the VM to check for and service pending interrupts on the PIC. */
#define VMCPU_FF_INTERRUPT_PIC          RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timers (TM). (bogus for now; needed for PATM backwards compatibility) */
#define VMCPU_FF_TIMER                  RT_BIT_32(2)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VMCPU_FF_REQUEST                RT_BIT_32(9)
/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3           RT_BIT_32(16)
/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL RT_BIT_32(17)
/** Check the interrupt and trap gates. */
#define VMCPU_FF_TRPM_SYNC_IDT          RT_BIT_32(20)
/** Check the guest's TSS ring-0 stack. */
#define VMCPU_FF_SELM_SYNC_TSS          RT_BIT_32(21)
/** Check the guest's GDT table. */
#define VMCPU_FF_SELM_SYNC_GDT          RT_BIT_32(22)
/** Check the guest's LDT table. */
#define VMCPU_FF_SELM_SYNC_LDT          RT_BIT_32(23)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VMCPU_FF_INHIBIT_INTERRUPTS     RT_BIT_32(24)
/** Check for pending TLB shootdown actions. */
#define VMCPU_FF_TLB_SHOOTDOWN          RT_BIT_32(25)
/** CSAM needs to scan the page that's being executed. */
#define VMCPU_FF_CSAM_SCAN_PAGE         RT_BIT_32(26)
/** CSAM needs to do some homework. */
#define VMCPU_FF_CSAM_PENDING_ACTION    RT_BIT_32(27)
/** Force return to Ring-3. */
#define VMCPU_FF_TO_R3                  RT_BIT_32(28)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK   (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK (VMCPU_FF_REQUEST)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_HALTED_MASK      (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_HALTED_MASK   (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST)

/** High priority VM pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK    (  VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
                                         | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK (  VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
                                         | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT)

/** High priority VM pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre raw-mode execution mask. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT \
                                             | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK   (VM_FF_PDM_CRITSECT | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU post-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_POST_MASK (VMCPU_FF_CSAM_PENDING_ACTION)

/** Normal priority VM post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)
/** Normal priority VMCPU post-execution actions. */
#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK (VMCPU_FF_CSAM_SCAN_PAGE)

/** Normal priority VM actions. */
#define VM_FF_NORMAL_PRIORITY_MASK      (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
/** Normal priority VMCPU actions. */
#define VMCPU_FF_NORMAL_PRIORITY_MASK   (VMCPU_FF_REQUEST)

/** Flags to clear before resuming guest execution. */
#define VMCPU_FF_RESUME_GUEST_MASK      (VMCPU_FF_TO_R3)

/** VM flags that cause the HWACCM loops to go back to ring-3. */
#define VM_FF_HWACCM_TO_R3_MASK         (VM_FF_TIMER | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** VMCPU flags that cause the HWACCM loops to go back to ring-3. */
#define VMCPU_FF_HWACCM_TO_R3_MASK      (VMCPU_FF_TO_R3)

/** All the forced VM flags. */
#define VM_FF_ALL_MASK                  (~0U)
/** All the forced VM flags except the raw-mode related ones. */
#define VM_FF_ALL_BUT_RAW_MASK          (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PDM_CRITSECT) | VM_FF_PGM_NO_MEMORY)
/** All the forced VMCPU flags except the raw-mode related ones. */
#define VMCPU_FF_ALL_BUT_RAW_MASK       (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION))

/** @} */
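
/* Usage sketch (illustrative): the masks above are meant to be tested with
 * the VM_FF_ISPENDING() / VMCPU_FF_ISPENDING() checkers defined below; a
 * halted-wait loop could poll them like this (the sleep stands in for the
 * real block/wakeup logic):
 * @code
 *      while (   !VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
 *             && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_HALTED_MASK))
 *          RTThreadSleep(1);
 * @endcode
 */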

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to set.
 */
#define VMCPU_FF_SET(pVCpu, fFlag)          ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to clear.
 */
#define VMCPU_FF_CLEAR(pVCpu, fFlag)        ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to check.
 */
#define VMCPU_FF_ISSET(pVCpu, fFlag)        (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))

/** @def VM_FF_ISPENDING
 * Checks if one or more force action in the specified set is pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force action in the specified set is pending for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 */
#define VMCPU_FF_ISPENDING(pVCpu, fFlags)   ((pVCpu)->fLocalForcedActions & (fFlags))

/** @def VM_FF_IS_PENDING_EXCEPT
 * Checks if one or more force action in the specified set is pending while one
 * or more other ones are not.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)    ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )

/** @def VMCPU_FF_IS_PENDING_EXCEPT
 * Checks if one or more force action in the specified set is pending for the given
 * VCPU while one or more other ones are not.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )

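/* Usage sketch (illustrative, assuming a valid pVM): raising, testing and
 * acknowledging a forced action; the TM call is just an indicative handler:
 * @code
 *      VM_FF_SET(pVM, VM_FF_TIMER);
 *      ...
 *      if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_TIMER, VM_FF_PGM_NO_MEMORY))
 *      {
 *          VM_FF_CLEAR(pVM, VM_FF_TIMER);
 *          TMR3TimerQueuesDo(pVM);
 *      }
 * @endcode
 */
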
/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#else
# define VM_IS_EMT(pVM)                     (VMMGetCpu(pVM) != NULL)
#endif

/** @def VMCPU_IS_EMT
 * Checks if the current thread is the emulation thread (EMT) for the specified
 * virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_IS_EMT(pVCpu)                true
#else
# define VMCPU_IS_EMT(pVCpu)                ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VMCPU_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT) of the
 * specified virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#else
# define VMCPU_ASSERT_EMT(pVCpu) \
    AssertMsg(VMCPU_IS_EMT(pVCpu), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
               RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
                    (rc))
#endif

/** @def VMCPU_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#else
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
    AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
                     RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
                    (rc))
#endif


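/* Usage sketch (illustrative): a ring-3 function that must only be called on
 * the emulation thread would typically open with one of the assertions above
 * (myR3Operation is a hypothetical name):
 * @code
 *      int myR3Operation(PVM pVM)
 *      {
 *          VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
 *          ...
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */
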
/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
    AssertMsg((pVM)->enmVMState == (_enmState), \
              ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
    AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                    ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
                    (rc))

/** @def VM_ASSERT_VALID_EXT_RETURN
 * Asserts that the VM handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                     && (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                    ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                     ? VMGetStateName((pVM)->enmVMState) : ""), \
                    (rc))

/** @def VMCPU_ASSERT_VALID_EXT_RETURN
 * Asserts that the VMCPU handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVCpu, 64) \
                     && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                     && (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                    ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
                     RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                     ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
                    (rc))


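/* Usage sketch (illustrative): an entry point callable from outside the VMM
 * (e.g. Main) would validate the handle before touching anything
 * (myExternalApi is a hypothetical name):
 * @code
 *      VMMR3DECL(int) myExternalApi(PVM pVM)
 *      {
 *          VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
 *          ...
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */
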
/** This is the VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to use APIs
 * rather than modifying the members all around the place. Therefore we make
 * use of unions to hide everything which isn't local to the current source
 * module. This means we'll have to pay a little bit of attention when adding
 * new members to structures in the unions and make sure to keep the padding
 * sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE                     enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically. */
    volatile uint32_t           fGlobalForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCPUs;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[6];

    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param   eax         The return code, register.
     * @param   Ctx         The guest core context.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when abandoning the hypervisor because of a trap
     * and we want the trap state to be saved.
     *
     * @param   eax         The return code, register.
     * @param   ecx         Pointer to the hypervisor core context, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context has already
     * been saved, or that it's just a brief return to HC and that the caller intends to resume
     * whatever it is doing upon 'return' from this call.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD                    uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD              uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool                        fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                        fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fCSAMEnabled;
    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                        fHWACCMEnabled;
    /** Hardware VM support is required and non-optional.
     * This is initialized together with the rest of the VM structure. */
    bool                        fHwVirtExtForced;
    /** PARAV enabled flag. */
    bool                        fPARAVEnabled;
    /** @} */


    /* Padding to make gcc put the StatQemuToGC where msc does. */
#if HC_ARCH_BITS == 32
    uint32_t                    padding0;
#endif

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV              StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV              StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV              StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV              StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV              StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV              StatSwitcherToHC;
    STAMPROFILEADV              StatSwitcherSaveRegs;
    STAMPROFILEADV              StatSwitcherSysEnter;
    STAMPROFILEADV              StatSwitcherDebug;
    STAMPROFILEADV              StatSwitcherCR0;
    STAMPROFILEADV              StatSwitcherCR4;
    STAMPROFILEADV              StatSwitcherJmpCR3;
    STAMPROFILEADV              StatSwitcherRstrRegs;
    STAMPROFILEADV              StatSwitcherLgdt;
    STAMPROFILEADV              StatSwitcherLidt;
    STAMPROFILEADV              StatSwitcherLldt;
    STAMPROFILEADV              StatSwitcherTSS;

    /** @todo Realign everything on 64 byte boundaries to better match the
     *        cache-line size. */
    /* Padding - the unions must be aligned on 32 byte boundaries. */
    uint32_t                    padding[HC_ARCH_BITS == 32 ? 4+8 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
        char        padding[2048];          /* multiple of 32 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM  s;
#endif
        char        padding[1600];          /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM  s;
#endif
        char        padding[16*1024];       /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        char        padding[512];           /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        char        padding[5344];          /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        char        padding[544];           /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM   s;
#endif
        char        padding[192];           /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        char        padding[32];            /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM  s;
#endif
        char        padding[1824];          /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM  s;
#endif
        char        padding[4544];          /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        char        padding[768];           /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        char        padding[3328];          /* multiple of 32 */
    } csam;

    /** PARAV part. */
    union
    {
#ifdef ___PARAVInternal_h
        struct PARAV s;
#endif
        char        padding[128];
    } parav;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM   s;
#endif
        char        padding[256];           /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM   s;
#endif
        char        padding[1536];          /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        char        padding[2368];          /* multiple of 32 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM  s;
#endif
        char        padding[32];            /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT s;
#endif
        char        padding[768];           /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM  s;
#endif

/** @def VM_REM_SIZE
 * Must be multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h. */
#if GC_ARCH_BITS == 32
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10800 : 0x10800)
#else
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10900 : 0x10900)
#endif
        char        padding[VM_REM_SIZE];   /* multiple of 32 */
    } rem;

    /** Padding for aligning the cpu array on a 64 byte boundary. */
    uint32_t    u32Reserved2[8];

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a 64-byte boundary. */
    VMCPU       aCpus[1];
} VM;
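
/* Usage sketch (illustrative): inside a module that has included its own
 * internal header (here TMInternal.h, which defines ___TMInternal_h), the
 * union member resolves to the real structure; everyone else only sees the
 * padding and must go through the TM APIs. The member name is indicative:
 * @code
 *      pVM->tm.s.u64SomeTmMember = 0;
 * @endcode
 */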

/** Pointer to a VM. */
#ifndef ___VBox_types_h
typedef struct VM *PVM;
#endif

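/* Usage sketch (illustrative, assuming a valid pVM): iterating the virtual
 * CPUs via the cCPUs count and the aCpus array that tails the structure:
 * @code
 *      for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
 *      {
 *          PVMCPU pVCpu = &pVM->aCpus[idCpu];
 *          Assert(pVCpu->idCpu == idCpu);
 *          ...
 *      }
 * @endcode
 */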

#ifdef IN_RC
__BEGIN_DECLS

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one of those
 * magic globals which we should avoid using.
 */
extern DECLIMPORT(VM)   g_VM;

__END_DECLS
#endif

/** @} */

#endif