VirtualBox

source: vbox/trunk/include/VBox/vm.h @ 19326

Last change: r19326, checked in by vboxsync: "Started with TLB shootdown."

/** @file
 * VM - The Virtual Machine, data.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>


/** @defgroup grp_vm    The Virtual Machine
 * @{
 */

/** Maximum number of virtual CPUs per VM. */
#define VMCPU_MAX_CPU_COUNT     255

/**
 * The state of a virtual CPU.
 *
 * The VM running states are sub-states of the VMSTATE_RUNNING state, while
 * VMCPUSTATE_NOT_RUNNING is a placeholder for the other VM states.
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Running guest code (VM running). */
    VMCPUSTATE_RUN_EXEC,
    /** Running guest code in the recompiler (VM running). */
    VMCPUSTATE_RUN_EXEC_REM,
    /** Halted (VM running). */
    VMCPUSTATE_RUN_HALTED,
    /** All the other bits we do while running a VM (VM running). */
    VMCPUSTATE_RUN_MISC,
    /** VM not running, we're servicing requests or whatever. */
    VMCPUSTATE_NOT_RUNNING,
    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;

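/*
 * Usage sketch (illustrative only; the helper name is hypothetical and the
 * atomic-read helper is assumed from iprt/asm.h): testing for the
 * "VM running" sub-states. enmState is read atomically since other threads
 * update it, and the range test relies on the sub-states being declared
 * contiguously in the enum above.
 */
#if 0 /* illustrative sketch, not part of the API */
DECLINLINE(bool) demoIsVCpuRunning(PVMCPU pVCpu)
{
    /* Assumes the VMCPU definition below this point. */
    VMCPUSTATE enmState = (VMCPUSTATE)ASMAtomicReadU32((uint32_t volatile *)&pVCpu->enmState);
    return enmState >= VMCPUSTATE_RUN_EXEC
        && enmState <= VMCPUSTATE_RUN_MISC;
}
#endif
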

/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fLocalForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Pointer to the ring-3 UVMCPU structure. */
    PUVMCPU                 pUVCpu;
    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpus array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;

    /** Align the next bit on a 64-byte boundary.
     *
     * @remarks The alignments of the members that are larger than 48 bytes should be
     *          64-byte for cache line reasons. Structs containing small amounts of
     *          data could be lumped together at the end with a < 64 byte padding
     *          following it (to grow into and align the struct size).
     */
    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 8 : 4];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        char                padding[4096];      /* multiple of 64 */
    } cpum;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        char                padding[32*1024];   /* multiple of 64 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        char                padding[5120];      /* multiple of 64 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        char                padding[2048];      /* multiple of 64 */
    } em;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPMCPU      s;
#endif
        char                padding[128];       /* multiple of 64 */
    } trpm;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        char                padding[64];        /* multiple of 64 */
    } tm;

    /** VMM part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        char                padding[64];        /* multiple of 64 */
    } vmm;

    /** DBGF part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGFCPU      s;
#endif
        uint8_t             padding[64];        /* multiple of 64 */
    } dbgf;

} VMCPU;

/** Pointer to a VMCPU. */
#ifndef ___VBox_types_h
typedef struct VMCPU *PVMCPU;
#endif

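/*
 * The unions in VMCPU (and in VM further below) implement an opaque-data
 * pattern: a translation unit that has included a module's internal header
 * sees the real structure, while everyone else sees only a fixed-size
 * padding blob, so sizeof(VMCPU) is identical for all consumers. A minimal
 * self-contained sketch of the same idea (all names are illustrative, not
 * from VirtualBox):
 */
#if 0 /* illustrative sketch, not part of the API */
typedef struct DEMOOUTER
{
    union
    {
# ifdef DEMO_MOD_INTERNAL_H        /* defined only by the module's internal header */
        struct DEMOMODDATA  s;     /* the owning module sees its real data */
# endif
        char                padding[64]; /* everybody else: opaque, fixed size */
    } mod;
} DEMOOUTER;
/* If struct DEMOMODDATA ever outgrows the padding, a size check in the
   spirit of tstVMStructSize catches the mismatch at build/test time. */
#endif
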
/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME          "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME          "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** This action forces the VM to schedule and run pending timers (TM). */
#define VM_FF_TIMER                         RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES                    RT_BIT_32(3)
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA                       RT_BIT_32(4)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VM_FF_PDM_CRITSECT                  RT_BIT_32(5)
/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted till DBGF clears it. */
#define VM_FF_DBGF                          RT_BIT_32(8)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VM_FF_REQUEST                       RT_BIT_32(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE                     RT_BIT_32(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET                         RT_BIT_32(11)
/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES          RT_BIT_32(18)
/** PGM is out of memory.
 * Abandon all loops and code paths which can be resumed and get up to the EM
 * loops. */
#define VM_FF_PGM_NO_MEMORY                 RT_BIT_32(19)
/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY            RT_BIT_32(29)
/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND                 RT_BIT_32(31)


/** This action forces the VM to check for and service pending interrupts on the APIC. */
#define VMCPU_FF_INTERRUPT_APIC             RT_BIT_32(0)
/** This action forces the VM to check for and service pending interrupts on the PIC. */
#define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timers (TM). (bogus for now; needed for PATM backwards compatibility) */
#define VMCPU_FF_TIMER                      RT_BIT_32(2)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VMCPU_FF_REQUEST                    RT_BIT_32(9)
/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3               RT_BIT_32(16)
/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL    RT_BIT_32(17)
/** Check the interrupt and trap gates. */
#define VMCPU_FF_TRPM_SYNC_IDT              RT_BIT_32(20)
/** Check the guest's TSS ring 0 stack. */
#define VMCPU_FF_SELM_SYNC_TSS              RT_BIT_32(21)
/** Check the guest's GDT table. */
#define VMCPU_FF_SELM_SYNC_GDT              RT_BIT_32(22)
/** Check the guest's LDT table. */
#define VMCPU_FF_SELM_SYNC_LDT              RT_BIT_32(23)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
/** Check for pending TLB shootdown actions. */
#define VMCPU_FF_TLB_SHOOTDOWN              RT_BIT_32(25)
/** CSAM needs to scan the page that's being executed. */
#define VMCPU_FF_CSAM_SCAN_PAGE             RT_BIT_32(26)
/** CSAM needs to do some homework. */
#define VMCPU_FF_CSAM_PENDING_ACTION        RT_BIT_32(27)
/** Force return to Ring-3. */
#define VMCPU_FF_TO_R3                      RT_BIT_32(28)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK           (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK        (VMCPU_FF_REQUEST)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_HALTED_MASK              (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_HALTED_MASK           (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST)

/** High priority VM pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK            (  VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
                                                 | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (  VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
                                                 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT)

/** High priority VM pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre raw-mode execution mask. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT \
                                                 | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK           (VM_FF_PDM_CRITSECT | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU post-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_POST_MASK        (VMCPU_FF_CSAM_PENDING_ACTION)

/** Normal priority VM post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK         (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)
/** Normal priority VMCPU post-execution actions. */
#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK      (VMCPU_FF_CSAM_SCAN_PAGE)

/** Normal priority VM actions. */
#define VM_FF_NORMAL_PRIORITY_MASK              (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
/** Normal priority VMCPU actions. */
#define VMCPU_FF_NORMAL_PRIORITY_MASK           (VMCPU_FF_REQUEST)

/** Flags to clear before resuming guest execution. */
#define VMCPU_FF_RESUME_GUEST_MASK              (VMCPU_FF_TO_R3)

/** VM flags that cause the HWACCM loops to go back to ring-3. */
#define VM_FF_HWACCM_TO_R3_MASK                 (VM_FF_TIMER | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** VMCPU flags that cause the HWACCM loops to go back to ring-3. */
#define VMCPU_FF_HWACCM_TO_R3_MASK              (VMCPU_FF_TO_R3)

/** All the forced flags. */
#define VM_FF_ALL_MASK                          (~0U)
/** All the forced VM flags. */
#define VM_FF_ALL_BUT_RAW_MASK                  (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PDM_CRITSECT) | VM_FF_PGM_NO_MEMORY)
/** All the forced VMCPU flags. */
#define VMCPU_FF_ALL_BUT_RAW_MASK               (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION))

/** @} */

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to set.
 */
#define VMCPU_FF_SET(pVCpu, fFlag)          ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to clear.
 */
#define VMCPU_FF_CLEAR(pVCpu, fFlag)        ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to check.
 */
#define VMCPU_FF_ISSET(pVCpu, fFlag)        (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))

/** @def VM_FF_ISPENDING
 * Checks if one or more force action flags in the specified set are pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force action flags in the specified set are pending for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 */
#define VMCPU_FF_ISPENDING(pVCpu, fFlags)   ((pVCpu)->fLocalForcedActions & (fFlags))

/** @def VM_FF_IS_PENDING_EXCEPT
 * Checks if one or more force action flags in the specified set are pending while one
 * or more other ones are not.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)        ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )

/** @def VMCPU_FF_IS_PENDING_EXCEPT
 * Checks if one or more force action flags in the specified set are pending for the given
 * VCPU while one or more other ones are not.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt)   ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )

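/*
 * Usage sketch (illustrative only; the function is hypothetical): how the
 * set/test/clear macros typically cooperate. One thread posts work by
 * setting a flag; the emulation loop polls the priority masks before
 * resuming guest code and services whatever is pending.
 */
#if 0 /* illustrative sketch, not part of the API */
static void demoForcedActionRoundTrip(PVM pVM, PVMCPU pVCpu)
{
    /* Producer side, e.g. another thread posting a request: */
    VM_FF_SET(pVM, VM_FF_REQUEST);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);

    /* Consumer side, e.g. the emulation loop polling the normal-priority masks: */
    if (    VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_MASK)
        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
    {
        if (VM_FF_ISSET(pVM, VM_FF_REQUEST))
        {
            /* ... service the pending request queue here ... */
            VM_FF_CLEAR(pVM, VM_FF_REQUEST);
        }
    }
}
#endif
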
/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#elif defined(IN_RING0)
# define VM_IS_EMT(pVM)                     true
#else
/** @todo need to rework this macro for the case of multiple emulation threads for SMP */
# define VM_IS_EMT(pVM)                     (VMR3GetVMCPUNativeThread(pVM) == RTThreadNativeSelf())
#endif

/** @def VMCPU_IS_EMT
 * Checks if the current thread is the emulation thread (EMT) for the specified
 * virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_IS_EMT(pVCpu)                true
#elif defined(IN_RING0)
# define VMCPU_IS_EMT(pVCpu)                fixme - need to call HWACCM I think... /** @todo SMP */
#else
/** @todo need to rework this macro for the case of multiple emulation threads for SMP */
# define VMCPU_IS_EMT(pVCpu)                ((pVCpu)->hNativeThread == RTThreadNativeSelf())
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VMCPU_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT) of the
 * specified virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#else
# define VMCPU_ASSERT_EMT(pVCpu) \
    AssertMsg(VMCPU_IS_EMT(pVCpu), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
               RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
                    (rc))
#endif

/** @def VMCPU_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#else
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
    AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
                     RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
                    (rc))
#endif

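/*
 * Usage sketch (illustrative only; the function is hypothetical and
 * VERR_VM_THREAD_NOT_EMT is assumed from VBox/err.h): a ring-3 API that
 * must only run on the emulation thread bails out early otherwise.
 */
#if 0 /* illustrative sketch, not part of the API */
static int demoSetLocalForcedAction(PVMCPU pVCpu, uint32_t fFlag)
{
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);

    /* Safe to touch EMT-only state from here on. */
    VMCPU_FF_SET(pVCpu, fFlag);
    return VINF_SUCCESS;
}
#endif
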

/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
    AssertMsg((pVM)->enmVMState == (_enmState), \
              ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
    AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                    ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)), \
                    (rc))

/** @def VM_ASSERT_VALID_EXT_RETURN
 * Asserts that the VM handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                    &&  (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                    ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                     ? VMGetStateName(pVM->enmVMState) : ""), \
                    (rc))

/** @def VMCPU_ASSERT_VALID_EXT_RETURN
 * Asserts that the VMCPU handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVCpu, 64) \
                    &&  RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                    &&  (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                    ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
                     RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                     ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
                    (rc))

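/*
 * Usage sketch (illustrative only; the function is hypothetical and assumes
 * the VM definition below): an externally callable query validates the
 * handle before dereferencing it, since external callers can race VM
 * destruction.
 */
#if 0 /* illustrative sketch, not part of the API */
static uint32_t demoGetCpuCount(PVM pVM)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, 0 /* illustrative failure value */);
    return pVM->cCPUs;
}
#endif
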

/**
 * This is the VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to use APIs
 * rather than modifying the members directly from all over the place.
 * Therefore we make use of unions to hide everything which isn't local to
 * the current source module. This means we'll have to pay a little bit of
 * attention when adding new members to structures in the unions and make
 * sure to keep the padding sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE                     enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically. */
    volatile uint32_t           fGlobalForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCPUs;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[6];

    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param   eax         The return code, register.
     * @param   Ctx         The guest core context.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when abandoning the hypervisor because of a trap
     * and one wants the trap state to be saved.
     *
     * @param   eax         The return code, register.
     * @param   ecx         Pointer to the hypervisor core context, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context has already
     * been saved, or that it's just a brief return to HC and that the caller intends to resume
     * whatever it is doing upon 'return' from this call.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD                    uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD              uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool                        fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                        fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fCSAMEnabled;
    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                        fHWACCMEnabled;
    /** Hardware VM support is required and non-optional.
     * This is initialized together with the rest of the VM structure. */
    bool                        fHwVirtExtForced;
    /** PARAV enabled flag. */
    bool                        fPARAVEnabled;
    /** @} */


    /* Padding to make gcc put the StatQemuToGC members where MSC does. */
#if HC_ARCH_BITS == 32
    uint32_t                    padding0;
#endif

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV              StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV              StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV              StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV              StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV              StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV              StatSwitcherToHC;
    STAMPROFILEADV              StatSwitcherSaveRegs;
    STAMPROFILEADV              StatSwitcherSysEnter;
    STAMPROFILEADV              StatSwitcherDebug;
    STAMPROFILEADV              StatSwitcherCR0;
    STAMPROFILEADV              StatSwitcherCR4;
    STAMPROFILEADV              StatSwitcherJmpCR3;
    STAMPROFILEADV              StatSwitcherRstrRegs;
    STAMPROFILEADV              StatSwitcherLgdt;
    STAMPROFILEADV              StatSwitcherLidt;
    STAMPROFILEADV              StatSwitcherLldt;
    STAMPROFILEADV              StatSwitcherTSS;

/** @todo Realign everything on 64 byte boundaries to better match the
 *        cache-line size. */
    /* Padding - the unions must be aligned on 32 byte boundaries. */
    uint32_t                    padding[HC_ARCH_BITS == 32 ? 4+8 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
        char        padding[2048];          /* multiple of 32 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM  s;
#endif
        char        padding[1600];          /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM  s;
#endif
        char        padding[16*1024];       /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        char        padding[512];           /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        char        padding[5344];          /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        char        padding[544];           /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM   s;
#endif
        char        padding[192];           /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        char        padding[32];            /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM  s;
#endif
        char        padding[1824];          /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM  s;
#endif
        char        padding[4544];          /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        char        padding[768];           /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        char        padding[3328];          /* multiple of 32 */
    } csam;

    /** PARAV part. */
    union
    {
#ifdef ___PARAVInternal_h
        struct PARAV s;
#endif
        char        padding[128];
    } parav;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM   s;
#endif
        char        padding[256];           /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM   s;
#endif
        char        padding[1536];          /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        char        padding[2368];          /* multiple of 32 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM  s;
#endif
        char        padding[32];            /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT s;
#endif
        char        padding[768];           /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM  s;
#endif

/** @def VM_REM_SIZE
 * Must be multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h. */
#if GC_ARCH_BITS == 32
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10800 : 0x10800)
#else
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10900 : 0x10900)
#endif
        char        padding[VM_REM_SIZE];   /* multiple of 32 */
    } rem;

    /** Padding for aligning the cpu array on a 64 byte boundary. */
    uint32_t    u32Reserved2[8];

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a 64-byte boundary. */
    VMCPU       aCpus[1];
} VM;

/** Pointer to a VM. */
#ifndef ___VBox_types_h
typedef struct VM *PVM;
#endif

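/*
 * Two brief sketches (illustrative only) of working with the trailing aCpus
 * array: iterating the configured CPUs, and a compile-time guard in the
 * spirit of the tstVMStructSize reminder above. AssertCompileMemberAlignment
 * is assumed from iprt/assert.h.
 */
#if 0 /* illustrative sketch, not part of the API */
static void demoForEachCpu(PVM pVM)
{
    /* aCpus[] is declared with one entry, but the allocation really holds
       pVM->cCPUs entries; offVMCPU records where the array starts. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        NOREF(pVCpu); /* per-CPU work goes here */
    }
}

/* The per-CPU array must stay on a 64-byte boundary (see u32Reserved2). */
AssertCompileMemberAlignment(VM, aCpus, 64);
#endif
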

#ifdef IN_RC
__BEGIN_DECLS

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one of those
 * magic globals which we should avoid using.
 */
extern DECLIMPORT(VM)   g_VM;

__END_DECLS
#endif

/** @} */

#endif