/** @file
 * VM - The Virtual Machine, data.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>
#include <VBox/vmm.h>


/** @defgroup grp_vm    The Virtual Machine
 * @{
 */

/**
 * The state of a virtual CPU.
 *
 * The VM running states are sub-states of the VMSTATE_RUNNING state, while
 * VMCPUSTATE_NOT_RUNNING is a placeholder for the other VM states.
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Running guest code (VM running). */
    VMCPUSTATE_RUN_EXEC,
    /** Running guest code in the recompiler (VM running). */
    VMCPUSTATE_RUN_EXEC_REM,
    /** Halted (VM running). */
    VMCPUSTATE_RUN_HALTED,
    /** All the other bits we do while running a VM (VM running). */
    VMCPUSTATE_RUN_MISC,
    /** VM not running, we're servicing requests or whatever. */
    VMCPUSTATE_NOT_RUNNING,
    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;
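
/* A minimal sketch of an atomic transition on VMCPU::enmState; the helper and
 * the use of ASMAtomicCmpXchgU32 from iprt/asm.h are illustrative assumptions,
 * not APIs defined by this header. */
#if 0
static bool vmCpuExampleEnterHalted(PVMCPU pVCpu)
{
    /* Only transition RUN_EXEC -> RUN_HALTED; fail if the state changed meanwhile. */
    return ASMAtomicCmpXchgU32((uint32_t volatile *)&pVCpu->enmState,
                               VMCPUSTATE_RUN_HALTED, VMCPUSTATE_RUN_EXEC);
}
#endif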


/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fLocalForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Pointer to the ring-3 UVMCPU structure. */
    PUVMCPU                 pUVCpu;
    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpus array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;

    /** Align the next member on a 64-byte boundary.
     *
     * @remarks The alignment of members larger than 48 bytes should be 64 bytes
     *          for cache line reasons. Structs containing small amounts of data
     *          could be lumped together at the end with a < 64 byte padding
     *          following it (to grow into and align the struct size).
     */
    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 8 : 4];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        char                padding[4096];      /* multiple of 64 */
    } cpum;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        char                padding[32*1024];   /* multiple of 64 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        char                padding[5120];      /* multiple of 64 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        char                padding[2048];      /* multiple of 64 */
    } em;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPMCPU      s;
#endif
        char                padding[128];       /* multiple of 64 */
    } trpm;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        char                padding[64];        /* multiple of 64 */
    } tm;

    /** VMM part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        char                padding[64];        /* multiple of 64 */
    } vmm;

    /** DBGF part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGFCPU      s;
#endif
        uint8_t             padding[64];        /* multiple of 64 */
    } dbgf;

} VMCPU;
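
/* The padding remarks above are the sort of invariants tstVMStructSize checks;
 * a compile-time sketch of such checks (illustrative only, assuming the
 * AssertCompile macro from iprt/assert.h and RT_OFFSETOF from iprt/cdefs.h):
 *
 *     AssertCompile(!(RT_OFFSETOF(VMCPU, cpum) & 63));
 *     AssertCompile(!(sizeof(VMCPU) & 63));
 */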

/** Pointer to a VMCPU. */
#ifndef ___VBox_types_h
typedef struct VMCPU *PVMCPU;
#endif

/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME      "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME      "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** This action forces the VM to schedule and run pending timers (TM). */
#define VM_FF_TIMER                         RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES_BIT                3
#define VM_FF_PDM_QUEUES                    RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA_BIT                   4
#define VM_FF_PDM_DMA                       RT_BIT_32(VM_FF_PDM_DMA_BIT)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VM_FF_PDM_CRITSECT                  RT_BIT_32(5)
/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted until DBGF clears it. */
#define VM_FF_DBGF_BIT                      8
#define VM_FF_DBGF                          RT_BIT_32(VM_FF_DBGF_BIT)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VM_FF_REQUEST                       RT_BIT_32(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE                     RT_BIT_32(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET_BIT                     11
#define VM_FF_RESET                         RT_BIT_32(VM_FF_RESET_BIT)
/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES          RT_BIT_32(18)
/** PGM is out of memory.
 * Abandon all loops and code paths which can be resumed and get up to the EM
 * loops. */
#define VM_FF_PGM_NO_MEMORY                 RT_BIT_32(19)
/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY            RT_BIT_32(29)
/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND                 RT_BIT_32(31)


/** This action forces the VM to check for and service pending interrupts on the APIC. */
#define VMCPU_FF_INTERRUPT_APIC             RT_BIT_32(0)
/** This action forces the VM to check for and service pending interrupts on the PIC. */
#define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timers (TM).
 * (bogus for now; needed for PATM backwards compatibility) */
#define VMCPU_FF_TIMER                      RT_BIT_32(2)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VMCPU_FF_REQUEST                    RT_BIT_32(9)
/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3               RT_BIT_32(16)
/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL    RT_BIT_32(17)
/** Check the interrupt and trap gates. */
#define VMCPU_FF_TRPM_SYNC_IDT              RT_BIT_32(20)
/** Check the guest's TSS ring 0 stack. */
#define VMCPU_FF_SELM_SYNC_TSS              RT_BIT_32(21)
/** Check the guest's GDT table. */
#define VMCPU_FF_SELM_SYNC_GDT              RT_BIT_32(22)
/** Check the guest's LDT table. */
#define VMCPU_FF_SELM_SYNC_LDT              RT_BIT_32(23)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
/** Check for pending TLB shootdown actions. */
#define VMCPU_FF_TLB_SHOOTDOWN              RT_BIT_32(25)
/** CSAM needs to scan the page that's being executed. */
#define VMCPU_FF_CSAM_SCAN_PAGE             RT_BIT_32(26)
/** CSAM needs to do some homework. */
#define VMCPU_FF_CSAM_PENDING_ACTION        RT_BIT_32(27)
/** Force return to Ring-3. */
#define VMCPU_FF_TO_R3                      RT_BIT_32(28)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK           (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK        (VMCPU_FF_REQUEST)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_HALTED_MASK              (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_HALTED_MASK           (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST)

/** High priority VM pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK            (  VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
                                                 | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (  VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
                                                 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
                                                 | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT)

/** High priority VM pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre raw-mode execution mask. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
                                                 | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT \
                                                 | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK           (VM_FF_PDM_CRITSECT | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU post-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_POST_MASK        (VMCPU_FF_CSAM_PENDING_ACTION)

/** Normal priority VM post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK         (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)
/** Normal priority VMCPU post-execution actions. */
#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK      (VMCPU_FF_CSAM_SCAN_PAGE)

/** Normal priority VM actions. */
#define VM_FF_NORMAL_PRIORITY_MASK              (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
/** Normal priority VMCPU actions. */
#define VMCPU_FF_NORMAL_PRIORITY_MASK           (VMCPU_FF_REQUEST)

/** Flags to clear before resuming guest execution. */
#define VMCPU_FF_RESUME_GUEST_MASK              (VMCPU_FF_TO_R3)

/** VM flags that cause the HWACCM loops to go back to ring-3. */
#define VM_FF_HWACCM_TO_R3_MASK                 (VM_FF_TIMER | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** VMCPU flags that cause the HWACCM loops to go back to ring-3. */
#define VMCPU_FF_HWACCM_TO_R3_MASK              (VMCPU_FF_TO_R3)

/** All the forced flags. */
#define VM_FF_ALL_MASK                          (~0U)
/** All the forced VM flags. */
#define VM_FF_ALL_BUT_RAW_MASK                  (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PDM_CRITSECT) | VM_FF_PGM_NO_MEMORY)
/** All the forced VMCPU flags. */
#define VMCPU_FF_ALL_BUT_RAW_MASK               (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION))

/** @} */

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to set.
 */
#define VMCPU_FF_SET(pVCpu, fFlag)          ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to clear.
 */
#define VMCPU_FF_CLEAR(pVCpu, fFlag)        ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlag   The flag to check.
 */
#define VMCPU_FF_ISSET(pVCpu, fFlag)        (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))

/** @def VM_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))

/** @def VM_FF_TESTANDCLEAR
 * Checks if one (!) force action in the specified set is pending and clears it atomically.
 *
 * @returns true if the bit was set.
 * @returns false if the bit was clear.
 * @param   pVM     VM Handle.
 * @param   iBit    Bit position to check and clear.
 */
#define VM_FF_TESTANDCLEAR(pVM, iBit)       (ASMBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit))

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 */
#define VMCPU_FF_ISPENDING(pVCpu, fFlags)   ((pVCpu)->fLocalForcedActions & (fFlags))

/** @def VM_FF_IS_PENDING_EXCEPT
 * Checks if one or more force actions in the specified set are pending while one
 * or more other ones are not.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)        ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )

/** @def VMCPU_FF_IS_PENDING_EXCEPT
 * Checks if one or more force actions in the specified set are pending for the given
 * VCPU while one or more other ones are not.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that should not be set.
 */
#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt)   ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
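
/* A usage sketch contrasting the macros above (emExampleCheckFFs is a
 * hypothetical caller, not part of this header): ISSET requires every bit of
 * the flag, ISPENDING matches any bit in a mask, and TESTANDCLEAR atomically
 * consumes a single bit. */
#if 0
static void emExampleCheckFFs(PVM pVM, PVMCPU pVCpu)
{
    VM_FF_SET(pVM, VM_FF_TIMER);                        /* raise: atomic OR. */

    /* ISPENDING matches any bit in a mask... */
    if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK))
    {
        /* ... leave the halted/idle loop ... */
    }

    /* ...while ISSET only yields true when every bit of fFlag is set. */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
    {
        /* ... dispatch the pending APIC interrupt ... */
    }

    /* TESTANDCLEAR takes a bit index, not a mask, and clears it atomically. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_PDM_DMA_BIT))
    {
        /* ... run the pending DMA transfers ... */
    }

    VM_FF_CLEAR(pVM, VM_FF_TIMER);                      /* clear: atomic AND with ~fFlag. */
}
#endif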

/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#else
# define VM_IS_EMT(pVM)                     (VMMGetCpu(pVM) != NULL)
#endif

/** @def VMCPU_IS_EMT
 * Checks if the current thread is the emulation thread (EMT) for the specified
 * virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_IS_EMT(pVCpu)                true
#else
# define VMCPU_IS_EMT(pVCpu)                ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VMCPU_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT) of the
 * specified virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#else
# define VMCPU_ASSERT_EMT(pVCpu) \
    AssertMsg(VMCPU_IS_EMT(pVCpu), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
               RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
                    (rc))
#endif

/** @def VMCPU_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#else
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
    AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
                     RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
                    (rc))
#endif
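
/* A sketch of how the EMT guards are typically used: assert at the top of a
 * function that may only be called on the emulation thread.
 * (vmExampleTouchCpuState is a hypothetical function, not part of this header.) */
#if 0
static int vmExampleTouchCpuState(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);    /* In ring-3 this logs both native thread ids on failure. */
    /* ... safe to access per-VCPU state here ... */
    return VINF_SUCCESS;
}
#endif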


/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
    AssertMsg((pVM)->enmVMState == (_enmState), \
              ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
    AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                    ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)), \
                    (rc))

/** @def VM_ASSERT_VALID_EXT_RETURN
 * Asserts that the VM handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                    &&  (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                    ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                     ? VMGetStateName(pVM->enmVMState) : ""), \
                    (rc))

/** @def VMCPU_ASSERT_VALID_EXT_RETURN
 * Asserts that the VMCPU handle is valid for external access, i.e. not being
 * destroyed or terminated.
 */
#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
    AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVCpu, 64) \
                    &&  RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                    &&  (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                    ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
                     RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                     ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
                    (rc))
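
/* A sketch of an external (non-EMT) API entry point combining these guards:
 * validate the handle before touching it, then insist on the expected state.
 * (VMExampleExternalQuery and the chosen status codes are illustrative.) */
#if 0
static int VMExampleExternalQuery(PVM pVM)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_RUNNING, VERR_WRONG_ORDER);
    /* ... query something ... */
    return VINF_SUCCESS;
}
#endif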


/** This is the VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to use APIs
 * rather than poking directly at the members everywhere. We therefore make
 * use of unions to hide everything which isn't local to the current source
 * module. This means we'll have to pay a little bit of attention when adding
 * new members to structures in the unions and make sure to keep the padding
 * sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE                     enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically. */
    volatile uint32_t           fGlobalForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCPUs;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[6];

    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param   eax         The return code, register.
     * @param   Ctx         The guest core context.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when we abandon the hypervisor because of a trap
     * and want the trap state to be saved.
     *
     * @param   eax         The return code, register.
     * @param   ecx         Pointer to the hypervisor core context, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context has already
     * been saved, or that it's just a brief return to HC and that the caller intends to resume
     * whatever it is doing upon 'return' from this call.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD                    uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD              uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool                        fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                        fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fCSAMEnabled;
    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                        fHWACCMEnabled;
    /** Hardware VM support is required and non-optional.
     * This is initialized together with the rest of the VM structure. */
    bool                        fHwVirtExtForced;
    /** PARAV enabled flag. */
    bool                        fPARAVEnabled;
    /** @} */


    /* Padding to make gcc put the StatTotalQemuToGC where MSC does. */
#if HC_ARCH_BITS == 32
    uint32_t                    padding0;
#endif

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV              StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV              StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV              StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV              StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV              StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV              StatSwitcherToHC;
    STAMPROFILEADV              StatSwitcherSaveRegs;
    STAMPROFILEADV              StatSwitcherSysEnter;
    STAMPROFILEADV              StatSwitcherDebug;
    STAMPROFILEADV              StatSwitcherCR0;
    STAMPROFILEADV              StatSwitcherCR4;
    STAMPROFILEADV              StatSwitcherJmpCR3;
    STAMPROFILEADV              StatSwitcherRstrRegs;
    STAMPROFILEADV              StatSwitcherLgdt;
    STAMPROFILEADV              StatSwitcherLidt;
    STAMPROFILEADV              StatSwitcherLldt;
    STAMPROFILEADV              StatSwitcherTSS;

/** @todo Realign everything on 64 byte boundaries to better match the
 *        cache-line size. */
    /* Padding - the unions must be aligned on 32 byte boundaries. */
    uint32_t                    padding[HC_ARCH_BITS == 32 ? 4+8 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
        char        padding[2048];          /* multiple of 32 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM  s;
#endif
        char        padding[1600];          /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM  s;
#endif
        char        padding[16*1024];       /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        char        padding[512];           /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        char        padding[5344];          /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        char        padding[544];           /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM   s;
#endif
        char        padding[192];           /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        char        padding[32];            /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM  s;
#endif
        char        padding[1824];          /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM  s;
#endif
        char        padding[4544];          /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        char        padding[768];           /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        char        padding[3328];          /* multiple of 32 */
    } csam;

    /** PARAV part. */
    union
    {
#ifdef ___PARAVInternal_h
        struct PARAV s;
#endif
        char        padding[128];
    } parav;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM   s;
#endif
        char        padding[256];           /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM   s;
#endif
        char        padding[1536];          /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        char        padding[2368];          /* multiple of 32 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM  s;
#endif
        char        padding[32];            /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT s;
#endif
        char        padding[768];           /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM  s;
#endif

/** @def VM_REM_SIZE
 * Must be multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h. */
#if GC_ARCH_BITS == 32
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10800 : 0x10800)
#else
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10900 : 0x10900)
#endif
        char        padding[VM_REM_SIZE];   /* multiple of 32 */
    } rem;

    /** Padding for aligning the cpu array on a 64 byte boundary. */
    uint32_t        u32Reserved2[8];

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a 64-byte boundary. */
    VMCPU           aCpus[1];
} VM;
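
/* Two consequences of the layout above, sketched for illustration
 * (vmExampleGetPendingFFs is hypothetical): a module sees its own union
 * member only after including its internal header (everyone else gets the
 * padding), and per-VCPU data is reached through the aCpus array. */
#if 0
static uint32_t vmExampleGetPendingFFs(PVM pVM, VMCPUID idCpu)
{
    PVMCPU pVCpu = &pVM->aCpus[idCpu];  /* valid for idCpu < pVM->cCPUs */
    return pVM->fGlobalForcedActions | pVCpu->fLocalForcedActions;
}
#endif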

/** Pointer to a VM. */
#ifndef ___VBox_types_h
typedef struct VM *PVM;
#endif


#ifdef IN_RC
__BEGIN_DECLS

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one of those
 * magic globals which we should avoid using.
 */
extern DECLIMPORT(VM) g_VM;

__END_DECLS
#endif

/** @} */

#endif