VirtualBox

source: vbox/trunk/include/VBox/vm.h@15382

Last change on this file since 15382 was 15344, checked in by vboxsync, 16 years ago

#3202: Optimizations of the dynamic page mapping code (ring-0). Do lots of the stuff inline, using the set as a 2nd-level cache and not releasing it for each inner VT-x iteration.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 24.6 KB
/** @file
 * VM - The Virtual Machine, data.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>


/** @defgroup grp_vm    The Virtual Machine
 * @{
 */

/** Maximum number of virtual CPUs per VM. */
#define VMCPU_MAX_CPU_COUNT     255

/**
 * The state of a virtual CPU.
 *
 * The VM running states are sub-states of the VMSTATE_RUNNING state, while
 * VMCPUSTATE_NOT_RUNNING is a placeholder covering all the other VM states.
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Running guest code (VM running). */
    VMCPUSTATE_RUN_EXEC,
    /** Running guest code in the recompiler (VM running). */
    VMCPUSTATE_RUN_EXEC_REM,
    /** Halted (VM running). */
    VMCPUSTATE_RUN_HALTED,
    /** All the other bits we do while running a VM (VM running). */
    VMCPUSTATE_RUN_MISC,
    /** VM not running, we're servicing requests or whatever. */
    VMCPUSTATE_NOT_RUNNING,
    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;
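
/* A minimal sketch of how a caller might interpret these states; the helper
 * name vmcpuStateIsExecutingGuest is hypothetical and not part of this header:
 * @code
 *      DECLINLINE(bool) vmcpuStateIsExecutingGuest(VMCPUSTATE enmState)
 *      {
 *          return enmState == VMCPUSTATE_RUN_EXEC
 *              || enmState == VMCPUSTATE_RUN_EXEC_REM;
 *      }
 * @endcode
 */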


/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpus array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;

    /** Align the next bit on a 64-byte boundary.
     *
     * @remarks The alignments of the members that are larger than 48 bytes
     *          should be 64 bytes for cache line reasons. Structs containing
     *          small amounts of data could be lumped together at the end with
     *          a < 64 byte padding following it (to grow into and align the
     *          struct size).
     */
    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 9 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        char                padding[2560];      /* multiple of 64 */
    } cpum;
    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        char                padding[64];        /* multiple of 64 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        char                padding[1024];      /* multiple of 64 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        char                padding[8192];      /* multiple of 64 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        char                padding[64];        /* multiple of 64 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        char                padding[64];        /* multiple of 64 */
    } tm;
} VMCPU;
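
/* Padding sizes like the ones above are the sort of thing compile-time checks
 * and the tstVMStructSize testcase (mentioned in the VM structure docs below)
 * exist to catch. A minimal sketch of such checks, assuming the AssertCompile*
 * macros from iprt/assert.h:
 * @code
 *      AssertCompileMemberAlignment(VMCPU, cpum, 64);
 *      AssertCompile(sizeof(((VMCPU *)0)->hwaccm.padding) % 64 == 0);
 * @endcode
 */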

/** Pointer to a VMCPU. */
#ifndef ___VBox_types_h
typedef struct VMCPU *PVMCPU;
#endif

/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME      "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME      "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** This action forces the VM to check for and service pending interrupts on the APIC. */
#define VM_FF_INTERRUPT_APIC            RT_BIT_32(0)
/** This action forces the VM to check for and service pending interrupts on the PIC. */
#define VM_FF_INTERRUPT_PIC             RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timers (TM). */
#define VM_FF_TIMER                     RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES                RT_BIT_32(3)
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA                   RT_BIT_32(4)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VM_FF_PDM_CRITSECT              RT_BIT_32(5)

/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted until DBGF clears it. */
#define VM_FF_DBGF                      RT_BIT_32(8)
/** This action forces the VM to service pending requests from other
 * threads, or requests which must be executed in another context. */
#define VM_FF_REQUEST                   RT_BIT_32(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE                 RT_BIT_32(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET                     RT_BIT_32(11)

/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VM_FF_PGM_SYNC_CR3              RT_BIT_32(16)
/** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VM_FF_PGM_SYNC_CR3_NON_GLOBAL   RT_BIT_32(17)
/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES      RT_BIT_32(18)
/** Check the interrupt and trap gates. */
#define VM_FF_TRPM_SYNC_IDT             RT_BIT_32(19)
/** Check the guest's TSS ring-0 stack. */
#define VM_FF_SELM_SYNC_TSS             RT_BIT_32(20)
/** Check the guest's GDT table. */
#define VM_FF_SELM_SYNC_GDT             RT_BIT_32(21)
/** Check the guest's LDT table. */
#define VM_FF_SELM_SYNC_LDT             RT_BIT_32(22)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VM_FF_INHIBIT_INTERRUPTS        RT_BIT_32(23)

/** CSAM needs to scan the page that's being executed. */
#define VM_FF_CSAM_SCAN_PAGE            RT_BIT_32(24)
/** CSAM needs to do some homework. */
#define VM_FF_CSAM_PENDING_ACTION       RT_BIT_32(25)

/** Force return to Ring-3. */
#define VM_FF_TO_R3                     RT_BIT_32(28)

/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY        RT_BIT_32(29)

/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND             RT_BIT_32(31)

/** Externally forced actions. Used to quit the idle/wait loop when suspended. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK   (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
/** Externally forced actions. Used to quit the idle/wait loop when halted. */
#define VM_FF_EXTERNAL_HALTED_MASK      (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
/** High priority pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK    (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
                                         | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_PGM_NEED_HANDY_PAGES)
/** High priority pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK (VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_PGM_NEED_HANDY_PAGES \
                                         | VM_FF_INHIBIT_INTERRUPTS)
/** High priority post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK   (VM_FF_PDM_CRITSECT | VM_FF_CSAM_PENDING_ACTION)
/** Normal priority post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_CSAM_SCAN_PAGE)
/** Normal priority actions. */
#define VM_FF_NORMAL_PRIORITY_MASK      (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
/** Flags to check before resuming guest execution. */
#define VM_FF_RESUME_GUEST_MASK         (VM_FF_TO_R3)
/** All the forced flags. */
#define VM_FF_ALL_MASK                  (~0U)
/** All the forced flags except the raw-mode execution related ones. */
#define VM_FF_ALL_BUT_RAW_MASK          (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_CSAM_PENDING_ACTION | VM_FF_PDM_CRITSECT))

/** @} */
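
/* Each mask above is just the bitwise OR of its member flags, so a whole
 * category can be tested with a single AND. A minimal sketch of a halted
 * wait-loop wake-up check, using VM_FF_ISPENDING() defined below (illustrative
 * only, not code from this file):
 * @code
 *      if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK))
 *          return VINF_SUCCESS;
 * @endcode
 */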

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVM     VM Handle.
 * @param   idCpu   Virtual CPU ID.
 * @param   fFlag   The flag to set.
 */
#ifdef VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_SET(pVM, idCpu, fFlag)    ASMAtomicOrU32(&(pVM)->aCpus[idCpu].fForcedActions, (fFlag))
#else
# define VMCPU_FF_SET(pVM, idCpu, fFlag)    VM_FF_SET(pVM, fFlag)
#endif

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVM     VM Handle.
 * @param   idCpu   Virtual CPU ID.
 * @param   fFlag   The flag to clear.
 */
#ifdef VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_CLEAR(pVM, idCpu, fFlag)  ASMAtomicAndU32(&(pVM)->aCpus[idCpu].fForcedActions, ~(fFlag))
#else
# define VMCPU_FF_CLEAR(pVM, idCpu, fFlag)  VM_FF_CLEAR(pVM, fFlag)
#endif

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVM     VM Handle.
 * @param   idCpu   Virtual CPU ID.
 * @param   fFlag   The flag to check.
 */
#ifdef VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_ISSET(pVM, idCpu, fFlag)  (((pVM)->aCpus[idCpu].fForcedActions & (fFlag)) == (fFlag))
#else
# define VMCPU_FF_ISSET(pVM, idCpu, fFlag)  VM_FF_ISSET(pVM, fFlag)
#endif

/** @def VM_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fForcedActions & (fFlags))

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending for the given VCPU.
 *
 * @param   pVM     VM Handle.
 * @param   idCpu   Virtual CPU ID.
 * @param   fFlags  The flags to check for.
 */
#ifdef VBOX_WITH_SMP_GUESTS
# define VMCPU_FF_ISPENDING(pVM, idCpu, fFlags) ((pVM)->aCpus[idCpu].fForcedActions & (fFlags))
#else
# define VMCPU_FF_ISPENDING(pVM, idCpu, fFlags) VM_FF_ISPENDING(pVM, fFlags)
#endif
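
/* A minimal sketch of the set/test/clear pattern these macros support; the
 * vmPollTimers() caller and its body are hypothetical, not VBox code:
 * @code
 *      static void vmPollTimers(PVM pVM)
 *      {
 *          VM_FF_SET(pVM, VM_FF_TIMER);
 *          if (VM_FF_ISPENDING(pVM, VM_FF_TIMER | VM_FF_PDM_QUEUES))
 *          {
 *              if (VM_FF_ISSET(pVM, VM_FF_TIMER))
 *                  VM_FF_CLEAR(pVM, VM_FF_TIMER);
 *          }
 *      }
 * @endcode
 * Note that VM_FF_ISPENDING() yields non-zero if any flag in the set is
 * pending, while VM_FF_ISSET() requires all bits of fFlag to be set.
 */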

/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#elif defined(IN_RING0)
# define VM_IS_EMT(pVM)                     true
#else
/** @todo need to rework this macro for the case of multiple emulation threads for SMP */
# define VM_IS_EMT(pVM)                     (VMR3GetVMCPUNativeThread(pVM) == RTThreadNativeSelf())
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
                    (rc))
#endif
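
/* A minimal sketch of guarding an EMT-only entry point with these asserts;
 * VMR3DemoApi and its body are hypothetical, not part of VBox:
 * @code
 *      VMR3DECL(int) VMR3DemoApi(PVM pVM)
 *      {
 *          VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */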


/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
    AssertMsg((pVM)->enmVMState == (_enmState), \
              ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
    AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                    ("state %s, expected %s\n", VMGetStateName(pVM->enmVMState), VMGetStateName(_enmState)), \
                    (rc))

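/* Typical usage, sketched (the state and status code are illustrative):
 * @code
 *      VM_ASSERT_STATE_RETURN(pVM, VMSTATE_RUNNING, VERR_VM_INVALID_VM_STATE);
 * @endcode
 */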



/** This is the VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to use APIs
 * rather than modifying the members directly all around the place. Therefore
 * we make use of unions to hide everything which isn't local to the current
 * source module. This means we'll have to pay a little bit of attention when
 * adding new members to structures in the unions, and make sure to keep the
 * padding sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE                     enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically. */
    volatile uint32_t           fForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCPUs;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[6];

    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param   eax         The return code, register.
     * @param   Ctx         The guest core context.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when abandoning the hypervisor because of a trap
     * and we want the trap state to be saved.
     *
     * @param   eax         The return code, register.
     * @param   ecx         Pointer to the hypervisor core context, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context has already
     * been saved, or that it's just a brief return to HC and that the caller intends to resume
     * whatever it is doing upon 'return' from this call.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR                     pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD                    uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD              uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator. */
    bool                        fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                        fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                        fCSAMEnabled;
    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                        fHWACCMEnabled;
    /** Hardware VM support is required and non-optional.
     * This is initialized together with the rest of the VM structure. */
    bool                        fHwVirtExtForced;
    /** PARAV enabled flag. */
    bool                        fPARAVEnabled;
    /** @} */


    /* padding to make gnuc put the StatQemuToGC where msc does. */
#if HC_ARCH_BITS == 32
    uint32_t                    padding0;
#endif

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV              StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV              StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV              StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV              StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV              StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV              StatSwitcherToHC;
    STAMPROFILEADV              StatSwitcherSaveRegs;
    STAMPROFILEADV              StatSwitcherSysEnter;
    STAMPROFILEADV              StatSwitcherDebug;
    STAMPROFILEADV              StatSwitcherCR0;
    STAMPROFILEADV              StatSwitcherCR4;
    STAMPROFILEADV              StatSwitcherJmpCR3;
    STAMPROFILEADV              StatSwitcherRstrRegs;
    STAMPROFILEADV              StatSwitcherLgdt;
    STAMPROFILEADV              StatSwitcherLidt;
    STAMPROFILEADV              StatSwitcherLldt;
    STAMPROFILEADV              StatSwitcherTSS;

/** @todo Realign everything on 64 byte boundaries to better match the
 *        cache-line size. */
    /* padding - the unions must be aligned on 32-byte boundaries. */
    uint32_t                    padding[HC_ARCH_BITS == 32 ? 4+8 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM         s;
#endif
        char                padding[4096];      /* multiple of 32 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM          s;
#endif
        char                padding[1536];      /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM          s;
#endif
        char                padding[50*1024];   /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM       s;
#endif
        char                padding[512];       /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM         s;
#endif
        char                padding[5344];      /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM         s;
#endif
        char                padding[544];       /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM           s;
#endif
        char                padding[192];       /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM         s;
#endif
        char                padding[32];        /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM          s;
#endif
        char                padding[1824];      /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM          s;
#endif
        char                padding[4544];      /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM         s;
#endif
        char                padding[768];       /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM         s;
#endif
        char                padding[3328];      /* multiple of 32 */
    } csam;

    /** PARAV part. */
    union
    {
#ifdef ___PARAVInternal_h
        struct PARAV        s;
#endif
        char                padding[128];
    } parav;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM           s;
#endif
        char                padding[1344];      /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM           s;
#endif
        char                padding[1536];      /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF         s;
#endif
        char                padding[2368];      /* multiple of 32 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM          s;
#endif
        char                padding[32];        /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT        s;
#endif
        char                padding[768];       /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM          s;
#endif

/** @def VM_REM_SIZE
 * Must be multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h. */
#if GC_ARCH_BITS == 32
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10800 : 0x10800)
#else
# define VM_REM_SIZE        (HC_ARCH_BITS == 32 ? 0x10900 : 0x10900)
#endif
        char                padding[VM_REM_SIZE];   /* multiple of 32 */
    } rem;

    /** Padding for aligning the cpu array on a 64-byte boundary. */
    uint32_t                    u32Reserved2[8];

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a 64-byte boundary. */
    VMCPU                       aCpus[1];
} VM;
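
/* The VMCPU array lives at the tail of the structure; a minimal sketch of
 * resolving a virtual CPU by ID via offVMCPU (the helper name is hypothetical,
 * not part of this header):
 * @code
 *      DECLINLINE(PVMCPU) vmGetCpuById(PVM pVM, VMCPUID idCpu)
 *      {
 *          Assert(idCpu < pVM->cCPUs);
 *          return (PVMCPU)((uint8_t *)pVM + pVM->offVMCPU) + idCpu;
 *      }
 * @endcode
 * The arithmetic assumes the array is laid out contiguously starting at
 * offVMCPU, as the aCpus declaration implies.
 */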

/** Pointer to a VM. */
#ifndef ___VBox_types_h
typedef struct VM *PVM;
#endif


#ifdef IN_RC
__BEGIN_DECLS

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one of those
 * magic globals which we should avoid using.
 */
extern DECLIMPORT(VM) g_VM;

__END_DECLS
#endif

/** @} */

#endif