VirtualBox

source: vbox/trunk/include/VBox/vmm/vm.h@ 91306

Last change on this file since 91306 was 91306, checked in by vboxsync, 3 years ago

VMM/CPUM,++: Moved the nested VT-X virtual apic page allocation into CPUMCTX. bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 57.0 KB
1/** @file
2 * VM - The Virtual Machine, data.
3 */
4
5/*
6 * Copyright (C) 2006-2020 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef VBOX_INCLUDED_vmm_vm_h
27#define VBOX_INCLUDED_vmm_vm_h
28#ifndef RT_WITHOUT_PRAGMA_ONCE
29# pragma once
30#endif
31
32#ifndef VBOX_FOR_DTRACE_LIB
33# ifndef USING_VMM_COMMON_DEFS
34# error "Compile job does not include VMM_COMMON_DEFS from src/VBox/VMM/Config.kmk - make sure you really need to include this file!"
35# endif
36# include <iprt/param.h>
37# include <VBox/param.h>
38# include <VBox/types.h>
39# include <VBox/vmm/cpum.h>
40# include <VBox/vmm/stam.h>
41# include <VBox/vmm/vmapi.h>
42# include <VBox/vmm/vmm.h>
43# include <VBox/sup.h>
44#else
45# pragma D depends_on library vbox-types.d
46# pragma D depends_on library CPUMInternal.d
47# define VMM_INCLUDED_SRC_include_CPUMInternal_h
48#endif
49
50
51
52/** @defgroup grp_vm The Virtual Machine
53 * @ingroup grp_vmm
54 * @{
55 */
56
57/**
58 * The state of a Virtual CPU.
59 *
60 * The basic state indicated here is whether the CPU has been started or not. In
61 * addition, there are sub-states when started for assisting scheduling (GVMM
62 * mostly).
63 *
64 * The transition out of the STOPPED state is done by a vmR3PowerOn.
65 * The transition back to the STOPPED state is done by vmR3PowerOff.
66 *
 67 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
68 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
69 */
70typedef enum VMCPUSTATE
71{
72 /** The customary invalid zero. */
73 VMCPUSTATE_INVALID = 0,
74
75 /** Virtual CPU has not yet been started. */
76 VMCPUSTATE_STOPPED,
77
78 /** CPU started. */
79 VMCPUSTATE_STARTED,
80 /** CPU started in HM context. */
81 VMCPUSTATE_STARTED_HM,
82 /** Executing guest code and can be poked (RC or STI bits of HM). */
83 VMCPUSTATE_STARTED_EXEC,
84 /** Executing guest code using NEM. */
85 VMCPUSTATE_STARTED_EXEC_NEM,
 86 VMCPUSTATE_STARTED_EXEC_NEM_WAIT, /**< Executing guest code using NEM, in a cancelable blocking wait. */
 87 VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, /**< The NEM blocking wait was poked/canceled. */
88 /** Halted. */
89 VMCPUSTATE_STARTED_HALTED,
90
91 /** The end of valid virtual CPU states. */
92 VMCPUSTATE_END,
93
94 /** Ensure 32-bit type. */
95 VMCPUSTATE_32BIT_HACK = 0x7fffffff
96} VMCPUSTATE;
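
/* A minimal transition sketch (hypothetical helper, not part of this header;
 * it uses the VMCPU_CMPXCHG_STATE macro defined further down).  Sub-state
 * switches are done with compare-and-exchange so a concurrent poke from
 * another thread cannot be lost: */
#if 0 /* illustration only */
static bool vmcpuExampleTryEnterHalt(PVMCPU pVCpu)
{
    /* Succeeds only if nobody changed the state since we last looked. */
    return VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
}
#endif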
97
98/** Enables 64-bit FFs. */
99#define VMCPU_WITH_64_BIT_FFS
100
101
102/**
103 * The cross context virtual CPU structure.
104 *
105 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
106 */
107typedef struct VMCPU
108{
109 /** @name Volatile per-cpu data.
110 * @{ */
111 /** Per CPU forced action.
112 * See the VMCPU_FF_* \#defines. Updated atomically. */
113#ifdef VMCPU_WITH_64_BIT_FFS
114 uint64_t volatile fLocalForcedActions;
115#else
116 uint32_t volatile fLocalForcedActions;
 117 uint32_t fForLocalForcedActionsExpansion; /**< Keeps the layout in sync with the 64-bit FF variant. */
118#endif
119 /** The CPU state. */
120 VMCPUSTATE volatile enmState;
121
122 /** Padding up to 64 bytes. */
123 uint8_t abAlignment0[64 - 12];
124 /** @} */
125
126 /** IEM part.
127 * @remarks This comes first as it allows the use of 8-bit immediates for the
128 * first 64 bytes of the structure, reducing code size a wee bit. */
129#ifdef VMM_INCLUDED_SRC_include_IEMInternal_h /* For PDB hacking. */
130 union VMCPUUNIONIEMFULL
131#else
132 union VMCPUUNIONIEMSTUB
133#endif
134 {
135#ifdef VMM_INCLUDED_SRC_include_IEMInternal_h
136 struct IEMCPU s;
137#endif
138 uint8_t padding[26688]; /* multiple of 64 */
139 } iem;
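
/* The union-plus-padding idiom above (and in the parts that follow) is what
 * keeps this public header layout-stable: consumers that never include
 * IEMInternal.h still see a structure of the correct size, while VMM code
 * gets the real member.  The fit is expected to be verified along these
 * lines with the IPRT compile-time assertions (illustrative sketch): */
#if 0
AssertCompile(sizeof(struct IEMCPU) <= RT_SIZEOFMEMB(VMCPU, iem.padding));
#endif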
140
141 /** @name Static per-cpu data.
 142 * (Putting this after IEM, hoping it is used less frequently than the IEM data.)
143 * @{ */
144 /** Ring-3 Host Context VM Pointer. */
145 PVMR3 pVMR3;
 146 /** Ring-0 Host Context VCpu Pointer, currently used by VTG/dtrace. */
147 RTR0PTR pVCpuR0ForVtg;
148 /** Raw-mode Context VM Pointer. */
149 uint32_t pVMRC;
150 /** Padding for new raw-mode (long mode). */
151 uint32_t pVMRCPadding;
152 /** Pointer to the ring-3 UVMCPU structure. */
153 PUVMCPU pUVCpu;
154 /** The native thread handle. */
155 RTNATIVETHREAD hNativeThread;
156 /** The native R0 thread handle. (different from the R3 handle!) */
157 RTNATIVETHREAD hNativeThreadR0;
158 /** The IPRT thread handle (for VMMDevTesting). */
159 RTTHREAD hThread;
160 /** The CPU ID.
161 * This is the index into the VM::aCpu array. */
162#ifdef IN_RING0
163 VMCPUID idCpuUnsafe;
164#else
165 VMCPUID idCpu;
166#endif
167
 168 /** Align the structures below on a 64-byte boundary and make sure the block
 169 * starts at the same offset in both 64-bit and 32-bit builds.
 170 *
 171 * @remarks The alignments of the members that are larger than 48 bytes should be
 172 * 64 bytes for cache line reasons. Structs containing small amounts of
 173 * data could be lumped together at the end with a < 64 byte padding
 174 * following it (to grow into and align the struct size).
175 */
176 uint8_t abAlignment1[64 - 6 * (HC_ARCH_BITS == 32 ? 4 : 8) - 8 - 4];
177 /** @} */
178
179 /** HM part. */
180 union VMCPUUNIONHM
181 {
182#ifdef VMM_INCLUDED_SRC_include_HMInternal_h
183 struct HMCPU s;
184#endif
185 uint8_t padding[9984]; /* multiple of 64 */
186 } hm;
187
188 /** NEM part. */
189 union VMCPUUNIONNEM
190 {
191#ifdef VMM_INCLUDED_SRC_include_NEMInternal_h
192 struct NEMCPU s;
193#endif
194 uint8_t padding[512]; /* multiple of 64 */
195 } nem;
196
197 /** TRPM part. */
198 union VMCPUUNIONTRPM
199 {
200#ifdef VMM_INCLUDED_SRC_include_TRPMInternal_h
201 struct TRPMCPU s;
202#endif
203 uint8_t padding[128]; /* multiple of 64 */
204 } trpm;
205
206 /** TM part. */
207 union VMCPUUNIONTM
208 {
209#ifdef VMM_INCLUDED_SRC_include_TMInternal_h
210 struct TMCPU s;
211#endif
212 uint8_t padding[5760]; /* multiple of 64 */
213 } tm;
214
215 /** VMM part. */
216 union VMCPUUNIONVMM
217 {
218#ifdef VMM_INCLUDED_SRC_include_VMMInternal_h
219 struct VMMCPU s;
220#endif
221 uint8_t padding[1344]; /* multiple of 64 */
222 } vmm;
223
224 /** PDM part. */
225 union VMCPUUNIONPDM
226 {
227#ifdef VMM_INCLUDED_SRC_include_PDMInternal_h
228 struct PDMCPU s;
229#endif
230 uint8_t padding[256]; /* multiple of 64 */
231 } pdm;
232
233 /** IOM part. */
234 union VMCPUUNIONIOM
235 {
236#ifdef VMM_INCLUDED_SRC_include_IOMInternal_h
237 struct IOMCPU s;
238#endif
239 uint8_t padding[512]; /* multiple of 64 */
240 } iom;
241
242 /** DBGF part.
243 * @todo Combine this with other tiny structures. */
244 union VMCPUUNIONDBGF
245 {
246#ifdef VMM_INCLUDED_SRC_include_DBGFInternal_h
247 struct DBGFCPU s;
248#endif
249 uint8_t padding[512]; /* multiple of 64 */
250 } dbgf;
251
252 /** GIM part. */
253 union VMCPUUNIONGIM
254 {
255#ifdef VMM_INCLUDED_SRC_include_GIMInternal_h
256 struct GIMCPU s;
257#endif
258 uint8_t padding[512]; /* multiple of 64 */
259 } gim;
260
261 /** APIC part. */
262 union VMCPUUNIONAPIC
263 {
264#ifdef VMM_INCLUDED_SRC_include_APICInternal_h
265 struct APICCPU s;
266#endif
267 uint8_t padding[3840]; /* multiple of 64 */
268 } apic;
269
270 /*
 271 * Some less frequently used global members that don't need to take up
272 * precious space at the head of the structure.
273 */
274
275 /** Trace groups enable flags. */
276 uint32_t fTraceGroups; /* 64 / 44 */
277 /** Number of collisions hashing the ring-0 EMT handle. */
278 uint8_t cEmtHashCollisions;
279 uint8_t abAdHoc[3];
280 /** Profiling samples for use by ad hoc profiling. */
281 STAMPROFILEADV aStatAdHoc[8]; /* size: 40*8 = 320 */
282
283 /** Align the following members on page boundary. */
284 uint8_t abAlignment2[2744];
285
286 /** PGM part. */
287 union VMCPUUNIONPGM
288 {
289#ifdef VMM_INCLUDED_SRC_include_PGMInternal_h
290 struct PGMCPU s;
291#endif
292 uint8_t padding[4096 + 28672]; /* multiple of 4096 */
293 } pgm;
294
295 /** CPUM part. */
296 union VMCPUUNIONCPUM
297 {
298#ifdef VMM_INCLUDED_SRC_include_CPUMInternal_h
299 struct CPUMCPU s;
300#endif
301#ifdef VMCPU_INCL_CPUM_GST_CTX
302 /** The guest CPUM context for direct use by execution engines.
303 * This is not for general consumption, but for HM, REM, IEM, and maybe a few
304 * others. The rest will use the function based CPUM API. */
305 CPUMCTX GstCtx;
306#endif
307 uint8_t padding[102400]; /* multiple of 4096 */
308 } cpum;
309
310 /** EM part. */
311 union VMCPUUNIONEM
312 {
313#ifdef VMM_INCLUDED_SRC_include_EMInternal_h
314 struct EMCPU s;
315#endif
316 uint8_t padding[40960]; /* multiple of 4096 */
317 } em;
318} VMCPU;
319
320
321#ifndef VBOX_FOR_DTRACE_LIB
322AssertCompileSizeAlignment(VMCPU, 4096);
323
324/** @name Operations on VMCPU::enmState
325 * @{ */
326/** Gets the VMCPU state. */
327#define VMCPU_GET_STATE(pVCpu) ( (pVCpu)->enmState )
328/** Sets the VMCPU state. */
329#define VMCPU_SET_STATE(pVCpu, enmNewState) \
330 ASMAtomicWriteU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState))
 331/** Compares and sets the VMCPU state. */
332#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
333 ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
334/** Checks the VMCPU state. */
335#ifdef VBOX_STRICT
336# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
337 do { \
338 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
339 AssertMsg(enmState == (enmExpectedState), \
340 ("enmState=%d enmExpectedState=%d idCpu=%u\n", \
341 enmState, enmExpectedState, (pVCpu)->idCpu)); \
342 } while (0)
343
344# define VMCPU_ASSERT_STATE_2(pVCpu, enmExpectedState, a_enmExpectedState2) \
345 do { \
346 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
347 AssertMsg( enmState == (enmExpectedState) \
348 || enmState == (a_enmExpectedState2), \
349 ("enmState=%d enmExpectedState=%d enmExpectedState2=%d idCpu=%u\n", \
350 enmState, enmExpectedState, a_enmExpectedState2, (pVCpu)->idCpu)); \
351 } while (0)
352#else
353# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) do { } while (0)
354# define VMCPU_ASSERT_STATE_2(pVCpu, enmExpectedState, a_enmExpectedState2) do { } while (0)
355#endif
356/** Tests if the state means that the CPU is started. */
357#define VMCPUSTATE_IS_STARTED(enmState) ( (enmState) > VMCPUSTATE_STOPPED )
358/** Tests if the state means that the CPU is stopped. */
359#define VMCPUSTATE_IS_STOPPED(enmState) ( (enmState) == VMCPUSTATE_STOPPED )
360/** @} */
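
/* Usage sketch (hypothetical EM fragment; assumes the caller is the owning
 * EMT): bracket a guest-execution burst with the executing sub-state and, in
 * strict builds, assert the expected starting point first. */
#if 0 /* illustration only */
static void vmcpuExampleExecBurst(PVMCPU pVCpu)
{
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);  /* no-op in release builds */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    /* ... execute guest code ... */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
}
#endif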
361
362
363/** The name of the raw-mode context VMM Core module. */
364#define VMMRC_MAIN_MODULE_NAME "VMMRC.rc"
365/** The name of the ring-0 context VMM Core module. */
366#define VMMR0_MAIN_MODULE_NAME "VMMR0.r0"
367
368
369/** VM Forced Action Flags.
370 *
371 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
372 * action mask of a VM.
373 *
374 * Available VM bits:
375 * 0, 1, 5, 6, 7, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30
376 *
377 *
378 * Available VMCPU bits:
379 * 14, 15, 36 to 63
380 *
 381 * @todo If we run low on VMCPU bits, we may consider merging the SELM bits.
382 *
383 * @{
384 */
385/** The virtual sync clock has been stopped, go to TM until it has been
386 * restarted... */
387#define VM_FF_TM_VIRTUAL_SYNC RT_BIT_32(VM_FF_TM_VIRTUAL_SYNC_BIT)
388#define VM_FF_TM_VIRTUAL_SYNC_BIT 2
389/** PDM Queues are pending. */
390#define VM_FF_PDM_QUEUES RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
391/** The bit number for VM_FF_PDM_QUEUES. */
392#define VM_FF_PDM_QUEUES_BIT 3
393/** PDM DMA transfers are pending. */
394#define VM_FF_PDM_DMA RT_BIT_32(VM_FF_PDM_DMA_BIT)
395/** The bit number for VM_FF_PDM_DMA. */
396#define VM_FF_PDM_DMA_BIT 4
397/** This action forces the VM to call DBGF so DBGF can service debugger
398 * requests in the emulation thread.
399 * This action flag stays asserted till DBGF clears it.*/
400#define VM_FF_DBGF RT_BIT_32(VM_FF_DBGF_BIT)
401/** The bit number for VM_FF_DBGF. */
402#define VM_FF_DBGF_BIT 8
403/** This action forces the VM to service pending requests from other
 404 * threads or requests which must be executed in another context. */
405#define VM_FF_REQUEST RT_BIT_32(VM_FF_REQUEST_BIT)
406#define VM_FF_REQUEST_BIT 9
407/** Check for VM state changes and take appropriate action. */
408#define VM_FF_CHECK_VM_STATE RT_BIT_32(VM_FF_CHECK_VM_STATE_BIT)
409/** The bit number for VM_FF_CHECK_VM_STATE. */
410#define VM_FF_CHECK_VM_STATE_BIT 10
411/** Reset the VM. (postponed) */
412#define VM_FF_RESET RT_BIT_32(VM_FF_RESET_BIT)
413/** The bit number for VM_FF_RESET. */
414#define VM_FF_RESET_BIT 11
415/** EMT rendezvous in VMM. */
416#define VM_FF_EMT_RENDEZVOUS RT_BIT_32(VM_FF_EMT_RENDEZVOUS_BIT)
417/** The bit number for VM_FF_EMT_RENDEZVOUS. */
418#define VM_FF_EMT_RENDEZVOUS_BIT 12
419
420/** PGM needs to allocate handy pages. */
421#define VM_FF_PGM_NEED_HANDY_PAGES RT_BIT_32(VM_FF_PGM_NEED_HANDY_PAGES_BIT)
422#define VM_FF_PGM_NEED_HANDY_PAGES_BIT 18
423/** PGM is out of memory.
424 * Abandon all loops and code paths which can be resumed and get up to the EM
425 * loops. */
426#define VM_FF_PGM_NO_MEMORY RT_BIT_32(VM_FF_PGM_NO_MEMORY_BIT)
427#define VM_FF_PGM_NO_MEMORY_BIT 19
 428/** PGM is about to perform a lightweight pool flush.
 429 * Guest SMP: all EMT threads should return to ring-3.
 430 */
431#define VM_FF_PGM_POOL_FLUSH_PENDING RT_BIT_32(VM_FF_PGM_POOL_FLUSH_PENDING_BIT)
432#define VM_FF_PGM_POOL_FLUSH_PENDING_BIT 20
433/** Suspend the VM - debug only. */
434#define VM_FF_DEBUG_SUSPEND RT_BIT_32(VM_FF_DEBUG_SUSPEND_BIT)
435#define VM_FF_DEBUG_SUSPEND_BIT 31
436
437
438/** This action forces the VM to check any pending interrupts on the APIC. */
439#define VMCPU_FF_INTERRUPT_APIC RT_BIT_64(VMCPU_FF_INTERRUPT_APIC_BIT)
440#define VMCPU_FF_INTERRUPT_APIC_BIT 0
 441/** This action forces the VM to check any pending interrupts on the PIC. */
442#define VMCPU_FF_INTERRUPT_PIC RT_BIT_64(VMCPU_FF_INTERRUPT_PIC_BIT)
443#define VMCPU_FF_INTERRUPT_PIC_BIT 1
 444/** This action forces the VM to schedule and run pending timers (TM).
445 * @remarks Don't move - PATM compatibility. */
446#define VMCPU_FF_TIMER RT_BIT_64(VMCPU_FF_TIMER_BIT)
447#define VMCPU_FF_TIMER_BIT 2
448/** This action forces the VM to check any pending NMIs. */
449#define VMCPU_FF_INTERRUPT_NMI RT_BIT_64(VMCPU_FF_INTERRUPT_NMI_BIT)
450#define VMCPU_FF_INTERRUPT_NMI_BIT 3
451/** This action forces the VM to check any pending SMIs. */
452#define VMCPU_FF_INTERRUPT_SMI RT_BIT_64(VMCPU_FF_INTERRUPT_SMI_BIT)
453#define VMCPU_FF_INTERRUPT_SMI_BIT 4
454/** PDM critical section unlocking is pending, process promptly upon return to R3. */
455#define VMCPU_FF_PDM_CRITSECT RT_BIT_64(VMCPU_FF_PDM_CRITSECT_BIT)
456#define VMCPU_FF_PDM_CRITSECT_BIT 5
457/** Special EM internal force flag that is used by EMUnhaltAndWakeUp() to force
458 * the virtual CPU out of the next (/current) halted state. It is not processed
459 * nor cleared by emR3ForcedActions (similar to VMCPU_FF_BLOCK_NMIS), instead it
460 * is cleared the next time EM leaves the HALTED state. */
461#define VMCPU_FF_UNHALT RT_BIT_64(VMCPU_FF_UNHALT_BIT)
462#define VMCPU_FF_UNHALT_BIT 6
463/** Pending IEM action (mask). */
464#define VMCPU_FF_IEM RT_BIT_64(VMCPU_FF_IEM_BIT)
465/** Pending IEM action (bit number). */
466#define VMCPU_FF_IEM_BIT 7
467/** Pending APIC action (bit number). */
468#define VMCPU_FF_UPDATE_APIC_BIT 8
469/** This action forces the VM to update APIC's asynchronously arrived
470 * interrupts as pending interrupts. */
471#define VMCPU_FF_UPDATE_APIC RT_BIT_64(VMCPU_FF_UPDATE_APIC_BIT)
472/** This action forces the VM to service pending requests from other
 473 * threads or requests which must be executed in another context. */
474#define VMCPU_FF_REQUEST RT_BIT_64(VMCPU_FF_REQUEST_BIT)
475#define VMCPU_FF_REQUEST_BIT 9
476/** Pending DBGF event (alternative to passing VINF_EM_DBG_EVENT around). */
477#define VMCPU_FF_DBGF RT_BIT_64(VMCPU_FF_DBGF_BIT)
478/** The bit number for VMCPU_FF_DBGF. */
479#define VMCPU_FF_DBGF_BIT 10
 480/** Hardware virtualized nested-guest interrupt pending. */
 481#define VMCPU_FF_INTERRUPT_NESTED_GUEST RT_BIT_64(VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT)
 482#define VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT 11
 483/** This action forces the VM to service any pending updates to CR3 (used only
 484 * by HM). */
 485#define VMCPU_FF_HM_UPDATE_CR3 RT_BIT_64(VMCPU_FF_HM_UPDATE_CR3_BIT)
 486#define VMCPU_FF_HM_UPDATE_CR3_BIT 12
487/* Bit 13 used to be VMCPU_FF_HM_UPDATE_PAE_PDPES. */
488/** This action forces the VM to resync the page tables before going
489 * back to execute guest code. (GLOBAL FLUSH) */
490#define VMCPU_FF_PGM_SYNC_CR3 RT_BIT_64(VMCPU_FF_PGM_SYNC_CR3_BIT)
491#define VMCPU_FF_PGM_SYNC_CR3_BIT 16
 492/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
493 * (NON-GLOBAL FLUSH) */
494#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL RT_BIT_64(VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL_BIT)
495#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL_BIT 17
 496/** Check for pending TLB shootdown actions (deprecated).
 497 * Reserved for future HM re-use if necessary / safe.
498 * Consumer: HM */
499#define VMCPU_FF_TLB_SHOOTDOWN_UNUSED RT_BIT_64(VMCPU_FF_TLB_SHOOTDOWN_UNUSED_BIT)
500#define VMCPU_FF_TLB_SHOOTDOWN_UNUSED_BIT 18
501/** Check for pending TLB flush action.
502 * Consumer: HM
503 * @todo rename to VMCPU_FF_HM_TLB_FLUSH */
504#define VMCPU_FF_TLB_FLUSH RT_BIT_64(VMCPU_FF_TLB_FLUSH_BIT)
505/** The bit number for VMCPU_FF_TLB_FLUSH. */
506#define VMCPU_FF_TLB_FLUSH_BIT 19
507/* 20 used to be VMCPU_FF_TRPM_SYNC_IDT (raw-mode only). */
508/* 21 used to be VMCPU_FF_SELM_SYNC_TSS (raw-mode only). */
509/* 22 used to be VMCPU_FF_SELM_SYNC_GDT (raw-mode only). */
510/* 23 used to be VMCPU_FF_SELM_SYNC_LDT (raw-mode only). */
511/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
512#define VMCPU_FF_INHIBIT_INTERRUPTS RT_BIT_64(VMCPU_FF_INHIBIT_INTERRUPTS_BIT)
513#define VMCPU_FF_INHIBIT_INTERRUPTS_BIT 24
514/** Block injection of non-maskable interrupts to the guest. */
515#define VMCPU_FF_BLOCK_NMIS RT_BIT_64(VMCPU_FF_BLOCK_NMIS_BIT)
516#define VMCPU_FF_BLOCK_NMIS_BIT 25
517/** Force return to Ring-3. */
518#define VMCPU_FF_TO_R3 RT_BIT_64(VMCPU_FF_TO_R3_BIT)
519#define VMCPU_FF_TO_R3_BIT 28
520/** Force return to ring-3 to service pending I/O or MMIO write.
 521 * This is a backup for the VINF_IOM_R3_IOPORT_COMMIT_WRITE and
 522 * VINF_IOM_R3_MMIO_COMMIT_WRITE mechanisms, allowing VINF_EM_DBG_BREAKPOINT and similar
523 * status codes to be propagated at the same time without loss. */
524#define VMCPU_FF_IOM RT_BIT_64(VMCPU_FF_IOM_BIT)
525#define VMCPU_FF_IOM_BIT 29
526/* 30 used to be VMCPU_FF_CPUM */
527/** VMX-preemption timer expired. */
528#define VMCPU_FF_VMX_PREEMPT_TIMER RT_BIT_64(VMCPU_FF_VMX_PREEMPT_TIMER_BIT)
529#define VMCPU_FF_VMX_PREEMPT_TIMER_BIT 31
530/** Pending MTF (Monitor Trap Flag) event. */
531#define VMCPU_FF_VMX_MTF RT_BIT_64(VMCPU_FF_VMX_MTF_BIT)
532#define VMCPU_FF_VMX_MTF_BIT 32
533/** VMX APIC-write emulation pending. */
534#define VMCPU_FF_VMX_APIC_WRITE RT_BIT_64(VMCPU_FF_VMX_APIC_WRITE_BIT)
535#define VMCPU_FF_VMX_APIC_WRITE_BIT 33
536/** VMX interrupt-window event pending. */
537#define VMCPU_FF_VMX_INT_WINDOW RT_BIT_64(VMCPU_FF_VMX_INT_WINDOW_BIT)
538#define VMCPU_FF_VMX_INT_WINDOW_BIT 34
539/** VMX NMI-window event pending. */
540#define VMCPU_FF_VMX_NMI_WINDOW RT_BIT_64(VMCPU_FF_VMX_NMI_WINDOW_BIT)
541#define VMCPU_FF_VMX_NMI_WINDOW_BIT 35
542
543
 544/** Externally forced VM actions. Used to quit the idle/wait loop. */
545#define VM_FF_EXTERNAL_SUSPENDED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_EMT_RENDEZVOUS )
 546/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
547#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK ( VMCPU_FF_REQUEST | VMCPU_FF_DBGF )
548
549/** Externally forced VM actions. Used to quit the idle/wait loop. */
550#define VM_FF_EXTERNAL_HALTED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST \
551 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS )
552/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
553#define VMCPU_FF_EXTERNAL_HALTED_MASK ( VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
554 | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
555 | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF \
556 | VMCPU_FF_INTERRUPT_NESTED_GUEST)
557
558/** High priority VM pre-execution actions. */
559#define VM_FF_HIGH_PRIORITY_PRE_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC \
560 | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
561 | VM_FF_EMT_RENDEZVOUS )
562/** High priority VMCPU pre-execution actions. */
563#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
564 | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF \
565 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
566 | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
567 | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW )
568
569/** High priority VM pre raw-mode execution mask. */
570#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY )
571/** High priority VMCPU pre raw-mode execution mask. */
572#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
573 | VMCPU_FF_INHIBIT_INTERRUPTS )
574
575/** High priority post-execution actions. */
576#define VM_FF_HIGH_PRIORITY_POST_MASK ( VM_FF_PGM_NO_MEMORY )
577/** High priority post-execution actions. */
578#define VMCPU_FF_HIGH_PRIORITY_POST_MASK ( VMCPU_FF_PDM_CRITSECT | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_IEM | VMCPU_FF_IOM )
579
580/** Normal priority VM post-execution actions. */
581#define VM_FF_NORMAL_PRIORITY_POST_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET \
582 | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
583/** Normal priority VMCPU post-execution actions. */
584#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK ( VMCPU_FF_DBGF )
585
586/** Normal priority VM actions. */
587#define VM_FF_NORMAL_PRIORITY_MASK ( VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS)
588/** Normal priority VMCPU actions. */
589#define VMCPU_FF_NORMAL_PRIORITY_MASK ( VMCPU_FF_REQUEST )
590
591/** Flags to clear before resuming guest execution. */
592#define VMCPU_FF_RESUME_GUEST_MASK ( VMCPU_FF_TO_R3 )
593
594
595/** VM flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
596#define VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
597 | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_RESET)
598/** VM flags that cause the REP[|NE|E] STRINS loops to yield. */
599#define VM_FF_YIELD_REPSTR_MASK ( VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
600 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_DBGF | VM_FF_DEBUG_SUSPEND )
601/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
602#ifdef IN_RING3
603# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF \
604 | VMCPU_FF_VMX_MTF )
605#else
606# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VMCPU_FF_TO_R3 | VMCPU_FF_IEM | VMCPU_FF_IOM | VMCPU_FF_PGM_SYNC_CR3 \
607 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_VMX_MTF )
608#endif
609/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
610 * enabled. */
611#define VMCPU_FF_YIELD_REPSTR_MASK ( VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
612 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC \
613 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_PDM_CRITSECT \
614 | VMCPU_FF_TIMER | VMCPU_FF_REQUEST \
615 | VMCPU_FF_INTERRUPT_NESTED_GUEST )
616/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
617 * disabled. */
618#define VMCPU_FF_YIELD_REPSTR_NOINT_MASK ( VMCPU_FF_YIELD_REPSTR_MASK \
619 & ~( VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC \
620 | VMCPU_FF_INTERRUPT_NESTED_GUEST) )
621
622/** VM Flags that cause the HM loops to go back to ring-3. */
623#define VM_FF_HM_TO_R3_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
624 | VM_FF_PDM_QUEUES | VM_FF_EMT_RENDEZVOUS)
625/** VMCPU Flags that cause the HM loops to go back to ring-3. */
626#define VMCPU_FF_HM_TO_R3_MASK ( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT \
627 | VMCPU_FF_IEM | VMCPU_FF_IOM)
628
629/** High priority ring-0 VM pre HM-mode execution mask. */
630#define VM_FF_HP_R0_PRE_HM_MASK (VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
631/** High priority ring-0 VMCPU pre HM-mode execution mask. */
632#define VMCPU_FF_HP_R0_PRE_HM_MASK ( VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 \
633 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST \
634 | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER)
635/** High priority ring-0 VM pre HM-mode execution mask, single stepping. */
636#define VM_FF_HP_R0_PRE_HM_STEP_MASK (VM_FF_HP_R0_PRE_HM_MASK & ~( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES \
637 | VM_FF_EMT_RENDEZVOUS | VM_FF_REQUEST \
638 | VM_FF_PDM_DMA) )
639/** High priority ring-0 VMCPU pre HM-mode execution mask, single stepping. */
640#define VMCPU_FF_HP_R0_PRE_HM_STEP_MASK (VMCPU_FF_HP_R0_PRE_HM_MASK & ~( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER \
641 | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_REQUEST) )
642
643/** All the VMX nested-guest flags. */
644#define VMCPU_FF_VMX_ALL_MASK ( VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
645 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW )
646
647/** All the forced VM flags. */
648#define VM_FF_ALL_MASK (UINT32_MAX)
649/** All the forced VMCPU flags. */
650#define VMCPU_FF_ALL_MASK (UINT32_MAX)
651
652/** All the forced VM flags except those related to raw-mode and hardware
653 * assisted execution. */
654#define VM_FF_ALL_REM_MASK (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
655/** All the forced VMCPU flags except those related to raw-mode and hardware
656 * assisted execution. */
657#define VMCPU_FF_ALL_REM_MASK (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_TLB_FLUSH))
658/** @} */
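
/* Every flag above comes as a pair: the mask form (VM_FF_XXX / VMCPU_FF_XXX)
 * for testing several flags with one AND, and the bit-number form (..._BIT)
 * for the atomic bit instructions.  The setter/clearer macros below verify
 * the pairing at compile time, conceptually like this (illustration only): */
#if 0
AssertCompile(VM_FF_PDM_QUEUES == RT_BIT_32(VM_FF_PDM_QUEUES_BIT));
AssertCompile(VMCPU_FF_TIMER   == RT_BIT_64(VMCPU_FF_TIMER_BIT));
#endif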
659
660/** @def VM_FF_SET
661 * Sets a single force action flag.
662 *
663 * @param pVM The cross context VM structure.
664 * @param fFlag The flag to set.
665 */
666#define VM_FF_SET(pVM, fFlag) do { \
667 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
668 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
669 ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
670 } while (0)
671
672/** @def VMCPU_FF_SET
673 * Sets a single force action flag for the given VCPU.
674 *
675 * @param pVCpu The cross context virtual CPU structure.
676 * @param fFlag The flag to set.
677 * @sa VMCPU_FF_SET_MASK
678 */
679#ifdef VMCPU_WITH_64_BIT_FFS
680# define VMCPU_FF_SET(pVCpu, fFlag) do { \
681 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
682 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
683 ASMAtomicBitSet(&(pVCpu)->fLocalForcedActions, fFlag##_BIT); \
684 } while (0)
685#else
686# define VMCPU_FF_SET(pVCpu, fFlag) do { \
687 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
688 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
689 ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag)); \
690 } while (0)
691#endif
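
/* Usage sketch (hypothetical TM path): raising the timer force-action on a
 * virtual CPU.  Passing anything other than a single VMCPU_FF_XXX constant
 * trips the AssertCompile checks inside the macro at build time. */
#if 0 /* illustration only */
static void vmcpuExampleArmTimerFF(PVMCPU pVCpu)
{
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TIMER);
}
#endif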
692
693/** @def VMCPU_FF_SET_MASK
 694 * Sets two or more force action flags for the given VCPU.
695 *
696 * @param pVCpu The cross context virtual CPU structure.
697 * @param fFlags The flags to set.
698 * @sa VMCPU_FF_SET
699 */
700#ifdef VMCPU_WITH_64_BIT_FFS
701# if ARCH_BITS > 32
702# define VMCPU_FF_SET_MASK(pVCpu, fFlags) \
703 do { ASMAtomicOrU64(&pVCpu->fLocalForcedActions, (fFlags)); } while (0)
704# else
705# define VMCPU_FF_SET_MASK(pVCpu, fFlags) do { \
706 if (!((fFlags) >> 32)) ASMAtomicOrU32((uint32_t volatile *)&pVCpu->fLocalForcedActions, (uint32_t)(fFlags)); \
707 else ASMAtomicOrU64(&pVCpu->fLocalForcedActions, (fFlags)); \
708 } while (0)
709# endif
710#else
711# define VMCPU_FF_SET_MASK(pVCpu, fFlags) \
712 do { ASMAtomicOrU32(&pVCpu->fLocalForcedActions, (fFlags)); } while (0)
713#endif
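
/* On 32-bit hosts the mask variant avoids a 64-bit atomic when it can: if
 * (fFlags) >> 32 is zero, every bit that is actually set lives in the low
 * dword and a plain 32-bit atomic OR reaches all of them.  A hypothetical
 * caller combining several flags: */
#if 0 /* illustration only */
VMCPU_FF_SET_MASK(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC);
#endif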
714
715/** @def VM_FF_CLEAR
716 * Clears a single force action flag.
717 *
718 * @param pVM The cross context VM structure.
719 * @param fFlag The flag to clear.
720 */
721#define VM_FF_CLEAR(pVM, fFlag) do { \
722 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
723 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
724 ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
725 } while (0)
726
727/** @def VMCPU_FF_CLEAR
728 * Clears a single force action flag for the given VCPU.
729 *
730 * @param pVCpu The cross context virtual CPU structure.
731 * @param fFlag The flag to clear.
732 */
733#ifdef VMCPU_WITH_64_BIT_FFS
734# define VMCPU_FF_CLEAR(pVCpu, fFlag) do { \
735 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
736 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
737 ASMAtomicBitClear(&(pVCpu)->fLocalForcedActions, fFlag##_BIT); \
738 } while (0)
739#else
740# define VMCPU_FF_CLEAR(pVCpu, fFlag) do { \
741 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
742 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
743 ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag)); \
744 } while (0)
745#endif
746
747/** @def VMCPU_FF_CLEAR_MASK
748 * Clears two or more force action flags for the given VCPU.
749 *
750 * @param pVCpu The cross context virtual CPU structure.
751 * @param fFlags The flags to clear.
752 */
753#ifdef VMCPU_WITH_64_BIT_FFS
754# if ARCH_BITS > 32
755# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) \
756 do { ASMAtomicAndU64(&(pVCpu)->fLocalForcedActions, ~(fFlags)); } while (0)
757# else
758# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) do { \
759 if (!((fFlags) >> 32)) ASMAtomicAndU32((uint32_t volatile *)&(pVCpu)->fLocalForcedActions, ~(uint32_t)(fFlags)); \
760 else ASMAtomicAndU64(&(pVCpu)->fLocalForcedActions, ~(fFlags)); \
761 } while (0)
762# endif
763#else
764# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) \
765 do { ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlags)); } while (0)
766#endif
767
768/** @def VM_FF_IS_SET
 769 * Checks if a single force action flag is set.
770 *
771 * @param pVM The cross context VM structure.
772 * @param fFlag The flag to check.
773 * @sa VM_FF_IS_ANY_SET
774 */
775#if !defined(VBOX_STRICT) || !defined(RT_COMPILER_SUPPORTS_LAMBDA)
776# define VM_FF_IS_SET(pVM, fFlag) RT_BOOL((pVM)->fGlobalForcedActions & (fFlag))
777#else
778# define VM_FF_IS_SET(pVM, fFlag) \
779 ([](PVM a_pVM) -> bool \
780 { \
781 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
782 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
783 return RT_BOOL(a_pVM->fGlobalForcedActions & (fFlag)); \
784 }(pVM))
785#endif
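
/* The strict C++ variant wraps the test in an immediately invoked lambda so
 * the AssertCompile statements can live inside what must remain an
 * expression; non-strict builds reduce to a single AND plus test.  A
 * hypothetical caller: */
#if 0 /* illustration only */
if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
{
    /* ... run the pending DMA work ... */
}
#endif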
786
787/** @def VMCPU_FF_IS_SET
788 * Checks if a single force action flag is set for the given VCPU.
789 *
790 * @param pVCpu The cross context virtual CPU structure.
791 * @param fFlag The flag to check.
792 * @sa VMCPU_FF_IS_ANY_SET
793 */
794#if !defined(VBOX_STRICT) || !defined(RT_COMPILER_SUPPORTS_LAMBDA)
795# define VMCPU_FF_IS_SET(pVCpu, fFlag) RT_BOOL((pVCpu)->fLocalForcedActions & (fFlag))
796#else
797# define VMCPU_FF_IS_SET(pVCpu, fFlag) \
798 ([](PCVMCPU a_pVCpu) -> bool \
799 { \
800 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
801 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
802 return RT_BOOL(a_pVCpu->fLocalForcedActions & (fFlag)); \
803 }(pVCpu))
804#endif
805
806/** @def VM_FF_IS_ANY_SET
 807 * Checks if one or more force actions in the specified set are pending.
808 *
809 * @param pVM The cross context VM structure.
810 * @param fFlags The flags to check for.
811 * @sa VM_FF_IS_SET
812 */
813#define VM_FF_IS_ANY_SET(pVM, fFlags) RT_BOOL((pVM)->fGlobalForcedActions & (fFlags))
814
815/** @def VMCPU_FF_IS_ANY_SET
 816 * Checks if one or more force action flags in the specified set are set for the given VCPU.
817 *
818 * @param pVCpu The cross context virtual CPU structure.
819 * @param fFlags The flags to check for.
820 * @sa VMCPU_FF_IS_SET
821 */
822#define VMCPU_FF_IS_ANY_SET(pVCpu, fFlags) RT_BOOL((pVCpu)->fLocalForcedActions & (fFlags))
823
824/** @def VM_FF_TEST_AND_CLEAR
 825 * Checks if one (!) force action in the specified set is pending and clears it atomically.
826 *
827 * @returns true if the bit was set.
828 * @returns false if the bit was clear.
829 * @param pVM The cross context VM structure.
830 * @param fFlag Flag constant to check and clear (_BIT is appended).
831 */
832#define VM_FF_TEST_AND_CLEAR(pVM, fFlag) (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, fFlag##_BIT))
833
834/** @def VMCPU_FF_TEST_AND_CLEAR
 835 * Checks if one (!) force action in the specified set is pending and clears it atomically.
836 *
837 * @returns true if the bit was set.
838 * @returns false if the bit was clear.
839 * @param pVCpu The cross context virtual CPU structure.
840 * @param fFlag Flag constant to check and clear (_BIT is appended).
841 */
842#define VMCPU_FF_TEST_AND_CLEAR(pVCpu, fFlag) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, fFlag##_BIT))
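
/* Usage sketch (hypothetical request-drain fragment): test-and-clear hands
 * the caller exclusive ownership of the event, so two paths can never both
 * service the same pending request flag. */
#if 0 /* illustration only */
static void vmcpuExampleDrainRequests(PVMCPU pVCpu)
{
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_REQUEST))
    {
        /* ... process the queued requests ... */
    }
}
#endif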
843
844/** @def VM_FF_IS_PENDING_EXCEPT
 845 * Checks if one or more force actions in the specified set are pending while
 846 * one or more other ones are not.
847 *
848 * @param pVM The cross context VM structure.
849 * @param fFlags The flags to check for.
850 * @param fExcpt The flags that should not be set.
851 */
852#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt) \
853 ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )
854
855/** @def VM_IS_EMT
856 * Checks if the current thread is the emulation thread (EMT).
857 *
858 * @remark The ring-0 variation will need attention if we expand the ring-0
859 * code to let threads other than EMT mess around with the VM.
860 */
861#ifdef IN_RC
862# define VM_IS_EMT(pVM) true
863#else
864# define VM_IS_EMT(pVM) (VMMGetCpu(pVM) != NULL)
865#endif
866
867/** @def VMCPU_IS_EMT
868 * Checks if the current thread is the emulation thread (EMT) for the specified
869 * virtual CPU.
870 */
871#ifdef IN_RC
872# define VMCPU_IS_EMT(pVCpu) true
873#else
874# define VMCPU_IS_EMT(pVCpu) ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
875#endif
876
877/** @def VM_ASSERT_EMT
878 * Asserts that the current thread IS the emulation thread (EMT).
879 */
880#ifdef IN_RC
881# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
882#elif defined(IN_RING0)
883# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
884#else
885# define VM_ASSERT_EMT(pVM) \
886 AssertMsg(VM_IS_EMT(pVM), \
887 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
888#endif
889
890/** @def VMCPU_ASSERT_EMT
891 * Asserts that the current thread IS the emulation thread (EMT) of the
892 * specified virtual CPU.
893 */
894#ifdef IN_RC
895# define VMCPU_ASSERT_EMT(pVCpu) Assert(VMCPU_IS_EMT(pVCpu))
896#elif defined(IN_RING0)
897# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
898 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%u\n", \
899 RTThreadNativeSelf(), (pVCpu) ? (pVCpu)->hNativeThreadR0 : 0, \
900 (pVCpu) ? (pVCpu)->idCpu : 0))
901#else
902# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
903 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
904 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
905#endif
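
/* Usage sketch: code touching per-CPU state without further locking typically
 * opens with one of these assertions (hypothetical lock-free reader): */
#if 0 /* illustration only */
static uint64_t vmcpuExamplePeekFFs(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);   /* only the owning EMT may read without locks */
    return pVCpu->fLocalForcedActions;
}
#endif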
906
907/** @def VM_ASSERT_EMT_RETURN
908 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
909 */
910#ifdef IN_RC
911# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
912#elif defined(IN_RING0)
913# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
914#else
915# define VM_ASSERT_EMT_RETURN(pVM, rc) \
916 AssertMsgReturn(VM_IS_EMT(pVM), \
917 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
918 (rc))
919#endif
920
921/** @def VMCPU_ASSERT_EMT_RETURN
922 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
923 */
924#ifdef IN_RC
925# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
926#elif defined(IN_RING0)
927# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
928#else
929# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
930 AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
931 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
932 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
933 (rc))
934#endif
935
936/** @def VMCPU_ASSERT_EMT_OR_GURU
937 * Asserts that the current thread IS the emulation thread (EMT) of the
 938 * specified virtual CPU, or that the VM is in a guru meditation state.
939 */
940#if defined(IN_RC) || defined(IN_RING0)
941# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) Assert( VMCPU_IS_EMT(pVCpu) \
942 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
943 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS )
944#else
945# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) \
946 AssertMsg( VMCPU_IS_EMT(pVCpu) \
947 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
948 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS, \
949 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
950 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
951#endif
952
953/** @def VMCPU_ASSERT_EMT_OR_NOT_RUNNING
954 * Asserts that the current thread IS the emulation thread (EMT) of the
955 * specified virtual CPU or the VM is not running.
956 */
957#if defined(IN_RC) || defined(IN_RING0)
958# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
959 Assert( VMCPU_IS_EMT(pVCpu) \
960 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)) )
961#else
962# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
963 AssertMsg( VMCPU_IS_EMT(pVCpu) \
964 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)), \
965 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
966 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
967#endif
968
969/** @def VMSTATE_IS_RUNNING
970 * Checks if the given state indicates a running VM.
971 */
972#define VMSTATE_IS_RUNNING(a_enmVMState) \
 973 ( (a_enmVMState) == VMSTATE_RUNNING \
 974 || (a_enmVMState) == VMSTATE_RUNNING_LS )
975
976/** @def VM_IS_RUNNING_FOR_ASSERTIONS_ONLY
977 * Checks if the VM is running.
978 * @note This is only for pure debug assertions. No AssertReturn or similar!
979 * @sa VMSTATE_IS_RUNNING
980 */
981#define VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM) \
982 ( (pVM)->enmVMState == VMSTATE_RUNNING \
983 || (pVM)->enmVMState == VMSTATE_RUNNING_LS )
984
985/** @def VM_ASSERT_IS_NOT_RUNNING
986 * Asserts that the VM is not running.
987 */
988#if defined(IN_RC) || defined(IN_RING0)
989#define VM_ASSERT_IS_NOT_RUNNING(pVM) Assert(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM))
990#else
991#define VM_ASSERT_IS_NOT_RUNNING(pVM) AssertMsg(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM), \
992 ("VM is running. enmVMState=%d\n", (pVM)->enmVMState))
993#endif
994
995/** @def VM_ASSERT_EMT0
996 * Asserts that the current thread IS emulation thread \#0 (EMT0).
997 */
998#ifdef IN_RING3
999# define VM_ASSERT_EMT0(a_pVM) VMCPU_ASSERT_EMT((a_pVM)->apCpusR3[0])
1000#else
1001# define VM_ASSERT_EMT0(a_pVM) VMCPU_ASSERT_EMT(&(a_pVM)->aCpus[0])
1002#endif
1003
1004/** @def VM_ASSERT_EMT0_RETURN
1005 * Asserts that the current thread IS emulation thread \#0 (EMT0) and returns if
1006 * it isn't.
1007 */
1008#ifdef IN_RING3
1009# define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN((pVM)->apCpusR3[0], (rc))
1010#else
1011# define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN(&(pVM)->aCpus[0], (rc))
1012#endif
1013
1014
1015/**
1016 * Asserts that the current thread is NOT the emulation thread.
1017 */
1018#define VM_ASSERT_OTHER_THREAD(pVM) \
1019 AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))
1020
1021
1022/** @def VM_ASSERT_STATE
1023 * Asserts a certain VM state.
1024 */
1025#define VM_ASSERT_STATE(pVM, _enmState) \
1026 AssertMsg((pVM)->enmVMState == (_enmState), \
1027 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))
1028
1029/** @def VM_ASSERT_STATE_RETURN
1030 * Asserts a certain VM state and returns if it doesn't match.
1031 */
1032#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
1033 AssertMsgReturn((pVM)->enmVMState == (_enmState), \
1034 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
1035 (rc))
1036
1037/** @def VM_IS_VALID_EXT
1038 * Checks that the VM handle is valid for external access, i.e. not being
1039 * destroyed or terminated. */
1040#define VM_IS_VALID_EXT(pVM) \
1041 ( RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
1042 && ( (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
1043 || ( (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
1044 && VM_IS_EMT(pVM))) )
1045
1046/** @def VM_ASSERT_VALID_EXT_RETURN
1047 * Asserts that the VM handle is valid for external access, i.e. not being
1048 * destroyed or terminated.
1049 */
1050#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
1051 AssertMsgReturn(VM_IS_VALID_EXT(pVM), \
1052 ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
1053 ? VMGetStateName(pVM->enmVMState) : ""), \
1054 (rc))
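
/* Usage sketch (hypothetical external API entry point): validate the handle
 * first and fail with a status code rather than asserting hard, since the
 * caller may legitimately race VM destruction. */
#if 0 /* illustration only */
static int vmExampleExternalCall(PVM pVM)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    /* ... pVM can be dereferenced safely from here on ... */
    return VINF_SUCCESS;
}
#endif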
1055
1056/** @def VMCPU_ASSERT_VALID_EXT_RETURN
1057 * Asserts that the VMCPU handle is valid for external access, i.e. not being
1058 * destroyed or terminated.
1059 */
1060#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
1061 AssertMsgReturn( RT_VALID_ALIGNED_PTR(pVCpu, 64) \
1062 && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
1063 && (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
1064 ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
1065 RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
1066 ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
1067 (rc))
1068
1069#endif /* !VBOX_FOR_DTRACE_LIB */
1070
1071
1072/**
1073 * Helper that HM and NEM use for safely modifying VM::bMainExecutionEngine.
1074 *
1075 * ONLY HM and NEM MAY USE THIS!
1076 *
1077 * @param a_pVM The cross context VM structure.
1078 * @param a_bValue The new value.
1079 * @internal
1080 */
1081#define VM_SET_MAIN_EXECUTION_ENGINE(a_pVM, a_bValue) \
1082 do { \
1083 *const_cast<uint8_t *>(&(a_pVM)->bMainExecutionEngine) = (a_bValue); \
1084 ASMCompilerBarrier(); /* just to be on the safe side */ \
1085 } while (0)
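
/* Usage sketch (hypothetical init fragment): HM or NEM selects the engine
 * exactly once during vmR3InitRing3; the const_cast plus compiler barrier in
 * the macro is what makes the otherwise read-only field writable here. */
#if 0 /* illustration only */
static void vmExampleSelectEngine(PVM pVM, bool fHwVirtSupported)
{
    VM_SET_MAIN_EXECUTION_ENGINE(pVM, fHwVirtSupported
                                      ? VM_EXEC_ENGINE_HW_VIRT
                                      : VM_EXEC_ENGINE_NATIVE_API);
}
#endif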
1086
1087/**
1088 * Checks whether raw-mode is used.
1089 *
1090 * @retval true if raw-mode is used.
1091 * @retval false if not (i.e. HM or NEM is used).
1092 *
1093 * @param a_pVM The cross context VM structure.
1094 * @sa VM_IS_HM_OR_NEM_ENABLED, VM_IS_HM_ENABLED, VM_IS_NEM_ENABLED.
1095 * @internal
1096 */
1097#ifdef VBOX_WITH_RAW_MODE
1098# define VM_IS_RAW_MODE_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_RAW_MODE)
1099#else
1100# define VM_IS_RAW_MODE_ENABLED(a_pVM) (false)
1101#endif
1102
1103/**
1104 * Checks whether HM (VT-x/AMD-V) or NEM is being used by this VM.
1105 *
1106 * @retval true if either is used.
1107 * @retval false if software virtualization (raw-mode) is used.
1108 *
1109 * @param a_pVM The cross context VM structure.
1110 * @sa VM_IS_RAW_MODE_ENABLED, VM_IS_HM_ENABLED, VM_IS_NEM_ENABLED.
1111 * @internal
1112 */
1113#define VM_IS_HM_OR_NEM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine != VM_EXEC_ENGINE_RAW_MODE)
1114
1115/**
1116 * Checks whether HM is being used by this VM.
1117 *
1118 * @retval true if HM (VT-x/AMD-v) is used.
1119 * @retval false if not.
1120 *
1121 * @param a_pVM The cross context VM structure.
1122 * @sa VM_IS_NEM_ENABLED, VM_IS_RAW_MODE_ENABLED, VM_IS_HM_OR_NEM_ENABLED.
1123 * @internal
1124 */
1125#define VM_IS_HM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT)
1126
1127/**
1128 * Checks whether NEM is being used by this VM.
1129 *
1130 * @retval true if a native hypervisor API is used.
1131 * @retval false if not.
1132 *
1133 * @param a_pVM The cross context VM structure.
1134 * @sa VM_IS_HM_ENABLED, VM_IS_RAW_MODE_ENABLED, VM_IS_HM_OR_NEM_ENABLED.
1135 * @internal
1136 */
1137#define VM_IS_NEM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
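
/* Dispatch sketch (hypothetical): the three predicates are mutually exclusive
 * because they all key off the single bMainExecutionEngine byte. */
#if 0 /* illustration only */
static const char *vmExampleEngineName(PVM pVM)
{
    if (VM_IS_HM_ENABLED(pVM))        return "HM (VT-x/AMD-V)";
    if (VM_IS_NEM_ENABLED(pVM))       return "NEM (native hypervisor API)";
    if (VM_IS_RAW_MODE_ENABLED(pVM))  return "raw-mode";
    return "not set yet";
}
#endif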
1138
1139
1140/**
1141 * The cross context VM structure.
1142 *
1143 * It contains all the VM data which have to be available in all contexts.
1144 * Even though it contains all the data, the idea is to use APIs rather than
1145 * modifying the members directly from all over the place. Therefore we use
1146 * unions to hide everything which isn't local to the current source module.
1147 * This means we must pay a little attention when adding new members to
1148 * structures in the unions and keep the padding sizes up to date.
1149 *
1150 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
1151 */
1152typedef struct VM
1153{
1154 /** The state of the VM.
1155 * This field is read only to everyone except the VM and EM. */
1156 VMSTATE volatile enmVMState;
1157 /** Forced action flags.
1158 * See the VM_FF_* \#defines. Updated atomically.
1159 */
1160 volatile uint32_t fGlobalForcedActions;
1161 /** Pointer to the array of page descriptors for the VM structure allocation. */
1162 R3PTRTYPE(PSUPPAGE) paVMPagesR3;
1163 /** Session handle. For use when calling SUPR0 APIs. */
1164#ifdef IN_RING0
1165 PSUPDRVSESSION pSessionUnsafe;
1166#else
1167 PSUPDRVSESSION pSession;
1168#endif
1169 /** Pointer to the ring-3 VM structure. */
1170 PUVM pUVM;
1171 /** Ring-3 Host Context VM Pointer. */
1172#ifdef IN_RING0
1173 R3PTRTYPE(struct VM *) pVMR3Unsafe;
1174#else
1175 R3PTRTYPE(struct VM *) pVMR3;
1176#endif
1177 /** Ring-0 Host Context VM pointer for making ring-0 calls. */
1178 R0PTRTYPE(struct VM *) pVMR0ForCall;
1179 /** Raw-mode Context VM Pointer. */
1180 uint32_t pVMRC;
1181 /** Padding for new raw-mode (long mode). */
1182 uint32_t pVMRCPadding;
1183
1184 /** The GVM VM handle. Only the GVM should modify this field. */
1185#ifdef IN_RING0
1186 uint32_t hSelfUnsafe;
1187#else
1188 uint32_t hSelf;
1189#endif
1190 /** Number of virtual CPUs. */
1191#ifdef IN_RING0
1192 uint32_t cCpusUnsafe;
1193#else
1194 uint32_t cCpus;
1195#endif
 1196 /** CPU execution cap (1-100). */
1197 uint32_t uCpuExecutionCap;
1198
1199 /** Size of the VM structure. */
1200 uint32_t cbSelf;
1201 /** Size of the VMCPU structure. */
1202 uint32_t cbVCpu;
1203 /** Structure version number (TBD). */
1204 uint32_t uStructVersion;
1205
1206 /** @name Various items that are frequently accessed.
1207 * @{ */
1208 /** The main execution engine, VM_EXEC_ENGINE_XXX.
1209 * This is set early during vmR3InitRing3 by HM or NEM. */
1210 uint8_t const bMainExecutionEngine;
1211
1212 /** Hardware VM support is available and enabled.
1213 * Determined very early during init.
1214 * This is placed here for performance reasons.
1215 * @todo obsoleted by bMainExecutionEngine, eliminate. */
1216 bool fHMEnabled;
1217
1218 /** Large page enabled flag.
 1219 * @todo This doesn't need to be here, PGM should store it in its own
1220 * structures instead. */
1221 bool fUseLargePages;
1222 /** @} */
1223
1224 /** Alignment padding. */
1225 uint8_t uPadding1[5];
1226
1227 /** @name Debugging
1228 * @{ */
1229 /** Ring-3 Host Context VM Pointer. */
1230 R3PTRTYPE(RTTRACEBUF) hTraceBufR3;
1231 /** Ring-0 Host Context VM Pointer. */
1232 R0PTRTYPE(RTTRACEBUF) hTraceBufR0;
1233 /** @} */
1234
1235 /** Max EMT hash lookup collisions (in GVMM). */
1236 uint8_t cMaxEmtHashCollisions;
1237
 1238 /** Padding - the unions must be aligned on a 64-byte boundary. */
1239 uint8_t abAlignment3[HC_ARCH_BITS == 64 ? 23 : 51];
1240
1241 /** CPUM part. */
1242 union
1243 {
1244#ifdef VMM_INCLUDED_SRC_include_CPUMInternal_h
1245 struct CPUM s;
1246#endif
1247#ifdef VBOX_INCLUDED_vmm_cpum_h
1248 /** Read only info exposed about the host and guest CPUs. */
1249 struct
1250 {
1251 /** Padding for hidden fields. */
1252 uint8_t abHidden0[64];
1253 /** Host CPU feature information. */
1254 CPUMFEATURES HostFeatures;
1255 /** Guest CPU feature information. */
1256 CPUMFEATURES GuestFeatures;
1257 } const ro;
1258#endif
1259 /** @todo this is rather bloated because of static MSR range allocation.
1260 * Probably a good idea to move it to a separate R0 allocation... */
1261 uint8_t padding[8832 + 128*8192]; /* multiple of 64 */
1262 } cpum;
1263
1264 /** VMM part. */
1265 union
1266 {
1267#ifdef VMM_INCLUDED_SRC_include_VMMInternal_h
1268 struct VMM s;
1269#endif
1270 uint8_t padding[1600]; /* multiple of 64 */
1271 } vmm;
1272
1273 /** PGM part. */
1274 union
1275 {
1276#ifdef VMM_INCLUDED_SRC_include_PGMInternal_h
1277 struct PGM s;
1278#endif
1279 uint8_t padding[21120]; /* multiple of 64 */
1280 } pgm;
1281
1282 /** HM part. */
1283 union
1284 {
1285#ifdef VMM_INCLUDED_SRC_include_HMInternal_h
1286 struct HM s;
1287#endif
1288 uint8_t padding[5504]; /* multiple of 64 */
1289 } hm;
1290
1291 /** TRPM part. */
1292 union
1293 {
1294#ifdef VMM_INCLUDED_SRC_include_TRPMInternal_h
1295 struct TRPM s;
1296#endif
1297 uint8_t padding[2048]; /* multiple of 64 */
1298 } trpm;
1299
1300 /** SELM part. */
1301 union
1302 {
1303#ifdef VMM_INCLUDED_SRC_include_SELMInternal_h
1304 struct SELM s;
1305#endif
1306 uint8_t padding[768]; /* multiple of 64 */
1307 } selm;
1308
1309 /** MM part. */
1310 union
1311 {
1312#ifdef VMM_INCLUDED_SRC_include_MMInternal_h
1313 struct MM s;
1314#endif
1315 uint8_t padding[192]; /* multiple of 64 */
1316 } mm;
1317
1318 /** PDM part. */
1319 union
1320 {
1321#ifdef VMM_INCLUDED_SRC_include_PDMInternal_h
1322 struct PDM s;
1323#endif
1324 uint8_t padding[8320]; /* multiple of 64 */
1325 } pdm;
1326
1327 /** IOM part. */
1328 union
1329 {
1330#ifdef VMM_INCLUDED_SRC_include_IOMInternal_h
1331 struct IOM s;
1332#endif
1333 uint8_t padding[1152]; /* multiple of 64 */
1334 } iom;
1335
1336 /** EM part. */
1337 union
1338 {
1339#ifdef VMM_INCLUDED_SRC_include_EMInternal_h
1340 struct EM s;
1341#endif
1342 uint8_t padding[256]; /* multiple of 64 */
1343 } em;
1344
1345 /** NEM part. */
1346 union
1347 {
1348#ifdef VMM_INCLUDED_SRC_include_NEMInternal_h
1349 struct NEM s;
1350#endif
1351 uint8_t padding[128]; /* multiple of 64 */
1352 } nem;
1353
1354 /** TM part. */
1355 union
1356 {
1357#ifdef VMM_INCLUDED_SRC_include_TMInternal_h
1358 struct TM s;
1359#endif
1360 uint8_t padding[10112]; /* multiple of 64 */
1361 } tm;
1362
1363 /** DBGF part. */
1364 union
1365 {
1366#ifdef VMM_INCLUDED_SRC_include_DBGFInternal_h
1367 struct DBGF s;
1368#endif
1369#ifdef VBOX_INCLUDED_vmm_dbgf_h
1370 /** Read only info exposed about interrupt breakpoints and selected events. */
1371 struct
1372 {
1373 /** Bitmap of enabled hardware interrupt breakpoints. */
1374 uint32_t bmHardIntBreakpoints[256 / 32];
1375 /** Bitmap of enabled software interrupt breakpoints. */
1376 uint32_t bmSoftIntBreakpoints[256 / 32];
1377 /** Bitmap of selected events.
1378 * This includes non-selectable events too for simplicity, we maintain the
1379 * state for some of these, as it may come in handy. */
1380 uint64_t bmSelectedEvents[(DBGFEVENT_END + 63) / 64];
1381 /** Enabled hardware interrupt breakpoints. */
1382 uint32_t cHardIntBreakpoints;
1383 /** Enabled software interrupt breakpoints. */
1384 uint32_t cSoftIntBreakpoints;
1385 /** The number of selected events. */
1386 uint32_t cSelectedEvents;
1387 /** The number of enabled hardware breakpoints. */
1388 uint8_t cEnabledHwBreakpoints;
1389 /** The number of enabled hardware I/O breakpoints. */
1390 uint8_t cEnabledHwIoBreakpoints;
1391 uint8_t au8Alignment1[2]; /**< Alignment padding. */
1392 /** The number of enabled INT3 breakpoints. */
1393 uint32_t volatile cEnabledInt3Breakpoints;
1394 } const ro;
1395#endif
1396 uint8_t padding[2432]; /* multiple of 64 */
1397 } dbgf;
1398
1399 /** SSM part. */
1400 union
1401 {
1402#ifdef VMM_INCLUDED_SRC_include_SSMInternal_h
1403 struct SSM s;
1404#endif
1405 uint8_t padding[128]; /* multiple of 64 */
1406 } ssm;
1407
1408 union
1409 {
1410#ifdef VMM_INCLUDED_SRC_include_GIMInternal_h
1411 struct GIM s;
1412#endif
1413 uint8_t padding[448]; /* multiple of 64 */
1414 } gim;
1415
1416 union
1417 {
1418#ifdef VMM_INCLUDED_SRC_include_APICInternal_h
1419 struct APIC s;
1420#endif
1421 uint8_t padding[128]; /* multiple of 8 */
1422 } apic;
1423
1424 /* ---- begin small stuff ---- */
1425
1426 /** VM part. */
1427 union
1428 {
1429#ifdef VMM_INCLUDED_SRC_include_VMInternal_h
1430 struct VMINT s;
1431#endif
1432 uint8_t padding[32]; /* multiple of 8 */
1433 } vm;
1434
1435 /** CFGM part. */
1436 union
1437 {
1438#ifdef VMM_INCLUDED_SRC_include_CFGMInternal_h
1439 struct CFGM s;
1440#endif
1441 uint8_t padding[8]; /* multiple of 8 */
1442 } cfgm;
1443
1444 /** Statistics for ring-0 only components. */
1445 struct
1446 {
1447 /** GMMR0 stats. */
1448 struct
1449 {
1450 /** Chunk TLB hits. */
1451 uint64_t cChunkTlbHits;
1452 /** Chunk TLB misses. */
1453 uint64_t cChunkTlbMisses;
1454 } gmm;
 1455 uint64_t au64Padding[6]; /* probably more coming here... */
1456 } R0Stats;
1457
 1458 /** Padding for aligning the structure size on a page boundary. */
1459 uint8_t abAlignment2[2136 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
1460
1461 /* ---- end small stuff ---- */
1462
1463 /** Array of VMCPU ring-3 pointers. */
1464 PVMCPUR3 apCpusR3[VMM_MAX_CPU_COUNT];
1465} VM;
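
/* Ring-3 iteration sketch (hypothetical): per-CPU data is reached through the
 * apCpusR3 array, bounded by cCpus rather than VMM_MAX_CPU_COUNT. */
#if 0 /* illustration only */
static void vmExampleForEachCpu(PVM pVM)
{
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST); /* e.g. kick every EMT */
    }
}
#endif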
1466
1467
1468#ifdef IN_RC
1469RT_C_DECLS_BEGIN
1470
1471/** The VM structure.
1472 * This is imported from the VMMRCBuiltin module, i.e. it's a one of those magic
1473 * globals which we should avoid using.
1474 */
1475extern DECLIMPORT(VM) g_VM;
1476
1477/** The VMCPU structure for virtual CPU \#0.
1478 * This is imported from the VMMRCBuiltin module, i.e. it's one of those magic
1479 * globals which we should avoid using.
1480 */
1481extern DECLIMPORT(VMCPU) g_VCpu0;
1482
1483RT_C_DECLS_END
1484#endif
1485
1486/** @} */
1487
1488#endif /* !VBOX_INCLUDED_vmm_vm_h */
1489