VirtualBox

source: vbox/trunk/include/VBox/vmm/vm.h@ 99582

Last change on this file since 99582 was 99576, checked in by vboxsync, 20 months ago

VMM: Preparations for getting interrupts injected into the guest. With ARMv8 there are two types of interrupts (normal interrupts and fast interrupts) which need to be mapped to forced action flags. Because the PIC and APIC flags are not needed on ARM, those are mapped to IRQs and FIQs respectively. bugref:10389

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 60.1 KB
1/** @file
2 * VM - The Virtual Machine, data.
3 */
4
5/*
6 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
7 *
8 * This file is part of VirtualBox base platform packages, as
9 * available from https://www.virtualbox.org.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, in version 3 of the
14 * License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <https://www.gnu.org/licenses>.
23 *
24 * The contents of this file may alternatively be used under the terms
25 * of the Common Development and Distribution License Version 1.0
26 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
27 * in the VirtualBox distribution, in which case the provisions of the
28 * CDDL are applicable instead of those of the GPL.
29 *
30 * You may elect to license modified versions of this file under the
31 * terms and conditions of either the GPL or the CDDL or both.
32 *
33 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
34 */
35
36#ifndef VBOX_INCLUDED_vmm_vm_h
37#define VBOX_INCLUDED_vmm_vm_h
38#ifndef RT_WITHOUT_PRAGMA_ONCE
39# pragma once
40#endif
41
42#ifndef VBOX_FOR_DTRACE_LIB
43# ifndef USING_VMM_COMMON_DEFS
44# error "Compile job does not include VMM_COMMON_DEFS from src/VBox/VMM/Config.kmk - make sure you really need to include this file!"
45# endif
46# include <iprt/param.h>
47# include <VBox/param.h>
48# include <VBox/types.h>
49# include <VBox/vmm/cpum.h>
50# include <VBox/vmm/stam.h>
51# include <VBox/vmm/vmapi.h>
52# include <VBox/vmm/vmm.h>
53# include <VBox/sup.h>
54#else
55# pragma D depends_on library vbox-types.d
56# pragma D depends_on library CPUMInternal.d
57# define VMM_INCLUDED_SRC_include_CPUMInternal_h
58#endif
59
60
61
62/** @defgroup grp_vm The Virtual Machine
63 * @ingroup grp_vmm
64 * @{
65 */
66
67/**
68 * The state of a Virtual CPU.
69 *
70 * The basic state indicated here is whether the CPU has been started or not. In
71 * addition, there are sub-states when started for assisting scheduling (GVMM
72 * mostly).
73 *
74 * The transition out of the STOPPED state is done by a vmR3PowerOn.
75 * The transition back to the STOPPED state is done by vmR3PowerOff.
76 *
77 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
78 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
79 */
80typedef enum VMCPUSTATE
81{
82 /** The customary invalid zero. */
83 VMCPUSTATE_INVALID = 0,
84
85 /** Virtual CPU has not yet been started. */
86 VMCPUSTATE_STOPPED,
87
88 /** CPU started. */
89 VMCPUSTATE_STARTED,
90 /** CPU started in HM context. */
91 VMCPUSTATE_STARTED_HM,
92 /** Executing guest code and can be poked (RC or STI bits of HM). */
93 VMCPUSTATE_STARTED_EXEC,
94 /** Executing guest code using NEM. */
95 VMCPUSTATE_STARTED_EXEC_NEM,
96 VMCPUSTATE_STARTED_EXEC_NEM_WAIT,
97 VMCPUSTATE_STARTED_EXEC_NEM_CANCELED,
98 /** Halted. */
99 VMCPUSTATE_STARTED_HALTED,
100
101 /** The end of valid virtual CPU states. */
102 VMCPUSTATE_END,
103
104 /** Ensure 32-bit type. */
105 VMCPUSTATE_32BIT_HACK = 0x7fffffff
106} VMCPUSTATE;
107
108/** Enables 64-bit FFs. */
109#define VMCPU_WITH_64_BIT_FFS
110
111
112/**
113 * The cross context virtual CPU structure.
114 *
115 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
116 */
117typedef struct VMCPU
118{
119 /** @name Volatile per-cpu data.
120 * @{ */
121 /** Per CPU forced action.
122 * See the VMCPU_FF_* \#defines. Updated atomically. */
123#ifdef VMCPU_WITH_64_BIT_FFS
124 uint64_t volatile fLocalForcedActions;
125#else
126 uint32_t volatile fLocalForcedActions;
127 uint32_t fForLocalForcedActionsExpansion;
128#endif
129 /** The CPU state. */
130 VMCPUSTATE volatile enmState;
131
132 /** Padding up to 64 bytes. */
133 uint8_t abAlignment0[64 - 12];
134 /** @} */
135
136 /** IEM part.
137 * @remarks This comes first as it allows the use of 8-bit immediates for the
138 * first 64 bytes of the structure, reducing code size a wee bit. */
139#if defined(VMM_INCLUDED_SRC_include_IEMInternal_h) || defined(VMM_INCLUDED_SRC_include_IEMInternal_armv8_h) /* For PDB hacking. */
140 union VMCPUUNIONIEMFULL
141#else
142 union VMCPUUNIONIEMSTUB
143#endif
144 {
145#if defined(VMM_INCLUDED_SRC_include_IEMInternal_h) || defined(VMM_INCLUDED_SRC_include_IEMInternal_armv8_h)
146 struct IEMCPU s;
147#endif
148 uint8_t padding[32832]; /* multiple of 64 */
149 } iem;
150
151 /** @name Static per-cpu data.
152 * (Putting this after IEM, hoping that it's less frequently used than the IEM data.)
153 * @{ */
154 /** Ring-3 Host Context VM Pointer. */
155 PVMR3 pVMR3;
156 /** Ring-0 Host Context VM Pointer, currently used by VTG/dtrace. */
157 RTR0PTR pVCpuR0ForVtg;
158 /** Raw-mode Context VM Pointer. */
159 uint32_t pVMRC;
160 /** Padding for new raw-mode (long mode). */
161 uint32_t pVMRCPadding;
162 /** Pointer to the ring-3 UVMCPU structure. */
163 PUVMCPU pUVCpu;
164 /** The native thread handle. */
165 RTNATIVETHREAD hNativeThread;
166 /** The native R0 thread handle. (different from the R3 handle!) */
167 RTNATIVETHREAD hNativeThreadR0;
168 /** The IPRT thread handle (for VMMDevTesting). */
169 RTTHREAD hThread;
170 /** The CPU ID.
171 * This is the index into the VM::aCpu array. */
172#ifdef IN_RING0
173 VMCPUID idCpuUnsafe;
174#else
175 VMCPUID idCpu;
176#endif
177
178 /** Align the structures below on a 64-byte boundary and make sure they start
179 * at the same offset in both 64-bit and 32-bit builds.
180 *
181 * @remarks The alignments of the members that are larger than 48 bytes should be
182 * 64 bytes for cache-line reasons. Structs containing small amounts of
183 * data could be lumped together at the end with a < 64 byte padding
184 * following it (to grow into and align the struct size).
185 */
186 uint8_t abAlignment1[64 - 6 * (HC_ARCH_BITS == 32 ? 4 : 8) - 8 - 4];
187 /** @} */
188
189 /** HM part. */
190 union VMCPUUNIONHM
191 {
192#ifdef VMM_INCLUDED_SRC_include_HMInternal_h
193 struct HMCPU s;
194#endif
195 uint8_t padding[9984]; /* multiple of 64 */
196 } hm;
197
198 /** NEM part. */
199 union VMCPUUNIONNEM
200 {
201#ifdef VMM_INCLUDED_SRC_include_NEMInternal_h
202 struct NEMCPU s;
203#endif
204 uint8_t padding[4608]; /* multiple of 64 */
205 } nem;
206
207 /** TRPM part. */
208 union VMCPUUNIONTRPM
209 {
210#ifdef VMM_INCLUDED_SRC_include_TRPMInternal_h
211 struct TRPMCPU s;
212#endif
213 uint8_t padding[128]; /* multiple of 64 */
214 } trpm;
215
216 /** TM part. */
217 union VMCPUUNIONTM
218 {
219#ifdef VMM_INCLUDED_SRC_include_TMInternal_h
220 struct TMCPU s;
221#endif
222 uint8_t padding[5760]; /* multiple of 64 */
223 } tm;
224
225 /** VMM part. */
226 union VMCPUUNIONVMM
227 {
228#ifdef VMM_INCLUDED_SRC_include_VMMInternal_h
229 struct VMMCPU s;
230#endif
231 uint8_t padding[9536]; /* multiple of 64 */
232 } vmm;
233
234 /** PDM part. */
235 union VMCPUUNIONPDM
236 {
237#ifdef VMM_INCLUDED_SRC_include_PDMInternal_h
238 struct PDMCPU s;
239#endif
240 uint8_t padding[256]; /* multiple of 64 */
241 } pdm;
242
243 /** IOM part. */
244 union VMCPUUNIONIOM
245 {
246#ifdef VMM_INCLUDED_SRC_include_IOMInternal_h
247 struct IOMCPU s;
248#endif
249 uint8_t padding[512]; /* multiple of 64 */
250 } iom;
251
252 /** DBGF part.
253 * @todo Combine this with other tiny structures. */
254 union VMCPUUNIONDBGF
255 {
256#ifdef VMM_INCLUDED_SRC_include_DBGFInternal_h
257 struct DBGFCPU s;
258#endif
259 uint8_t padding[512]; /* multiple of 64 */
260 } dbgf;
261
262 /** GIM part. */
263 union VMCPUUNIONGIM
264 {
265#ifdef VMM_INCLUDED_SRC_include_GIMInternal_h
266 struct GIMCPU s;
267#endif
268 uint8_t padding[512]; /* multiple of 64 */
269 } gim;
270
271#if defined(VBOX_VMM_TARGET_ARMV8)
272 /** GIC part. */
273 union VMCPUUNIONGIC
274 {
275# ifdef VMM_INCLUDED_SRC_include_GICInternal_h
276 struct GICCPU s;
277# endif
278 uint8_t padding[3840]; /* multiple of 64 */
279 } gic;
280#else
281 /** APIC part. */
282 union VMCPUUNIONAPIC
283 {
284# ifdef VMM_INCLUDED_SRC_include_APICInternal_h
285 struct APICCPU s;
286# endif
287 uint8_t padding[3840]; /* multiple of 64 */
288 } apic;
289#endif
290
291 /*
292 * Some less frequently used global members that don't need to take up
293 * precious space at the head of the structure.
294 */
295
296 /** Trace groups enable flags. */
297 uint32_t fTraceGroups; /* 64 / 44 */
298 /** Number of collisions hashing the ring-0 EMT handle. */
299 uint8_t cEmtHashCollisions;
300 uint8_t abAdHoc[3];
301 /** Profiling samples for use by ad hoc profiling. */
302 STAMPROFILEADV aStatAdHoc[8]; /* size: 40*8 = 320 */
303
304 /** Align the following members on page boundary. */
305 uint8_t abAlignment2[696];
306
307 /** PGM part. */
308 union VMCPUUNIONPGM
309 {
310#ifdef VMM_INCLUDED_SRC_include_PGMInternal_h
311 struct PGMCPU s;
312#endif
313 uint8_t padding[4096 + 28672]; /* multiple of 4096 */
314 } pgm;
315
316 /** CPUM part. */
317 union VMCPUUNIONCPUM
318 {
319#if defined(VMM_INCLUDED_SRC_include_CPUMInternal_h) || defined(VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h)
320 struct CPUMCPU s;
321#endif
322#ifdef VMCPU_INCL_CPUM_GST_CTX
323 /** The guest CPUM context for direct use by execution engines.
324 * This is not for general consumption, but for HM, REM, IEM, and maybe a few
325 * others. The rest will use the function based CPUM API. */
326 CPUMCTX GstCtx;
327#endif
328 uint8_t padding[102400]; /* multiple of 4096 */
329 } cpum;
330
331 /** EM part. */
332 union VMCPUUNIONEM
333 {
334#ifdef VMM_INCLUDED_SRC_include_EMInternal_h
335 struct EMCPU s;
336#endif
337 uint8_t padding[40960]; /* multiple of 4096 */
338 } em;
339
340} VMCPU;
341
342
343#ifndef VBOX_FOR_DTRACE_LIB
344/* Make sure the structure size is aligned on a 16384 boundary for arm64 purposes. */
345AssertCompileSizeAlignment(VMCPU, 16384);
346
347/** @name Operations on VMCPU::enmState
348 * @{ */
349/** Gets the VMCPU state. */
350#define VMCPU_GET_STATE(pVCpu) ( (pVCpu)->enmState )
351/** Sets the VMCPU state. */
352#define VMCPU_SET_STATE(pVCpu, enmNewState) \
353 ASMAtomicWriteU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState))
354/** Atomically compares and exchanges the VMCPU state. */
355#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
356 ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
357/** Checks the VMCPU state. */
358#ifdef VBOX_STRICT
359# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
360 do { \
361 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
362 AssertMsg(enmState == (enmExpectedState), \
363 ("enmState=%d enmExpectedState=%d idCpu=%u\n", \
364 enmState, enmExpectedState, (pVCpu)->idCpu)); \
365 } while (0)
366
367# define VMCPU_ASSERT_STATE_2(pVCpu, enmExpectedState, a_enmExpectedState2) \
368 do { \
369 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
370 AssertMsg( enmState == (enmExpectedState) \
371 || enmState == (a_enmExpectedState2), \
372 ("enmState=%d enmExpectedState=%d enmExpectedState2=%d idCpu=%u\n", \
373 enmState, enmExpectedState, a_enmExpectedState2, (pVCpu)->idCpu)); \
374 } while (0)
375#else
376# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) do { } while (0)
377# define VMCPU_ASSERT_STATE_2(pVCpu, enmExpectedState, a_enmExpectedState2) do { } while (0)
378#endif
379/** Tests if the state means that the CPU is started. */
380#define VMCPUSTATE_IS_STARTED(enmState) ( (enmState) > VMCPUSTATE_STOPPED )
381/** Tests if the state means that the CPU is stopped. */
382#define VMCPUSTATE_IS_STOPPED(enmState) ( (enmState) == VMCPUSTATE_STOPPED )
383/** @} */
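/* Editor's note - illustrative sketch, not part of the original header: a
 * typical guest-execution stint using the state operations above. The real
 * call sites live in the EM/HM/NEM sources; pVCpu is assumed to be a valid
 * PVMCPU. Wrapped in #if 0 so it cannot be compiled by accident. */
#if 0
    /* Claim the vCPU for execution; fails if another transition raced us. */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC, VMCPUSTATE_STARTED))
    {
        /* ... execute guest code; the vCPU can now be poked ... */
        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);    /* back to plain started */
        VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED); /* strict-build sanity check */
    }
#endif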
384
385
386/** The name of the raw-mode context VMM Core module. */
387#define VMMRC_MAIN_MODULE_NAME "VMMRC.rc"
388/** The name of the ring-0 context VMM Core module. */
389#define VMMR0_MAIN_MODULE_NAME "VMMR0.r0"
390
391
392/** VM Forced Action Flags.
393 *
394 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
395 * action mask of a VM.
396 *
397 * Available VM bits:
398 * 0, 1, 5, 6, 7, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30
399 *
400 *
401 * Available VMCPU bits:
402 * 14, 15, 36 to 63
403 *
404 * @todo If we run low on VMCPU, we may consider merging the SELM bits
405 *
406 * @{
407 */
408/** The virtual sync clock has been stopped, go to TM until it has been
409 * restarted... */
410#define VM_FF_TM_VIRTUAL_SYNC RT_BIT_32(VM_FF_TM_VIRTUAL_SYNC_BIT)
411#define VM_FF_TM_VIRTUAL_SYNC_BIT 2
412/** PDM Queues are pending. */
413#define VM_FF_PDM_QUEUES RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
414/** The bit number for VM_FF_PDM_QUEUES. */
415#define VM_FF_PDM_QUEUES_BIT 3
416/** PDM DMA transfers are pending. */
417#define VM_FF_PDM_DMA RT_BIT_32(VM_FF_PDM_DMA_BIT)
418/** The bit number for VM_FF_PDM_DMA. */
419#define VM_FF_PDM_DMA_BIT 4
420/** This action forces the VM to call DBGF so DBGF can service debugger
421 * requests in the emulation thread.
422 * This action flag stays asserted till DBGF clears it.*/
423#define VM_FF_DBGF RT_BIT_32(VM_FF_DBGF_BIT)
424/** The bit number for VM_FF_DBGF. */
425#define VM_FF_DBGF_BIT 8
426/** This action forces the VM to service pending requests from other
427 * threads or requests which must be executed in another context. */
428#define VM_FF_REQUEST RT_BIT_32(VM_FF_REQUEST_BIT)
429#define VM_FF_REQUEST_BIT 9
430/** Check for VM state changes and take appropriate action. */
431#define VM_FF_CHECK_VM_STATE RT_BIT_32(VM_FF_CHECK_VM_STATE_BIT)
432/** The bit number for VM_FF_CHECK_VM_STATE. */
433#define VM_FF_CHECK_VM_STATE_BIT 10
434/** Reset the VM. (postponed) */
435#define VM_FF_RESET RT_BIT_32(VM_FF_RESET_BIT)
436/** The bit number for VM_FF_RESET. */
437#define VM_FF_RESET_BIT 11
438/** EMT rendezvous in VMM. */
439#define VM_FF_EMT_RENDEZVOUS RT_BIT_32(VM_FF_EMT_RENDEZVOUS_BIT)
440/** The bit number for VM_FF_EMT_RENDEZVOUS. */
441#define VM_FF_EMT_RENDEZVOUS_BIT 12
442
443/** PGM needs to allocate handy pages. */
444#define VM_FF_PGM_NEED_HANDY_PAGES RT_BIT_32(VM_FF_PGM_NEED_HANDY_PAGES_BIT)
445#define VM_FF_PGM_NEED_HANDY_PAGES_BIT 18
446/** PGM is out of memory.
447 * Abandon all loops and code paths which can be resumed and get up to the EM
448 * loops. */
449#define VM_FF_PGM_NO_MEMORY RT_BIT_32(VM_FF_PGM_NO_MEMORY_BIT)
450#define VM_FF_PGM_NO_MEMORY_BIT 19
 451 /** PGM is about to perform a lightweight pool flush.
 452 * Guest SMP: all EMT threads should return to ring-3.
453 */
454#define VM_FF_PGM_POOL_FLUSH_PENDING RT_BIT_32(VM_FF_PGM_POOL_FLUSH_PENDING_BIT)
455#define VM_FF_PGM_POOL_FLUSH_PENDING_BIT 20
456/** Suspend the VM - debug only. */
457#define VM_FF_DEBUG_SUSPEND RT_BIT_32(VM_FF_DEBUG_SUSPEND_BIT)
458#define VM_FF_DEBUG_SUSPEND_BIT 31
459
460
461#if defined(VBOX_VMM_TARGET_ARMV8)
462/** This action forces the VM to inject an IRQ into the guest. */
463# define VMCPU_FF_INTERRUPT_IRQ RT_BIT_64(VMCPU_FF_INTERRUPT_IRQ_BIT)
464# define VMCPU_FF_INTERRUPT_IRQ_BIT 0
465/** This action forces the VM to inject an FIQ into the guest. */
466# define VMCPU_FF_INTERRUPT_FIQ RT_BIT_64(VMCPU_FF_INTERRUPT_FIQ_BIT)
467# define VMCPU_FF_INTERRUPT_FIQ_BIT 1
468#else
469/** This action forces the VM to check any pending interrupts on the APIC. */
470# define VMCPU_FF_INTERRUPT_APIC RT_BIT_64(VMCPU_FF_INTERRUPT_APIC_BIT)
471# define VMCPU_FF_INTERRUPT_APIC_BIT 0
472/** This action forces the VM to check any pending interrupts on the PIC. */
473# define VMCPU_FF_INTERRUPT_PIC RT_BIT_64(VMCPU_FF_INTERRUPT_PIC_BIT)
474# define VMCPU_FF_INTERRUPT_PIC_BIT 1
475#endif
476/** This action forces the VM to schedule and run pending timers (TM).
477 * @remarks Don't move - PATM compatibility. */
478#define VMCPU_FF_TIMER RT_BIT_64(VMCPU_FF_TIMER_BIT)
479#define VMCPU_FF_TIMER_BIT 2
480/** This action forces the VM to check any pending NMIs. */
481#define VMCPU_FF_INTERRUPT_NMI RT_BIT_64(VMCPU_FF_INTERRUPT_NMI_BIT)
482#define VMCPU_FF_INTERRUPT_NMI_BIT 3
483/** This action forces the VM to check any pending SMIs. */
484#define VMCPU_FF_INTERRUPT_SMI RT_BIT_64(VMCPU_FF_INTERRUPT_SMI_BIT)
485#define VMCPU_FF_INTERRUPT_SMI_BIT 4
486/** PDM critical section unlocking is pending, process promptly upon return to R3. */
487#define VMCPU_FF_PDM_CRITSECT RT_BIT_64(VMCPU_FF_PDM_CRITSECT_BIT)
488#define VMCPU_FF_PDM_CRITSECT_BIT 5
489/** Special EM internal force flag that is used by EMUnhaltAndWakeUp() to force
490 * the virtual CPU out of the next (/current) halted state. It is not processed
491 * nor cleared by emR3ForcedActions (similar to VMCPU_FF_BLOCK_NMIS), instead it
492 * is cleared the next time EM leaves the HALTED state. */
493#define VMCPU_FF_UNHALT RT_BIT_64(VMCPU_FF_UNHALT_BIT)
494#define VMCPU_FF_UNHALT_BIT 6
495/** Pending IEM action (mask). */
496#define VMCPU_FF_IEM RT_BIT_64(VMCPU_FF_IEM_BIT)
497/** Pending IEM action (bit number). */
498#define VMCPU_FF_IEM_BIT 7
499/** Pending APIC action (bit number). */
500#define VMCPU_FF_UPDATE_APIC_BIT 8
501/** This action forces the VM to update the APIC's asynchronously arrived
502 * interrupts as pending interrupts. */
503#define VMCPU_FF_UPDATE_APIC RT_BIT_64(VMCPU_FF_UPDATE_APIC_BIT)
504/** This action forces the VM to service pending requests from other
505 * threads or requests which must be executed in another context. */
506#define VMCPU_FF_REQUEST RT_BIT_64(VMCPU_FF_REQUEST_BIT)
507#define VMCPU_FF_REQUEST_BIT 9
508/** Pending DBGF event (alternative to passing VINF_EM_DBG_EVENT around). */
509#define VMCPU_FF_DBGF RT_BIT_64(VMCPU_FF_DBGF_BIT)
510/** The bit number for VMCPU_FF_DBGF. */
511#define VMCPU_FF_DBGF_BIT 10
512/** Hardware virtualized nested-guest interrupt pending. */
513#define VMCPU_FF_INTERRUPT_NESTED_GUEST RT_BIT_64(VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT)
514#define VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT 11
515/** This action forces PGM to update changes to CR3 when the guest was in HM mode
516 * (when using nested paging). */
517#define VMCPU_FF_HM_UPDATE_CR3 RT_BIT_64(VMCPU_FF_HM_UPDATE_CR3_BIT)
518#define VMCPU_FF_HM_UPDATE_CR3_BIT 12
519/* Bit 13 used to be VMCPU_FF_HM_UPDATE_PAE_PDPES. */
520/** This action forces the VM to resync the page tables before going
521 * back to execute guest code. (GLOBAL FLUSH) */
522#define VMCPU_FF_PGM_SYNC_CR3 RT_BIT_64(VMCPU_FF_PGM_SYNC_CR3_BIT)
523#define VMCPU_FF_PGM_SYNC_CR3_BIT 16
524/** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
525 * (NON-GLOBAL FLUSH) */
526#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL RT_BIT_64(VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL_BIT)
527#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL_BIT 17
528/** Check for pending TLB shootdown actions (deprecated)
529 * Reserved for future HM re-use if necessary / safe.
530 * Consumer: HM */
531#define VMCPU_FF_TLB_SHOOTDOWN_UNUSED RT_BIT_64(VMCPU_FF_TLB_SHOOTDOWN_UNUSED_BIT)
532#define VMCPU_FF_TLB_SHOOTDOWN_UNUSED_BIT 18
533/** Check for pending TLB flush action.
534 * Consumer: HM
535 * @todo rename to VMCPU_FF_HM_TLB_FLUSH */
536#define VMCPU_FF_TLB_FLUSH RT_BIT_64(VMCPU_FF_TLB_FLUSH_BIT)
537/** The bit number for VMCPU_FF_TLB_FLUSH. */
538#define VMCPU_FF_TLB_FLUSH_BIT 19
539/* 20 used to be VMCPU_FF_TRPM_SYNC_IDT (raw-mode only). */
540/* 21 used to be VMCPU_FF_SELM_SYNC_TSS (raw-mode only). */
541/* 22 used to be VMCPU_FF_SELM_SYNC_GDT (raw-mode only). */
542/* 23 used to be VMCPU_FF_SELM_SYNC_LDT (raw-mode only). */
543/* 24 used to be VMCPU_FF_INHIBIT_INTERRUPTS, which moved to CPUMCTX::eflags.uBoth in v7.0.4. */
544/* 25 used to be VMCPU_FF_BLOCK_NMIS, which moved to CPUMCTX::eflags.uBoth in v7.0.4. */
545/** Force return to Ring-3. */
546#define VMCPU_FF_TO_R3 RT_BIT_64(VMCPU_FF_TO_R3_BIT)
547#define VMCPU_FF_TO_R3_BIT 28
548/** Force return to ring-3 to service pending I/O or MMIO write.
549 * This is a backup for mechanism VINF_IOM_R3_IOPORT_COMMIT_WRITE and
550 * VINF_IOM_R3_MMIO_COMMIT_WRITE, allowing VINF_EM_DBG_BREAKPOINT and similar
551 * status codes to be propagated at the same time without loss. */
552#define VMCPU_FF_IOM RT_BIT_64(VMCPU_FF_IOM_BIT)
553#define VMCPU_FF_IOM_BIT 29
554/* 30 used to be VMCPU_FF_CPUM */
555/** VMX-preemption timer expired. */
556#define VMCPU_FF_VMX_PREEMPT_TIMER RT_BIT_64(VMCPU_FF_VMX_PREEMPT_TIMER_BIT)
557#define VMCPU_FF_VMX_PREEMPT_TIMER_BIT 31
558/** Pending MTF (Monitor Trap Flag) event. */
559#define VMCPU_FF_VMX_MTF RT_BIT_64(VMCPU_FF_VMX_MTF_BIT)
560#define VMCPU_FF_VMX_MTF_BIT 32
561/** VMX APIC-write emulation pending.
562 * @todo possible candidate for internal EFLAGS, or maybe just a summary bit
563 * (see also VMCPU_FF_VMX_INT_WINDOW). */
564#define VMCPU_FF_VMX_APIC_WRITE RT_BIT_64(VMCPU_FF_VMX_APIC_WRITE_BIT)
565#define VMCPU_FF_VMX_APIC_WRITE_BIT 33
566/** VMX interrupt-window event pending.
567 *
568 * "Pending" is misleading here, it would be better to say that the event need
569 * to be generated at the next opportunity and that this flag causes it to be
570 * polled for on every instruction boundary and such.
571 *
572 * @todo Change the IEM side of this to not poll but to track down the places
573 * where it can be generated and set an internal EFLAGS bit that causes it
574 * to be checked out when finishing the current instruction. */
575#define VMCPU_FF_VMX_INT_WINDOW RT_BIT_64(VMCPU_FF_VMX_INT_WINDOW_BIT)
576#define VMCPU_FF_VMX_INT_WINDOW_BIT 34
577/** VMX NMI-window event pending.
578 * Same "pending" comment and todo in VMCPU_FF_VMX_INT_WINDOW. */
579#define VMCPU_FF_VMX_NMI_WINDOW RT_BIT_64(VMCPU_FF_VMX_NMI_WINDOW_BIT)
580#define VMCPU_FF_VMX_NMI_WINDOW_BIT 35
581
582
583/** Externally forced VM actions. Used to quit the idle/wait loop. */
584#define VM_FF_EXTERNAL_SUSPENDED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_EMT_RENDEZVOUS )
585/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
586#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK ( VMCPU_FF_REQUEST | VMCPU_FF_DBGF )
587
588/** Externally forced VM actions. Used to quit the idle/wait loop. */
589#define VM_FF_EXTERNAL_HALTED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST \
590 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS )
591/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
592#if defined(VBOX_VMM_TARGET_ARMV8)
593# define VMCPU_FF_EXTERNAL_HALTED_MASK ( VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
594 | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
595 | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF )
596#else
597# define VMCPU_FF_EXTERNAL_HALTED_MASK ( VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
598 | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
599 | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF \
600 | VMCPU_FF_INTERRUPT_NESTED_GUEST)
601#endif
602
603/** High priority VM pre-execution actions. */
604#define VM_FF_HIGH_PRIORITY_PRE_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC \
605 | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
606 | VM_FF_EMT_RENDEZVOUS )
607/** High priority VMCPU pre-execution actions. */
608#if defined(VBOX_VMM_TARGET_ARMV8)
609# define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
610 | VMCPU_FF_DBGF )
611#else
612# define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
613 | VMCPU_FF_UPDATE_APIC | VMCPU_FF_DBGF \
614 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
615 | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
616 | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW )
617#endif
618
619/** High priority VM pre raw-mode execution mask. */
620#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY )
621/** High priority VMCPU pre raw-mode execution mask. */
622#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL )
623
624/** High priority post-execution actions. */
625#define VM_FF_HIGH_PRIORITY_POST_MASK ( VM_FF_PGM_NO_MEMORY )
626/** High priority post-execution actions. */
627#define VMCPU_FF_HIGH_PRIORITY_POST_MASK ( VMCPU_FF_PDM_CRITSECT | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_IEM | VMCPU_FF_IOM )
628
629/** Normal priority VM post-execution actions. */
630#define VM_FF_NORMAL_PRIORITY_POST_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET \
631 | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
632/** Normal priority VMCPU post-execution actions. */
633#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK ( VMCPU_FF_DBGF )
634
635/** Normal priority VM actions. */
636#define VM_FF_NORMAL_PRIORITY_MASK ( VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS)
637/** Normal priority VMCPU actions. */
638#define VMCPU_FF_NORMAL_PRIORITY_MASK ( VMCPU_FF_REQUEST )
639
640/** Flags to clear before resuming guest execution. */
641#define VMCPU_FF_RESUME_GUEST_MASK ( VMCPU_FF_TO_R3 )
642
643
644/** VM flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
645#define VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
646 | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_RESET)
647/** VM flags that cause the REP[|NE|E] STRINS loops to yield. */
648#define VM_FF_YIELD_REPSTR_MASK ( VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
649 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_DBGF | VM_FF_DEBUG_SUSPEND )
650/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
651#ifdef IN_RING3
652# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF \
653 | VMCPU_FF_VMX_MTF )
654#else
655# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VMCPU_FF_TO_R3 | VMCPU_FF_IEM | VMCPU_FF_IOM | VMCPU_FF_PGM_SYNC_CR3 \
656 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_VMX_MTF )
657#endif
658
659#if !defined(VBOX_VMM_TARGET_ARMV8)
660/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
661 * enabled. */
662# define VMCPU_FF_YIELD_REPSTR_MASK ( VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
663 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC \
664 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_PDM_CRITSECT \
665 | VMCPU_FF_TIMER | VMCPU_FF_REQUEST \
666 | VMCPU_FF_INTERRUPT_NESTED_GUEST )
667/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
668 * disabled. */
669# define VMCPU_FF_YIELD_REPSTR_NOINT_MASK ( VMCPU_FF_YIELD_REPSTR_MASK \
670 & ~( VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC \
671 | VMCPU_FF_INTERRUPT_NESTED_GUEST) )
672#endif
673
674/** VM Flags that cause the HM loops to go back to ring-3. */
675#define VM_FF_HM_TO_R3_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
676 | VM_FF_PDM_QUEUES | VM_FF_EMT_RENDEZVOUS)
677/** VMCPU Flags that cause the HM loops to go back to ring-3. */
678#define VMCPU_FF_HM_TO_R3_MASK ( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT \
679 | VMCPU_FF_IEM | VMCPU_FF_IOM)
680
681/** High priority ring-0 VM pre HM-mode execution mask. */
682#define VM_FF_HP_R0_PRE_HM_MASK (VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
683/** High priority ring-0 VMCPU pre HM-mode execution mask. */
684#define VMCPU_FF_HP_R0_PRE_HM_MASK ( VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 \
685 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST \
686 | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER)
687/** High priority ring-0 VM pre HM-mode execution mask, single stepping. */
688#define VM_FF_HP_R0_PRE_HM_STEP_MASK (VM_FF_HP_R0_PRE_HM_MASK & ~( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES \
689 | VM_FF_EMT_RENDEZVOUS | VM_FF_REQUEST \
690 | VM_FF_PDM_DMA) )
691/** High priority ring-0 VMCPU pre HM-mode execution mask, single stepping. */
692#define VMCPU_FF_HP_R0_PRE_HM_STEP_MASK (VMCPU_FF_HP_R0_PRE_HM_MASK & ~( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER \
693 | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_REQUEST) )
694
695/** All the VMX nested-guest flags. */
696#define VMCPU_FF_VMX_ALL_MASK ( VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
697 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW )
698
699/** All the forced VM flags. */
700#define VM_FF_ALL_MASK (UINT32_MAX)
701/** All the forced VMCPU flags. */
702#define VMCPU_FF_ALL_MASK (UINT64_MAX)
703
704/** All the forced VM flags except those related to raw-mode and hardware
705 * assisted execution. */
706#define VM_FF_ALL_REM_MASK (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
707/** All the forced VMCPU flags except those related to raw-mode and hardware
708 * assisted execution. */
709#define VMCPU_FF_ALL_REM_MASK (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_TLB_FLUSH))
710/** @} */
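/* Editor's note - illustrative sketch, not part of the original header: how
 * the *_EXTERNAL_HALTED_MASK definitions above are typically consumed. A
 * halted EMT sleeps only while no external forced action is pending; the
 * real wait loop lives elsewhere in VMM (assumed shape, pVM/pVCpu valid). */
#if 0
    while (   !VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
           && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_HALTED_MASK))
    {
        /* ... block on the halt event semaphore with a timeout ... */
    }
    /* Something is pending: leave the halt state and let EM service it. */
#endif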
711
712/** @def VM_FF_SET
713 * Sets a single force action flag.
714 *
715 * @param pVM The cross context VM structure.
716 * @param fFlag The flag to set.
717 */
718#define VM_FF_SET(pVM, fFlag) do { \
719 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
720 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
721 ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
722 } while (0)
723
724/** @def VMCPU_FF_SET
725 * Sets a single force action flag for the given VCPU.
726 *
727 * @param pVCpu The cross context virtual CPU structure.
728 * @param fFlag The flag to set.
729 * @sa VMCPU_FF_SET_MASK
730 */
731#ifdef VMCPU_WITH_64_BIT_FFS
732# define VMCPU_FF_SET(pVCpu, fFlag) do { \
733 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
734 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
735 ASMAtomicBitSet(&(pVCpu)->fLocalForcedActions, fFlag##_BIT); \
736 } while (0)
737#else
738# define VMCPU_FF_SET(pVCpu, fFlag) do { \
739 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
740 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
741 ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag)); \
742 } while (0)
743#endif
744
745/** @def VMCPU_FF_SET_MASK
746 * Sets two or more force action flags for the given VCPU.
747 *
748 * @param pVCpu The cross context virtual CPU structure.
749 * @param fFlags The flags to set.
750 * @sa VMCPU_FF_SET
751 */
752#ifdef VMCPU_WITH_64_BIT_FFS
753# if ARCH_BITS > 32
754# define VMCPU_FF_SET_MASK(pVCpu, fFlags) \
755 do { ASMAtomicOrU64(&pVCpu->fLocalForcedActions, (fFlags)); } while (0)
756# else
757# define VMCPU_FF_SET_MASK(pVCpu, fFlags) do { \
758 if (!((fFlags) >> 32)) ASMAtomicOrU32((uint32_t volatile *)&pVCpu->fLocalForcedActions, (uint32_t)(fFlags)); \
759 else ASMAtomicOrU64(&pVCpu->fLocalForcedActions, (fFlags)); \
760 } while (0)
761# endif
762#else
763# define VMCPU_FF_SET_MASK(pVCpu, fFlags) \
764 do { ASMAtomicOrU32(&pVCpu->fLocalForcedActions, (fFlags)); } while (0)
765#endif
766
767/** @def VM_FF_CLEAR
768 * Clears a single force action flag.
769 *
770 * @param pVM The cross context VM structure.
771 * @param fFlag The flag to clear.
772 */
773#define VM_FF_CLEAR(pVM, fFlag) do { \
774 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
775 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
776 ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
777 } while (0)
778
779/** @def VMCPU_FF_CLEAR
780 * Clears a single force action flag for the given VCPU.
781 *
782 * @param pVCpu The cross context virtual CPU structure.
783 * @param fFlag The flag to clear.
784 */
785#ifdef VMCPU_WITH_64_BIT_FFS
786# define VMCPU_FF_CLEAR(pVCpu, fFlag) do { \
787 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
788 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
789 ASMAtomicBitClear(&(pVCpu)->fLocalForcedActions, fFlag##_BIT); \
790 } while (0)
791#else
792# define VMCPU_FF_CLEAR(pVCpu, fFlag) do { \
793 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
794 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
795 ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag)); \
796 } while (0)
797#endif
798
799/** @def VMCPU_FF_CLEAR_MASK
800 * Clears two or more force action flags for the given VCPU.
801 *
802 * @param pVCpu The cross context virtual CPU structure.
803 * @param fFlags The flags to clear.
804 */
805#ifdef VMCPU_WITH_64_BIT_FFS
806# if ARCH_BITS > 32
807# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) \
808 do { ASMAtomicAndU64(&(pVCpu)->fLocalForcedActions, ~(fFlags)); } while (0)
809# else
810# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) do { \
811 if (!((fFlags) >> 32)) ASMAtomicAndU32((uint32_t volatile *)&(pVCpu)->fLocalForcedActions, ~(uint32_t)(fFlags)); \
812 else ASMAtomicAndU64(&(pVCpu)->fLocalForcedActions, ~(fFlags)); \
813 } while (0)
814# endif
815#else
816# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) \
817 do { ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlags)); } while (0)
818#endif
819
820/** @def VM_FF_IS_SET
821 * Checks if a single force action flag is set.
822 *
823 * @param pVM The cross context VM structure.
824 * @param fFlag The flag to check.
825 * @sa VM_FF_IS_ANY_SET
826 */
827#if !defined(VBOX_STRICT) || !defined(RT_COMPILER_SUPPORTS_LAMBDA)
828# define VM_FF_IS_SET(pVM, fFlag) RT_BOOL((pVM)->fGlobalForcedActions & (fFlag))
829#else
830# define VM_FF_IS_SET(pVM, fFlag) \
831 ([](PVM a_pVM) -> bool \
832 { \
833 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
834 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
835 return RT_BOOL(a_pVM->fGlobalForcedActions & (fFlag)); \
836 }(pVM))
837#endif
838
839/** @def VMCPU_FF_IS_SET
840 * Checks if a single force action flag is set for the given VCPU.
841 *
842 * @param pVCpu The cross context virtual CPU structure.
843 * @param fFlag The flag to check.
844 * @sa VMCPU_FF_IS_ANY_SET
845 */
846#if !defined(VBOX_STRICT) || !defined(RT_COMPILER_SUPPORTS_LAMBDA)
847# define VMCPU_FF_IS_SET(pVCpu, fFlag) RT_BOOL((pVCpu)->fLocalForcedActions & (fFlag))
848#else
849# define VMCPU_FF_IS_SET(pVCpu, fFlag) \
850 ([](PCVMCPU a_pVCpu) -> bool \
851 { \
852 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
853 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
854 return RT_BOOL(a_pVCpu->fLocalForcedActions & (fFlag)); \
855 }(pVCpu))
856#endif
857
858/** @def VM_FF_IS_ANY_SET
859 * Checks if one or more force actions in the specified set are pending.
860 *
861 * @param pVM The cross context VM structure.
862 * @param fFlags The flags to check for.
863 * @sa VM_FF_IS_SET
864 */
865#define VM_FF_IS_ANY_SET(pVM, fFlags) RT_BOOL((pVM)->fGlobalForcedActions & (fFlags))
866
867/** @def VMCPU_FF_IS_ANY_SET
868 * Checks if one or more force action flags in the specified set are set for the given VCPU.
869 *
870 * @param pVCpu The cross context virtual CPU structure.
871 * @param fFlags The flags to check for.
872 * @sa VMCPU_FF_IS_SET
873 */
874#define VMCPU_FF_IS_ANY_SET(pVCpu, fFlags) RT_BOOL((pVCpu)->fLocalForcedActions & (fFlags))
875
876/** @def VM_FF_TEST_AND_CLEAR
877 * Checks if one (!) force action in the specified set is pending and clears it atomically
878 *
879 * @returns true if the bit was set.
880 * @returns false if the bit was clear.
881 * @param pVM The cross context VM structure.
882 * @param fFlag Flag constant to check and clear (_BIT is appended).
883 */
884#define VM_FF_TEST_AND_CLEAR(pVM, fFlag) (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, fFlag##_BIT))
885
886/** @def VMCPU_FF_TEST_AND_CLEAR
887 * Checks if one (!) force action in the specified set is pending and clears it atomically
888 *
889 * @returns true if the bit was set.
890 * @returns false if the bit was clear.
891 * @param pVCpu The cross context virtual CPU structure.
892 * @param fFlag Flag constant to check and clear (_BIT is appended).
893 */
894#define VMCPU_FF_TEST_AND_CLEAR(pVCpu, fFlag) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, fFlag##_BIT))
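/* Editor's note - illustrative sketch, not part of the original header: the
 * usual producer/consumer pattern for a per-vCPU forced action. A timer or
 * device thread raises the flag; the owning EMT later tests-and-clears it.
 * The macros and VMCPU_FF_TIMER are real, the exact pairing is an example. */
#if 0
    /* Producer side (any thread): flag pending timer work for the vCPU. */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TIMER);

    /* Consumer side (the owning EMT, e.g. in the forced-action loop): */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TIMER))
    {
        /* ... run the pending timer queues for this VM ... */
    }
#endif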
895
896/** @def VM_FF_IS_PENDING_EXCEPT
897 * Checks if one or more force actions in the specified set are pending while one
898 * or more other ones are not.
899 *
900 * @param pVM The cross context VM structure.
901 * @param fFlags The flags to check for.
902 * @param fExcpt The flags that should not be set.
903 */
904#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt) \
905 ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )
906
907/** @def VM_IS_EMT
908 * Checks if the current thread is the emulation thread (EMT).
909 *
910 * @remark The ring-0 variation will need attention if we expand the ring-0
911 * code to let threads other than EMT mess around with the VM.
912 */
913#ifdef IN_RC
914# define VM_IS_EMT(pVM) true
915#else
916# define VM_IS_EMT(pVM) (VMMGetCpu(pVM) != NULL)
917#endif
918
919/** @def VMCPU_IS_EMT
920 * Checks if the current thread is the emulation thread (EMT) for the specified
921 * virtual CPU.
922 */
923#ifdef IN_RC
924# define VMCPU_IS_EMT(pVCpu) true
925#else
926# define VMCPU_IS_EMT(pVCpu) ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
927#endif
928
929/** @def VM_ASSERT_EMT
930 * Asserts that the current thread IS the emulation thread (EMT).
931 */
932#ifdef IN_RC
933# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
934#elif defined(IN_RING0)
935# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
936#else
937# define VM_ASSERT_EMT(pVM) \
938 AssertMsg(VM_IS_EMT(pVM), \
939 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
940#endif
941
942/** @def VMCPU_ASSERT_EMT
943 * Asserts that the current thread IS the emulation thread (EMT) of the
944 * specified virtual CPU.
945 */
946#ifdef IN_RC
947# define VMCPU_ASSERT_EMT(pVCpu) Assert(VMCPU_IS_EMT(pVCpu))
948#elif defined(IN_RING0)
949# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
950 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%u\n", \
951 RTThreadNativeSelf(), (pVCpu) ? (pVCpu)->hNativeThreadR0 : 0, \
952 (pVCpu) ? (pVCpu)->idCpu : 0))
953#else
954# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
955 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
956 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
957#endif
958
959/** @def VM_ASSERT_EMT_RETURN
960 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
961 */
962#ifdef IN_RC
963# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
964#elif defined(IN_RING0)
965# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
966#else
967# define VM_ASSERT_EMT_RETURN(pVM, rc) \
968 AssertMsgReturn(VM_IS_EMT(pVM), \
969 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
970 (rc))
971#endif
972
973/** @def VMCPU_ASSERT_EMT_RETURN
974 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
975 */
976#ifdef IN_RC
977# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
978#elif defined(IN_RING0)
979# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
980#else
981# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
982 AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
983 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
984 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
985 (rc))
986#endif
987
988/** @def VMCPU_ASSERT_EMT_OR_GURU
989 * Asserts that the current thread IS the emulation thread (EMT) of the
990 * specified virtual CPU, or that the VM is in a guru meditation state.
991 */
992#if defined(IN_RC) || defined(IN_RING0)
993# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) Assert( VMCPU_IS_EMT(pVCpu) \
994 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
995 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS )
996#else
997# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) \
998 AssertMsg( VMCPU_IS_EMT(pVCpu) \
999 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
1000 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS, \
1001 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
1002 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
1003#endif
1004
1005/** @def VMCPU_ASSERT_EMT_OR_NOT_RUNNING
1006 * Asserts that the current thread IS the emulation thread (EMT) of the
1007 * specified virtual CPU or the VM is not running.
1008 */
1009#if defined(IN_RC) || defined(IN_RING0)
1010# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
1011 Assert( VMCPU_IS_EMT(pVCpu) \
1012 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)) )
1013#else
1014# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
1015 AssertMsg( VMCPU_IS_EMT(pVCpu) \
1016 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)), \
1017 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
1018 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
1019#endif
1020
1021/** @def VMSTATE_IS_RUNNING
1022 * Checks if the given state indicates a running VM.
1023 */
1024#define VMSTATE_IS_RUNNING(a_enmVMState) \
1025 ( (a_enmVMState) == VMSTATE_RUNNING \
1026 || (a_enmVMState) == VMSTATE_RUNNING_LS )
1027
1028/** @def VM_IS_RUNNING_FOR_ASSERTIONS_ONLY
1029 * Checks if the VM is running.
1030 * @note This is only for pure debug assertions. No AssertReturn or similar!
1031 * @sa VMSTATE_IS_RUNNING
1032 */
1033#define VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM) \
1034 ( (pVM)->enmVMState == VMSTATE_RUNNING \
1035 || (pVM)->enmVMState == VMSTATE_RUNNING_LS )
1036
1037
1038/** @def VMSTATE_IS_POWERED_ON
1039 * Checks if the given state indicates the VM is powered on.
1040 *
1041 * @note Excludes all error states, so a powered on VM that hit a fatal error,
1042 * guru meditation, state load failure or similar will not be considered
1043 * powered on by this test.
1044 */
1045#define VMSTATE_IS_POWERED_ON(a_enmVMState) \
1046 ( (a_enmVMState) >= VMSTATE_RESUMING && (a_enmVMState) < VMSTATE_POWERING_OFF )
1047
1048/** @def VM_ASSERT_IS_NOT_RUNNING
1049 * Asserts that the VM is not running.
1050 */
1051#if defined(IN_RC) || defined(IN_RING0)
1052#define VM_ASSERT_IS_NOT_RUNNING(pVM) Assert(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM))
1053#else
1054#define VM_ASSERT_IS_NOT_RUNNING(pVM) AssertMsg(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM), \
1055 ("VM is running. enmVMState=%d\n", (pVM)->enmVMState))
1056#endif
1057
1058/** @def VM_ASSERT_EMT0
1059 * Asserts that the current thread IS emulation thread \#0 (EMT0).
1060 */
1061#ifdef IN_RING3
1062# define VM_ASSERT_EMT0(a_pVM) VMCPU_ASSERT_EMT((a_pVM)->apCpusR3[0])
1063#else
1064# define VM_ASSERT_EMT0(a_pVM) VMCPU_ASSERT_EMT(&(a_pVM)->aCpus[0])
1065#endif
1066
1067/** @def VM_ASSERT_EMT0_RETURN
1068 * Asserts that the current thread IS emulation thread \#0 (EMT0) and returns if
1069 * it isn't.
1070 */
1071#ifdef IN_RING3
1072# define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN((pVM)->apCpusR3[0], (rc))
1073#else
1074# define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN(&(pVM)->aCpus[0], (rc))
1075#endif
1076
1077
1078/**
1079 * Asserts that the current thread is NOT the emulation thread.
1080 */
1081#define VM_ASSERT_OTHER_THREAD(pVM) \
1082 AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))
1083
1084
1085/** @def VM_ASSERT_STATE
1086 * Asserts a certain VM state.
1087 */
1088#define VM_ASSERT_STATE(pVM, _enmState) \
1089 AssertMsg((pVM)->enmVMState == (_enmState), \
1090 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))
1091
1092/** @def VM_ASSERT_STATE_RETURN
1093 * Asserts a certain VM state and returns if it doesn't match.
1094 */
1095#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
1096 AssertMsgReturn((pVM)->enmVMState == (_enmState), \
1097 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
1098 (rc))
1099
1100/** @def VM_IS_VALID_EXT
1101 * Checks that the VM handle is valid for external access, i.e. not being destroyed
1102 * or terminated. */
1103#define VM_IS_VALID_EXT(pVM) \
1104 ( RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
1105 && ( (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
1106 || ( (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
1107 && VM_IS_EMT(pVM))) )
1108
1109/** @def VM_ASSERT_VALID_EXT_RETURN
1110 * Asserts that the VM handle is valid for external access, i.e. not being
1111 * destroyed or terminated.
1112 */
1113#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
1114 AssertMsgReturn(VM_IS_VALID_EXT(pVM), \
1115 ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
1116 ? VMGetStateName(pVM->enmVMState) : ""), \
1117 (rc))
1118
1119/** @def VMCPU_ASSERT_VALID_EXT_RETURN
1120 * Asserts that the VMCPU handle is valid for external access, i.e. not being
1121 * destroyed or terminated.
1122 */
1123#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
1124 AssertMsgReturn( RT_VALID_ALIGNED_PTR(pVCpu, 64) \
1125 && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
1126 && (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
1127 ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
1128 RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
1129 ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
1130 (rc))
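/* Editor's note - illustrative sketch, not part of the original header: a
 * typical external API entry point guarding itself with the validity macros
 * above. The function name is hypothetical; the status codes are real. */
#if 0
VMMR3DECL(int) vmR3HypotheticalExternalApi(PVM pVM, PVMCPU pVCpu)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, VERR_INVALID_CPU_ID);
    /* ... pVM and pVCpu are now safe to dereference ... */
    return VINF_SUCCESS;
}
#endif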
1131
1132#endif /* !VBOX_FOR_DTRACE_LIB */
1133
1134
1135/**
1136 * Helper that HM and NEM use for safely modifying VM::bMainExecutionEngine.
1137 *
1138 * ONLY HM and NEM MAY USE THIS!
1139 *
1140 * @param a_pVM The cross context VM structure.
1141 * @param a_bValue The new value.
1142 * @internal
1143 */
1144#define VM_SET_MAIN_EXECUTION_ENGINE(a_pVM, a_bValue) \
1145 do { \
1146 *const_cast<uint8_t *>(&(a_pVM)->bMainExecutionEngine) = (a_bValue); \
1147 ASMCompilerBarrier(); /* just to be on the safe side */ \
1148 } while (0)
1149
1150/**
1151 * Checks whether iem-executes-all-mode is used.
1152 *
1153 * @retval true if IEM is used.
1154 * @retval false if not.
1155 *
1156 * @param a_pVM The cross context VM structure.
1157 * @sa VM_IS_HM_OR_NEM_ENABLED, VM_IS_HM_ENABLED, VM_IS_NEM_ENABLED.
1158 * @internal
1159 */
1160#define VM_IS_EXEC_ENGINE_IEM(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_IEM)
1161
1162/**
1163 * Checks whether HM (VT-x/AMD-V) or NEM is being used by this VM.
1164 *
1165 * @retval true if either is used.
1166 * @retval false if software virtualization (raw-mode) is used.
1167 *
1168 * @param a_pVM The cross context VM structure.
1169 * @sa VM_IS_EXEC_ENGINE_IEM, VM_IS_HM_ENABLED, VM_IS_NEM_ENABLED.
1170 * @internal
1171 */
1172#define VM_IS_HM_OR_NEM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine != VM_EXEC_ENGINE_IEM)
1173
1174/**
1175 * Checks whether HM is being used by this VM.
1176 *
1177 * @retval true if HM (VT-x/AMD-v) is used.
1178 * @retval false if not.
1179 *
1180 * @param a_pVM The cross context VM structure.
1181 * @sa VM_IS_NEM_ENABLED, VM_IS_EXEC_ENGINE_IEM, VM_IS_HM_OR_NEM_ENABLED.
1182 * @internal
1183 */
1184#define VM_IS_HM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT)
1185
1186/**
1187 * Checks whether NEM is being used by this VM.
1188 *
1189 * @retval true if a native hypervisor API is used.
1190 * @retval false if not.
1191 *
1192 * @param a_pVM The cross context VM structure.
1193 * @sa VM_IS_HM_ENABLED, VM_IS_EXEC_ENGINE_IEM, VM_IS_HM_OR_NEM_ENABLED.
1194 * @internal
1195 */
1196#define VM_IS_NEM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
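/* Editor's note - illustrative sketch, not part of the original header:
 * dispatching on the main execution engine. HM/NEM set it once during init
 * via VM_SET_MAIN_EXECUTION_ENGINE(); other code only queries, like this. */
#if 0
    if (VM_IS_HM_ENABLED(pVM))          /* VT-x / AMD-V */
    {   /* ... hardware-assisted path ... */ }
    else if (VM_IS_NEM_ENABLED(pVM))    /* native hypervisor API */
    {   /* ... NEM path ... */ }
    else                                /* IEM executes everything */
    {   /* ... interpreted/recompiled path ... */ }
#endif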
1197
1198
1199/**
1200 * The cross context VM structure.
1201 *
1202 * It contains all the VM data which have to be available in all contexts.
1203 * Even though it contains all the data, the idea is to use APIs rather than
1204 * modifying the members all around the place. Therefore we make use of unions to hide
1205 * everything which isn't local to the current source module. This means we'll
1206 * have to pay a little bit of attention when adding new members to structures
1207 * in the unions and make sure to keep the padding sizes up to date.
1208 *
1209 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
1210 */
1211typedef struct VM
1212{
1213 /** The state of the VM.
1214 * This field is read only to everyone except the VM and EM. */
1215 VMSTATE volatile enmVMState;
1216 /** Forced action flags.
1217 * See the VM_FF_* \#defines. Updated atomically.
1218 */
1219 volatile uint32_t fGlobalForcedActions;
1220 /** Pointer to the array of page descriptors for the VM structure allocation. */
1221 R3PTRTYPE(PSUPPAGE) paVMPagesR3;
1222 /** Session handle. For use when calling SUPR0 APIs. */
1223#ifdef IN_RING0
1224 PSUPDRVSESSION pSessionUnsafe;
1225#else
1226 PSUPDRVSESSION pSession;
1227#endif
1228 /** Pointer to the ring-3 VM structure. */
1229 PUVM pUVM;
1230 /** Ring-3 Host Context VM Pointer. */
1231#ifdef IN_RING0
1232 R3PTRTYPE(struct VM *) pVMR3Unsafe;
1233#else
1234 R3PTRTYPE(struct VM *) pVMR3;
1235#endif
1236 /** Ring-0 Host Context VM pointer for making ring-0 calls. */
1237 R0PTRTYPE(struct VM *) pVMR0ForCall;
1238 /** Raw-mode Context VM Pointer. */
1239 uint32_t pVMRC;
1240 /** Padding for new raw-mode (long mode). */
1241 uint32_t pVMRCPadding;
1242
1243 /** The GVM VM handle. Only the GVM should modify this field. */
1244#ifdef IN_RING0
1245 uint32_t hSelfUnsafe;
1246#else
1247 uint32_t hSelf;
1248#endif
1249 /** Number of virtual CPUs. */
1250#ifdef IN_RING0
1251 uint32_t cCpusUnsafe;
1252#else
1253 uint32_t cCpus;
1254#endif
1255 /** CPU execution cap (1-100). */
1256 uint32_t uCpuExecutionCap;
1257
1258 /** Size of the VM structure. */
1259 uint32_t cbSelf;
1260 /** Size of the VMCPU structure. */
1261 uint32_t cbVCpu;
1262 /** Structure version number (TBD). */
1263 uint32_t uStructVersion;
1264
1265 /** @name Various items that are frequently accessed.
1266 * @{ */
1267 /** The main execution engine, VM_EXEC_ENGINE_XXX.
1268 * This is set early during vmR3InitRing3 by HM or NEM. */
1269 uint8_t const bMainExecutionEngine;
1270
1271 /** Hardware VM support is available and enabled.
1272 * Determined very early during init.
1273 * This is placed here for performance reasons.
1274 * @todo obsoleted by bMainExecutionEngine, eliminate. */
1275 bool fHMEnabled;
1276 /** @} */
1277
1278 /** Alignment padding. */
1279 uint8_t uPadding1[6];
1280
1281 /** @name Debugging
1282 * @{ */
1283 /** Ring-3 Host Context VM Pointer. */
1284 R3PTRTYPE(RTTRACEBUF) hTraceBufR3;
1285 /** Ring-0 Host Context VM Pointer. */
1286 R0PTRTYPE(RTTRACEBUF) hTraceBufR0;
1287 /** @} */
1288
1289 /** Max EMT hash lookup collisions (in GVMM). */
1290 uint8_t cMaxEmtHashCollisions;
1291
1292 /** Padding - the unions must be aligned on a 64 byte boundary. */
1293 uint8_t abAlignment3[HC_ARCH_BITS == 64 ? 23 : 51];
1294
1295 /** CPUM part. */
1296 union
1297 {
1298#if defined(VMM_INCLUDED_SRC_include_CPUMInternal_h) || defined(VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h)
1299 struct CPUM s;
1300#endif
1301#ifdef VBOX_INCLUDED_vmm_cpum_h
1302 /** Read only info exposed about the host and guest CPUs. */
1303 struct
1304 {
1305 /** Padding for hidden fields. */
1306 uint8_t abHidden0[64 + 48];
1307 /** Guest CPU feature information. */
1308 CPUMFEATURES GuestFeatures;
1309 } const ro;
1310#endif
1311 /** @todo this is rather bloated because of static MSR range allocation.
1312 * Probably a good idea to move it to a separate R0 allocation... */
1313 uint8_t padding[8832 + 128*8192 + 0x1d00]; /* multiple of 64 */
1314 } cpum;
1315
1316 /** PGM part.
1317 * @note 16384 aligned for zero and mmio page storage. */
1318 union
1319 {
1320#ifdef VMM_INCLUDED_SRC_include_PGMInternal_h
1321 struct PGM s;
1322#endif
1323 uint8_t padding[53888]; /* multiple of 64 */
1324 } pgm;
1325
1326 /** VMM part. */
1327 union
1328 {
1329#ifdef VMM_INCLUDED_SRC_include_VMMInternal_h
1330 struct VMM s;
1331#endif
1332 uint8_t padding[1600]; /* multiple of 64 */
1333 } vmm;
1334
1335 /** HM part. */
1336 union
1337 {
1338#ifdef VMM_INCLUDED_SRC_include_HMInternal_h
1339 struct HM s;
1340#endif
1341 uint8_t padding[5504]; /* multiple of 64 */
1342 } hm;
1343
1344 /** TRPM part. */
1345 union
1346 {
1347#ifdef VMM_INCLUDED_SRC_include_TRPMInternal_h
1348 struct TRPM s;
1349#endif
1350 uint8_t padding[2048]; /* multiple of 64 */
1351 } trpm;
1352
1353 /** SELM part. */
1354 union
1355 {
1356#ifdef VMM_INCLUDED_SRC_include_SELMInternal_h
1357 struct SELM s;
1358#endif
1359 uint8_t padding[768]; /* multiple of 64 */
1360 } selm;
1361
1362 /** MM part. */
1363 union
1364 {
1365#ifdef VMM_INCLUDED_SRC_include_MMInternal_h
1366 struct MM s;
1367#endif
1368 uint8_t padding[192]; /* multiple of 64 */
1369 } mm;
1370
1371 /** PDM part. */
1372 union
1373 {
1374#ifdef VMM_INCLUDED_SRC_include_PDMInternal_h
1375 struct PDM s;
1376#endif
1377 uint8_t padding[22400]; /* multiple of 64 */
1378 } pdm;
1379
1380 /** IOM part. */
1381 union
1382 {
1383#ifdef VMM_INCLUDED_SRC_include_IOMInternal_h
1384 struct IOM s;
1385#endif
1386 uint8_t padding[1152]; /* multiple of 64 */
1387 } iom;
1388
1389 /** EM part. */
1390 union
1391 {
1392#ifdef VMM_INCLUDED_SRC_include_EMInternal_h
1393 struct EM s;
1394#endif
1395 uint8_t padding[256]; /* multiple of 64 */
1396 } em;
1397
1398 /** NEM part. */
1399 union
1400 {
1401#ifdef VMM_INCLUDED_SRC_include_NEMInternal_h
1402 struct NEM s;
1403#endif
1404 uint8_t padding[4608]; /* multiple of 64 */
1405 } nem;
1406
1407 /** TM part. */
1408 union
1409 {
1410#ifdef VMM_INCLUDED_SRC_include_TMInternal_h
1411 struct TM s;
1412#endif
1413 uint8_t padding[10112]; /* multiple of 64 */
1414 } tm;
1415
1416 /** DBGF part. */
1417 union
1418 {
1419#ifdef VMM_INCLUDED_SRC_include_DBGFInternal_h
1420 struct DBGF s;
1421#endif
1422#ifdef VBOX_INCLUDED_vmm_dbgf_h
1423 /** Read only info exposed about interrupt breakpoints and selected events. */
1424 struct
1425 {
1426 /** Bitmap of enabled hardware interrupt breakpoints. */
1427 uint32_t bmHardIntBreakpoints[256 / 32];
1428 /** Bitmap of enabled software interrupt breakpoints. */
1429 uint32_t bmSoftIntBreakpoints[256 / 32];
1430 /** Bitmap of selected events.
1431 * This includes non-selectable events too for simplicity, we maintain the
1432 * state for some of these, as it may come in handy. */
1433 uint64_t bmSelectedEvents[(DBGFEVENT_END + 63) / 64];
1434 /** Enabled hardware interrupt breakpoints. */
1435 uint32_t cHardIntBreakpoints;
1436 /** Enabled software interrupt breakpoints. */
1437 uint32_t cSoftIntBreakpoints;
1438 /** The number of selected events. */
1439 uint32_t cSelectedEvents;
1440 /** The number of enabled hardware breakpoints. */
1441 uint8_t cEnabledHwBreakpoints;
1442 /** The number of enabled hardware I/O breakpoints. */
1443 uint8_t cEnabledHwIoBreakpoints;
1444 uint8_t au8Alignment1[2]; /**< Alignment padding. */
1445 /** The number of enabled INT3 breakpoints. */
1446 uint32_t volatile cEnabledInt3Breakpoints;
1447 } const ro;
1448#endif
1449 uint8_t padding[2432]; /* multiple of 64 */
1450 } dbgf;
1451
1452 /** SSM part. */
1453 union
1454 {
1455#ifdef VMM_INCLUDED_SRC_include_SSMInternal_h
1456 struct SSM s;
1457#endif
1458 uint8_t padding[128]; /* multiple of 64 */
1459 } ssm;
1460
1461 union
1462 {
1463#ifdef VMM_INCLUDED_SRC_include_GIMInternal_h
1464 struct GIM s;
1465#endif
1466 uint8_t padding[448]; /* multiple of 64 */
1467 } gim;
1468
1469#if defined(VBOX_VMM_TARGET_ARMV8)
1470 union
1471 {
1472# ifdef VMM_INCLUDED_SRC_include_GICInternal_h
1473 struct GIC s;
1474# endif
1475 uint8_t padding[128]; /* multiple of 8 */
1476 } gic;
1477#else
1478 union
1479 {
1480# ifdef VMM_INCLUDED_SRC_include_APICInternal_h
1481 struct APIC s;
1482# endif
1483 uint8_t padding[128]; /* multiple of 8 */
1484 } apic;
1485#endif
1486
1487 /* ---- begin small stuff ---- */
1488
1489 /** VM part. */
1490 union
1491 {
1492#ifdef VMM_INCLUDED_SRC_include_VMInternal_h
1493 struct VMINT s;
1494#endif
1495 uint8_t padding[32]; /* multiple of 8 */
1496 } vm;
1497
1498 /** CFGM part. */
1499 union
1500 {
1501#ifdef VMM_INCLUDED_SRC_include_CFGMInternal_h
1502 struct CFGM s;
1503#endif
1504 uint8_t padding[8]; /* multiple of 8 */
1505 } cfgm;
1506
1507 /** IEM part. */
1508 union
1509 {
1510#ifdef VMM_INCLUDED_SRC_include_IEMInternal_h
1511 struct IEM s;
1512#endif
1513 uint8_t padding[16]; /* multiple of 8 */
1514 } iem;
1515
1516 /** Statistics for ring-0 only components. */
1517 struct
1518 {
1519 /** GMMR0 stats. */
1520 struct
1521 {
1522 /** Chunk TLB hits. */
1523 uint64_t cChunkTlbHits;
1524 /** Chunk TLB misses. */
1525 uint64_t cChunkTlbMisses;
1526 } gmm;
1527 uint64_t au64Padding[6]; /* probably more coming here... */
1528 } R0Stats;
1529
1530 union
1531 {
1532#ifdef VMM_INCLUDED_SRC_include_GCMInternal_h
1533 struct GCM s;
1534#endif
1535 uint8_t padding[32]; /* multiple of 8 */
1536 } gcm;
1537
1538 /** Padding for aligning the structure size on a page boundary. */
1539 uint8_t abAlignment2[8872 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
1540
1541 /* ---- end small stuff ---- */
1542
1543 /** Array of VMCPU ring-3 pointers. */
1544 PVMCPUR3 apCpusR3[VMM_MAX_CPU_COUNT];
1545
1546 /* This point is aligned on a 16384 byte boundary (for arm64 purposes). */
1547} VM;
1548#ifndef VBOX_FOR_DTRACE_LIB
1549//AssertCompileSizeAlignment(VM, 16384);
1550#endif
1551
1552
1553#ifdef IN_RC
1554RT_C_DECLS_BEGIN
1555
1556/** The VM structure.
1557 * This is imported from the VMMRCBuiltin module, i.e. it's a one of those magic
1558 * globals which we should avoid using.
1559 */
1560extern DECLIMPORT(VM) g_VM;
1561
1562/** The VMCPU structure for virtual CPU \#0.
1563 * This is imported from the VMMRCBuiltin module, i.e. it's a one of those magic
1564 * globals which we should avoid using.
1565 */
1566extern DECLIMPORT(VMCPU) g_VCpu0;
1567
1568RT_C_DECLS_END
1569#endif
1570
1571/** @} */
1572
1573#endif /* !VBOX_INCLUDED_vmm_vm_h */
1574