VirtualBox

source: vbox/trunk/include/VBox/vmm/vm.h@100591

Last change on this file since 100591 was 100230, checked in by vboxsync, 20 months ago

VBox/vmm/vm.h: Corrected VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK to not include VMCPU_FF_PGM_SYNC_CR3 and VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL as these aren't relevant to IEM and not handled by the innermost loops, so we'd end up spinning in string instructions till something got us out to the main EM loop. bugref:10369

1/** @file
2 * VM - The Virtual Machine, data.
3 */
4
5/*
6 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
7 *
8 * This file is part of VirtualBox base platform packages, as
9 * available from https://www.virtualbox.org.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, in version 3 of the
14 * License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <https://www.gnu.org/licenses>.
23 *
24 * The contents of this file may alternatively be used under the terms
25 * of the Common Development and Distribution License Version 1.0
26 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
27 * in the VirtualBox distribution, in which case the provisions of the
28 * CDDL are applicable instead of those of the GPL.
29 *
30 * You may elect to license modified versions of this file under the
31 * terms and conditions of either the GPL or the CDDL or both.
32 *
33 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
34 */
35
36#ifndef VBOX_INCLUDED_vmm_vm_h
37#define VBOX_INCLUDED_vmm_vm_h
38#ifndef RT_WITHOUT_PRAGMA_ONCE
39# pragma once
40#endif
41
42#ifndef VBOX_FOR_DTRACE_LIB
43# ifndef USING_VMM_COMMON_DEFS
44# error "Compile job does not include VMM_COMMON_DEFS from src/VBox/VMM/Config.kmk - make sure you really need to include this file!"
45# endif
46# include <iprt/param.h>
47# include <VBox/param.h>
48# include <VBox/types.h>
49# include <VBox/vmm/cpum.h>
50# include <VBox/vmm/stam.h>
51# include <VBox/vmm/vmapi.h>
52# include <VBox/vmm/vmm.h>
53# include <VBox/sup.h>
54#else
55# pragma D depends_on library vbox-types.d
56# pragma D depends_on library CPUMInternal.d
57# define VMM_INCLUDED_SRC_include_CPUMInternal_h
58#endif
59
60
61
62/** @defgroup grp_vm The Virtual Machine
63 * @ingroup grp_vmm
64 * @{
65 */
66
67/**
68 * The state of a Virtual CPU.
69 *
70 * The basic state indicated here is whether the CPU has been started or not. In
71 * addition, there are sub-states when started for assisting scheduling (GVMM
72 * mostly).
73 *
 74 * The transition out of the STOPPED state is done by a call to vmR3PowerOn.
75 * The transition back to the STOPPED state is done by vmR3PowerOff.
76 *
 77 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
78 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
79 */
80typedef enum VMCPUSTATE
81{
82 /** The customary invalid zero. */
83 VMCPUSTATE_INVALID = 0,
84
85 /** Virtual CPU has not yet been started. */
86 VMCPUSTATE_STOPPED,
87
88 /** CPU started. */
89 VMCPUSTATE_STARTED,
90 /** CPU started in HM context. */
91 VMCPUSTATE_STARTED_HM,
92 /** Executing guest code and can be poked (RC or STI bits of HM). */
93 VMCPUSTATE_STARTED_EXEC,
94 /** Executing guest code using NEM. */
95 VMCPUSTATE_STARTED_EXEC_NEM,
 96 VMCPUSTATE_STARTED_EXEC_NEM_WAIT, /**< Executing guest code using NEM, waiting inside the native API. */
 97 VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, /**< Executing guest code using NEM, the execution/wait got canceled. */
98 /** Halted. */
99 VMCPUSTATE_STARTED_HALTED,
100
101 /** The end of valid virtual CPU states. */
102 VMCPUSTATE_END,
103
104 /** Ensure 32-bit type. */
105 VMCPUSTATE_32BIT_HACK = 0x7fffffff
106} VMCPUSTATE;
107
108/** Enables 64-bit FFs. */
109#define VMCPU_WITH_64_BIT_FFS
110
111
112/**
113 * The cross context virtual CPU structure.
114 *
115 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
116 */
117typedef struct VMCPU
118{
119 /** @name Volatile per-cpu data.
120 * @{ */
121 /** Per CPU forced action.
122 * See the VMCPU_FF_* \#defines. Updated atomically. */
123#ifdef VMCPU_WITH_64_BIT_FFS
124 uint64_t volatile fLocalForcedActions;
125#else
126 uint32_t volatile fLocalForcedActions;
127 uint32_t fForLocalForcedActionsExpansion;
128#endif
129 /** The CPU state. */
130 VMCPUSTATE volatile enmState;
131
132#if defined(VBOX_VMM_TARGET_ARMV8)
133 uint32_t u32Alignment0;
 134 /** The nanosecond timestamp at which the vTimer of the associated vCPU is supposed to activate,
 135 * required to get out of a halt (due to wfi/wfe).
136 *
137 * @note This actually should go into TMCPU but this drags in a whole lot of padding changes
138 * and I'm not sure yet whether this will remain in this form anyway.
139 */
140 uint64_t cNsVTimerActivate;
141 /** Padding up to 64 bytes. */
142 uint8_t abAlignment0[64 - 12 - 8 - 4];
143#else
144 /** Padding up to 64 bytes. */
145 uint8_t abAlignment0[64 - 12];
146#endif
147 /** @} */
148
149 /** IEM part.
150 * @remarks This comes first as it allows the use of 8-bit immediates for the
151 * first 64 bytes of the structure, reducing code size a wee bit. */
152#if defined(VMM_INCLUDED_SRC_include_IEMInternal_h) || defined(VMM_INCLUDED_SRC_include_IEMInternal_armv8_h) /* For PDB hacking. */
153 union VMCPUUNIONIEMFULL
154#else
155 union VMCPUUNIONIEMSTUB
156#endif
157 {
158#if defined(VMM_INCLUDED_SRC_include_IEMInternal_h) || defined(VMM_INCLUDED_SRC_include_IEMInternal_armv8_h)
159 struct IEMCPU s;
160#endif
161 uint8_t padding[32832]; /* multiple of 64 */
162 } iem;
163
164 /** @name Static per-cpu data.
165 * (Putting this after IEM, hoping that it's less frequently used than it.)
166 * @{ */
167 /** Ring-3 Host Context VM Pointer. */
168 PVMR3 pVMR3;
169 /** Ring-0 Host Context VM Pointer, currently used by VTG/dtrace. */
170 RTR0PTR pVCpuR0ForVtg;
171 /** Raw-mode Context VM Pointer. */
172 uint32_t pVMRC;
173 /** Padding for new raw-mode (long mode). */
174 uint32_t pVMRCPadding;
175 /** Pointer to the ring-3 UVMCPU structure. */
176 PUVMCPU pUVCpu;
177 /** The native thread handle. */
178 RTNATIVETHREAD hNativeThread;
179 /** The native R0 thread handle. (different from the R3 handle!) */
180 RTNATIVETHREAD hNativeThreadR0;
181 /** The IPRT thread handle (for VMMDevTesting). */
182 RTTHREAD hThread;
183 /** The CPU ID.
184 * This is the index into the VM::aCpu array. */
185#ifdef IN_RING0
186 VMCPUID idCpuUnsafe;
187#else
188 VMCPUID idCpu;
189#endif
190
 191 /** Align the structures below on a 64-byte boundary and make sure they start
 192 * at the same offset in both 64-bit and 32-bit builds.
 193 *
 194 * @remarks The alignments of the members that are larger than 48 bytes should be
 195 * 64 bytes for cache line reasons. Structs containing small amounts of
196 * data could be lumped together at the end with a < 64 byte padding
197 * following it (to grow into and align the struct size).
198 */
199 uint8_t abAlignment1[64 - 6 * (HC_ARCH_BITS == 32 ? 4 : 8) - 8 - 4];
200 /** @} */
201
202 /** HM part. */
203 union VMCPUUNIONHM
204 {
205#ifdef VMM_INCLUDED_SRC_include_HMInternal_h
206 struct HMCPU s;
207#endif
208 uint8_t padding[9984]; /* multiple of 64 */
209 } hm;
210
211 /** NEM part. */
212 union VMCPUUNIONNEM
213 {
214#ifdef VMM_INCLUDED_SRC_include_NEMInternal_h
215 struct NEMCPU s;
216#endif
217 uint8_t padding[4608]; /* multiple of 64 */
218 } nem;
219
220 /** TRPM part. */
221 union VMCPUUNIONTRPM
222 {
223#ifdef VMM_INCLUDED_SRC_include_TRPMInternal_h
224 struct TRPMCPU s;
225#endif
226 uint8_t padding[128]; /* multiple of 64 */
227 } trpm;
228
229 /** TM part. */
230 union VMCPUUNIONTM
231 {
232#ifdef VMM_INCLUDED_SRC_include_TMInternal_h
233 struct TMCPU s;
234#endif
235 uint8_t padding[5760]; /* multiple of 64 */
236 } tm;
237
238 /** VMM part. */
239 union VMCPUUNIONVMM
240 {
241#ifdef VMM_INCLUDED_SRC_include_VMMInternal_h
242 struct VMMCPU s;
243#endif
244 uint8_t padding[9536]; /* multiple of 64 */
245 } vmm;
246
247 /** PDM part. */
248 union VMCPUUNIONPDM
249 {
250#ifdef VMM_INCLUDED_SRC_include_PDMInternal_h
251 struct PDMCPU s;
252#endif
253 uint8_t padding[256]; /* multiple of 64 */
254 } pdm;
255
256 /** IOM part. */
257 union VMCPUUNIONIOM
258 {
259#ifdef VMM_INCLUDED_SRC_include_IOMInternal_h
260 struct IOMCPU s;
261#endif
262 uint8_t padding[512]; /* multiple of 64 */
263 } iom;
264
265 /** DBGF part.
266 * @todo Combine this with other tiny structures. */
267 union VMCPUUNIONDBGF
268 {
269#ifdef VMM_INCLUDED_SRC_include_DBGFInternal_h
270 struct DBGFCPU s;
271#endif
272 uint8_t padding[512]; /* multiple of 64 */
273 } dbgf;
274
275 /** GIM part. */
276 union VMCPUUNIONGIM
277 {
278#ifdef VMM_INCLUDED_SRC_include_GIMInternal_h
279 struct GIMCPU s;
280#endif
281 uint8_t padding[512]; /* multiple of 64 */
282 } gim;
283
284#if defined(VBOX_VMM_TARGET_ARMV8)
285 /** GIC part. */
286 union VMCPUUNIONGIC
287 {
288# ifdef VMM_INCLUDED_SRC_include_GICInternal_h
289 struct GICCPU s;
290# endif
291 uint8_t padding[3840]; /* multiple of 64 */
292 } gic;
293#else
294 /** APIC part. */
295 union VMCPUUNIONAPIC
296 {
297# ifdef VMM_INCLUDED_SRC_include_APICInternal_h
298 struct APICCPU s;
299# endif
300 uint8_t padding[3840]; /* multiple of 64 */
301 } apic;
302#endif
303
304 /*
 305 * Some less frequently used global members that don't need to take up
306 * precious space at the head of the structure.
307 */
308
309 /** Trace groups enable flags. */
310 uint32_t fTraceGroups; /* 64 / 44 */
311 /** Number of collisions hashing the ring-0 EMT handle. */
312 uint8_t cEmtHashCollisions;
313 uint8_t abAdHoc[3];
314 /** Profiling samples for use by ad hoc profiling. */
315 STAMPROFILEADV aStatAdHoc[8]; /* size: 40*8 = 320 */
316
317 /** Align the following members on page boundary. */
318 uint8_t abAlignment2[696];
319
320 /** PGM part. */
321 union VMCPUUNIONPGM
322 {
323#ifdef VMM_INCLUDED_SRC_include_PGMInternal_h
324 struct PGMCPU s;
325#endif
326 uint8_t padding[4096 + 28672]; /* multiple of 4096 */
327 } pgm;
328
329 /** CPUM part. */
330 union VMCPUUNIONCPUM
331 {
332#if defined(VMM_INCLUDED_SRC_include_CPUMInternal_h) || defined(VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h)
333 struct CPUMCPU s;
334#endif
335#ifdef VMCPU_INCL_CPUM_GST_CTX
336 /** The guest CPUM context for direct use by execution engines.
337 * This is not for general consumption, but for HM, REM, IEM, and maybe a few
338 * others. The rest will use the function based CPUM API. */
339 CPUMCTX GstCtx;
340#endif
341 uint8_t padding[102400]; /* multiple of 4096 */
342 } cpum;
343
344 /** EM part. */
345 union VMCPUUNIONEM
346 {
347#ifdef VMM_INCLUDED_SRC_include_EMInternal_h
348 struct EMCPU s;
349#endif
350 uint8_t padding[40960]; /* multiple of 4096 */
351 } em;
352
353} VMCPU;
354
355
356#ifndef VBOX_FOR_DTRACE_LIB
357/* Make sure the structure size is aligned on a 16384 boundary for arm64 purposes. */
358AssertCompileSizeAlignment(VMCPU, 16384);
359
360/** @name Operations on VMCPU::enmState
361 * @{ */
362/** Gets the VMCPU state. */
363#define VMCPU_GET_STATE(pVCpu) ( (pVCpu)->enmState )
364/** Sets the VMCPU state. */
365#define VMCPU_SET_STATE(pVCpu, enmNewState) \
366 ASMAtomicWriteU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState))
 367/** Compares and sets the VMCPU state. */
368#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
369 ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
370/** Checks the VMCPU state. */
371#ifdef VBOX_STRICT
372# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
373 do { \
374 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
375 AssertMsg(enmState == (enmExpectedState), \
376 ("enmState=%d enmExpectedState=%d idCpu=%u\n", \
377 enmState, enmExpectedState, (pVCpu)->idCpu)); \
378 } while (0)
379
380# define VMCPU_ASSERT_STATE_2(pVCpu, enmExpectedState, a_enmExpectedState2) \
381 do { \
382 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
383 AssertMsg( enmState == (enmExpectedState) \
384 || enmState == (a_enmExpectedState2), \
385 ("enmState=%d enmExpectedState=%d enmExpectedState2=%d idCpu=%u\n", \
386 enmState, enmExpectedState, a_enmExpectedState2, (pVCpu)->idCpu)); \
387 } while (0)
388#else
389# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) do { } while (0)
390# define VMCPU_ASSERT_STATE_2(pVCpu, enmExpectedState, a_enmExpectedState2) do { } while (0)
391#endif
392/** Tests if the state means that the CPU is started. */
393#define VMCPUSTATE_IS_STARTED(enmState) ( (enmState) > VMCPUSTATE_STOPPED )
394/** Tests if the state means that the CPU is stopped. */
395#define VMCPUSTATE_IS_STOPPED(enmState) ( (enmState) == VMCPUSTATE_STOPPED )
396/** @} */
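/* A minimal usage sketch, illustrative only and not part of this header,
 * assuming a valid pVCpu used on its own emulation thread: moving in and out
 * of the halted sub-state with the macros above.
 * @code
 *      if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
 *      {
 *          // ... block until a forced action or wake-up arrives ...
 *          VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
 *      }
 *      VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
 * @endcode */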
397
398
399/** The name of the raw-mode context VMM Core module. */
400#define VMMRC_MAIN_MODULE_NAME "VMMRC.rc"
401/** The name of the ring-0 context VMM Core module. */
402#define VMMR0_MAIN_MODULE_NAME "VMMR0.r0"
403
404
405/** VM Forced Action Flags.
406 *
407 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
408 * action mask of a VM.
409 *
410 * Available VM bits:
411 * 0, 1, 5, 6, 7, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30
412 *
413 *
414 * Available VMCPU bits:
415 * 14, 15, 36 to 63
416 *
 417 * @todo If we run low on VMCPU bits, we may consider merging the SELM bits.
418 *
419 * @{
420 */
421/** The virtual sync clock has been stopped, go to TM until it has been
422 * restarted... */
423#define VM_FF_TM_VIRTUAL_SYNC RT_BIT_32(VM_FF_TM_VIRTUAL_SYNC_BIT)
424#define VM_FF_TM_VIRTUAL_SYNC_BIT 2
425/** PDM Queues are pending. */
426#define VM_FF_PDM_QUEUES RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
427/** The bit number for VM_FF_PDM_QUEUES. */
428#define VM_FF_PDM_QUEUES_BIT 3
429/** PDM DMA transfers are pending. */
430#define VM_FF_PDM_DMA RT_BIT_32(VM_FF_PDM_DMA_BIT)
431/** The bit number for VM_FF_PDM_DMA. */
432#define VM_FF_PDM_DMA_BIT 4
433/** This action forces the VM to call DBGF so DBGF can service debugger
434 * requests in the emulation thread.
 435 * This action flag stays asserted till DBGF clears it. */
436#define VM_FF_DBGF RT_BIT_32(VM_FF_DBGF_BIT)
437/** The bit number for VM_FF_DBGF. */
438#define VM_FF_DBGF_BIT 8
 439/** This action forces the VM to service pending requests from other
 440 * threads or requests which must be executed in another context. */
441#define VM_FF_REQUEST RT_BIT_32(VM_FF_REQUEST_BIT)
442#define VM_FF_REQUEST_BIT 9
443/** Check for VM state changes and take appropriate action. */
444#define VM_FF_CHECK_VM_STATE RT_BIT_32(VM_FF_CHECK_VM_STATE_BIT)
445/** The bit number for VM_FF_CHECK_VM_STATE. */
446#define VM_FF_CHECK_VM_STATE_BIT 10
447/** Reset the VM. (postponed) */
448#define VM_FF_RESET RT_BIT_32(VM_FF_RESET_BIT)
449/** The bit number for VM_FF_RESET. */
450#define VM_FF_RESET_BIT 11
451/** EMT rendezvous in VMM. */
452#define VM_FF_EMT_RENDEZVOUS RT_BIT_32(VM_FF_EMT_RENDEZVOUS_BIT)
453/** The bit number for VM_FF_EMT_RENDEZVOUS. */
454#define VM_FF_EMT_RENDEZVOUS_BIT 12
455
456/** PGM needs to allocate handy pages. */
457#define VM_FF_PGM_NEED_HANDY_PAGES RT_BIT_32(VM_FF_PGM_NEED_HANDY_PAGES_BIT)
458#define VM_FF_PGM_NEED_HANDY_PAGES_BIT 18
459/** PGM is out of memory.
460 * Abandon all loops and code paths which can be resumed and get up to the EM
461 * loops. */
462#define VM_FF_PGM_NO_MEMORY RT_BIT_32(VM_FF_PGM_NO_MEMORY_BIT)
463#define VM_FF_PGM_NO_MEMORY_BIT 19
464/** PGM is about to perform a lightweight pool flush.
465 * Guest SMP: all EMT threads should return to ring-3.
466 */
467#define VM_FF_PGM_POOL_FLUSH_PENDING RT_BIT_32(VM_FF_PGM_POOL_FLUSH_PENDING_BIT)
468#define VM_FF_PGM_POOL_FLUSH_PENDING_BIT 20
469/** Suspend the VM - debug only. */
470#define VM_FF_DEBUG_SUSPEND RT_BIT_32(VM_FF_DEBUG_SUSPEND_BIT)
471#define VM_FF_DEBUG_SUSPEND_BIT 31
472
473
474#if defined(VBOX_VMM_TARGET_ARMV8)
475/** This action forces the VM to inject an IRQ into the guest. */
476# define VMCPU_FF_INTERRUPT_IRQ RT_BIT_64(VMCPU_FF_INTERRUPT_IRQ_BIT)
477# define VMCPU_FF_INTERRUPT_IRQ_BIT 0
478/** This action forces the VM to inject an FIQ into the guest. */
479# define VMCPU_FF_INTERRUPT_FIQ RT_BIT_64(VMCPU_FF_INTERRUPT_FIQ_BIT)
480# define VMCPU_FF_INTERRUPT_FIQ_BIT 1
481#else
482/** This action forces the VM to check any pending interrupts on the APIC. */
483# define VMCPU_FF_INTERRUPT_APIC RT_BIT_64(VMCPU_FF_INTERRUPT_APIC_BIT)
484# define VMCPU_FF_INTERRUPT_APIC_BIT 0
 485/** This action forces the VM to check any pending interrupts on the PIC. */
486# define VMCPU_FF_INTERRUPT_PIC RT_BIT_64(VMCPU_FF_INTERRUPT_PIC_BIT)
487# define VMCPU_FF_INTERRUPT_PIC_BIT 1
488#endif
 489/** This action forces the VM to schedule and run pending timers (TM).
490 * @remarks Don't move - PATM compatibility. */
491#define VMCPU_FF_TIMER RT_BIT_64(VMCPU_FF_TIMER_BIT)
492#define VMCPU_FF_TIMER_BIT 2
493/** This action forces the VM to check any pending NMIs. */
494#define VMCPU_FF_INTERRUPT_NMI RT_BIT_64(VMCPU_FF_INTERRUPT_NMI_BIT)
495#define VMCPU_FF_INTERRUPT_NMI_BIT 3
496/** This action forces the VM to check any pending SMIs. */
497#define VMCPU_FF_INTERRUPT_SMI RT_BIT_64(VMCPU_FF_INTERRUPT_SMI_BIT)
498#define VMCPU_FF_INTERRUPT_SMI_BIT 4
499/** PDM critical section unlocking is pending, process promptly upon return to R3. */
500#define VMCPU_FF_PDM_CRITSECT RT_BIT_64(VMCPU_FF_PDM_CRITSECT_BIT)
501#define VMCPU_FF_PDM_CRITSECT_BIT 5
502/** Special EM internal force flag that is used by EMUnhaltAndWakeUp() to force
503 * the virtual CPU out of the next (/current) halted state. It is not processed
504 * nor cleared by emR3ForcedActions (similar to VMCPU_FF_BLOCK_NMIS), instead it
505 * is cleared the next time EM leaves the HALTED state. */
506#define VMCPU_FF_UNHALT RT_BIT_64(VMCPU_FF_UNHALT_BIT)
507#define VMCPU_FF_UNHALT_BIT 6
508/** Pending IEM action (mask). */
509#define VMCPU_FF_IEM RT_BIT_64(VMCPU_FF_IEM_BIT)
510/** Pending IEM action (bit number). */
511#define VMCPU_FF_IEM_BIT 7
512/** Pending APIC action (bit number). */
513#define VMCPU_FF_UPDATE_APIC_BIT 8
 514/** This action forces the VM to update the APIC's asynchronously arrived
515 * interrupts as pending interrupts. */
516#define VMCPU_FF_UPDATE_APIC RT_BIT_64(VMCPU_FF_UPDATE_APIC_BIT)
517/** This action forces the VM to service pending requests from other
 518 * threads or requests which must be executed in another context. */
519#define VMCPU_FF_REQUEST RT_BIT_64(VMCPU_FF_REQUEST_BIT)
520#define VMCPU_FF_REQUEST_BIT 9
521/** Pending DBGF event (alternative to passing VINF_EM_DBG_EVENT around). */
522#define VMCPU_FF_DBGF RT_BIT_64(VMCPU_FF_DBGF_BIT)
523/** The bit number for VMCPU_FF_DBGF. */
524#define VMCPU_FF_DBGF_BIT 10
525/** Hardware virtualized nested-guest interrupt pending. */
526#define VMCPU_FF_INTERRUPT_NESTED_GUEST RT_BIT_64(VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT)
527#define VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT 11
528/** This action forces PGM to update changes to CR3 when the guest was in HM mode
529 * (when using nested paging). */
530#define VMCPU_FF_HM_UPDATE_CR3 RT_BIT_64(VMCPU_FF_HM_UPDATE_CR3_BIT)
531#define VMCPU_FF_HM_UPDATE_CR3_BIT 12
532#if defined(VBOX_VMM_TARGET_ARMV8)
533# define VMCPU_FF_VTIMER_ACTIVATED RT_BIT_64(VMCPU_FF_VTIMER_ACTIVATED_BIT)
534# define VMCPU_FF_VTIMER_ACTIVATED_BIT 13
535#else
536/* Bit 13 used to be VMCPU_FF_HM_UPDATE_PAE_PDPES. */
537#endif
538/** This action forces the VM to resync the page tables before going
539 * back to execute guest code. (GLOBAL FLUSH) */
540#define VMCPU_FF_PGM_SYNC_CR3 RT_BIT_64(VMCPU_FF_PGM_SYNC_CR3_BIT)
541#define VMCPU_FF_PGM_SYNC_CR3_BIT 16
 542/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
543 * (NON-GLOBAL FLUSH) */
544#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL RT_BIT_64(VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL_BIT)
545#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL_BIT 17
546/** Check for pending TLB shootdown actions (deprecated)
547 * Reserved for future HM re-use if necessary / safe.
548 * Consumer: HM */
549#define VMCPU_FF_TLB_SHOOTDOWN_UNUSED RT_BIT_64(VMCPU_FF_TLB_SHOOTDOWN_UNUSED_BIT)
550#define VMCPU_FF_TLB_SHOOTDOWN_UNUSED_BIT 18
551/** Check for pending TLB flush action.
552 * Consumer: HM
553 * @todo rename to VMCPU_FF_HM_TLB_FLUSH */
554#define VMCPU_FF_TLB_FLUSH RT_BIT_64(VMCPU_FF_TLB_FLUSH_BIT)
555/** The bit number for VMCPU_FF_TLB_FLUSH. */
556#define VMCPU_FF_TLB_FLUSH_BIT 19
557/* 20 used to be VMCPU_FF_TRPM_SYNC_IDT (raw-mode only). */
558/* 21 used to be VMCPU_FF_SELM_SYNC_TSS (raw-mode only). */
559/* 22 used to be VMCPU_FF_SELM_SYNC_GDT (raw-mode only). */
560/* 23 used to be VMCPU_FF_SELM_SYNC_LDT (raw-mode only). */
561/* 24 used to be VMCPU_FF_INHIBIT_INTERRUPTS, which moved to CPUMCTX::eflags.uBoth in v7.0.4. */
562/* 25 used to be VMCPU_FF_BLOCK_NMIS, which moved to CPUMCTX::eflags.uBoth in v7.0.4. */
563/** Force return to Ring-3. */
564#define VMCPU_FF_TO_R3 RT_BIT_64(VMCPU_FF_TO_R3_BIT)
565#define VMCPU_FF_TO_R3_BIT 28
566/** Force return to ring-3 to service pending I/O or MMIO write.
567 * This is a backup for mechanism VINF_IOM_R3_IOPORT_COMMIT_WRITE and
568 * VINF_IOM_R3_MMIO_COMMIT_WRITE, allowing VINF_EM_DBG_BREAKPOINT and similar
569 * status codes to be propagated at the same time without loss. */
570#define VMCPU_FF_IOM RT_BIT_64(VMCPU_FF_IOM_BIT)
571#define VMCPU_FF_IOM_BIT 29
572/* 30 used to be VMCPU_FF_CPUM */
573/** VMX-preemption timer expired. */
574#define VMCPU_FF_VMX_PREEMPT_TIMER RT_BIT_64(VMCPU_FF_VMX_PREEMPT_TIMER_BIT)
575#define VMCPU_FF_VMX_PREEMPT_TIMER_BIT 31
576/** Pending MTF (Monitor Trap Flag) event. */
577#define VMCPU_FF_VMX_MTF RT_BIT_64(VMCPU_FF_VMX_MTF_BIT)
578#define VMCPU_FF_VMX_MTF_BIT 32
579/** VMX APIC-write emulation pending.
580 * @todo possible candidate for internal EFLAGS, or maybe just a summary bit
581 * (see also VMCPU_FF_VMX_INT_WINDOW). */
582#define VMCPU_FF_VMX_APIC_WRITE RT_BIT_64(VMCPU_FF_VMX_APIC_WRITE_BIT)
583#define VMCPU_FF_VMX_APIC_WRITE_BIT 33
584/** VMX interrupt-window event pending.
585 *
586 * "Pending" is misleading here, it would be better to say that the event need
587 * to be generated at the next opportunity and that this flag causes it to be
588 * polled for on every instruction boundrary and such.
589 *
590 * @todo Change the IEM side of this to not poll but to track down the places
591 * where it can be generated and set an internal EFLAGS bit that causes it
592 * to be checked out when finishing the current instruction. */
593#define VMCPU_FF_VMX_INT_WINDOW RT_BIT_64(VMCPU_FF_VMX_INT_WINDOW_BIT)
594#define VMCPU_FF_VMX_INT_WINDOW_BIT 34
595/** VMX NMI-window event pending.
 596 * Same "pending" comment and todo as in VMCPU_FF_VMX_INT_WINDOW. */
597#define VMCPU_FF_VMX_NMI_WINDOW RT_BIT_64(VMCPU_FF_VMX_NMI_WINDOW_BIT)
598#define VMCPU_FF_VMX_NMI_WINDOW_BIT 35
599
600
 601/** Externally forced VM actions. Used to quit the idle/wait loop. */
602#define VM_FF_EXTERNAL_SUSPENDED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_EMT_RENDEZVOUS )
 603/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
604#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK ( VMCPU_FF_REQUEST | VMCPU_FF_DBGF )
605
606/** Externally forced VM actions. Used to quit the idle/wait loop. */
607#define VM_FF_EXTERNAL_HALTED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST \
608 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS )
609/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
610#if defined(VBOX_VMM_TARGET_ARMV8)
611# define VMCPU_FF_EXTERNAL_HALTED_MASK ( VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
612 | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
613 | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF \
614 | VMCPU_FF_VTIMER_ACTIVATED)
615#else
616# define VMCPU_FF_EXTERNAL_HALTED_MASK ( VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
617 | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
618 | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF \
619 | VMCPU_FF_INTERRUPT_NESTED_GUEST)
620#endif
621
622/** High priority VM pre-execution actions. */
623#define VM_FF_HIGH_PRIORITY_PRE_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC \
624 | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
625 | VM_FF_EMT_RENDEZVOUS )
626/** High priority VMCPU pre-execution actions. */
627#if defined(VBOX_VMM_TARGET_ARMV8)
628# define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
629 | VMCPU_FF_DBGF )
630#else
631# define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
632 | VMCPU_FF_UPDATE_APIC | VMCPU_FF_DBGF \
633 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
634 | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
635 | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW )
636#endif
637
638/** High priority VM pre raw-mode execution mask. */
639#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY )
640/** High priority VMCPU pre raw-mode execution mask. */
641#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL )
642
643/** High priority post-execution actions. */
644#define VM_FF_HIGH_PRIORITY_POST_MASK ( VM_FF_PGM_NO_MEMORY )
645/** High priority post-execution actions. */
646#define VMCPU_FF_HIGH_PRIORITY_POST_MASK ( VMCPU_FF_PDM_CRITSECT | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_IEM | VMCPU_FF_IOM )
647
648/** Normal priority VM post-execution actions. */
649#define VM_FF_NORMAL_PRIORITY_POST_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET \
650 | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
651/** Normal priority VMCPU post-execution actions. */
652#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK ( VMCPU_FF_DBGF )
653
654/** Normal priority VM actions. */
655#define VM_FF_NORMAL_PRIORITY_MASK ( VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS)
656/** Normal priority VMCPU actions. */
657#define VMCPU_FF_NORMAL_PRIORITY_MASK ( VMCPU_FF_REQUEST )
658
659/** Flags to clear before resuming guest execution. */
660#define VMCPU_FF_RESUME_GUEST_MASK ( VMCPU_FF_TO_R3 )
661
662
663/** VM flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
664#define VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
665 | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_RESET)
666/** VM flags that cause the REP[|NE|E] STRINS loops to yield. */
667#define VM_FF_YIELD_REPSTR_MASK ( VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
668 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_DBGF | VM_FF_DEBUG_SUSPEND )
669/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
670#ifdef IN_RING3
671# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK (VMCPU_FF_DBGF | VMCPU_FF_VMX_MTF)
672#else
673# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK (VMCPU_FF_TO_R3 | VMCPU_FF_IEM | VMCPU_FF_IOM | VMCPU_FF_DBGF | VMCPU_FF_VMX_MTF)
674#endif
675
676#if !defined(VBOX_VMM_TARGET_ARMV8)
677/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
678 * enabled. */
679# define VMCPU_FF_YIELD_REPSTR_MASK ( VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
680 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC \
681 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_PDM_CRITSECT \
682 | VMCPU_FF_TIMER | VMCPU_FF_REQUEST \
683 | VMCPU_FF_INTERRUPT_NESTED_GUEST )
684/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
685 * disabled. */
686# define VMCPU_FF_YIELD_REPSTR_NOINT_MASK ( VMCPU_FF_YIELD_REPSTR_MASK \
687 & ~( VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC \
688 | VMCPU_FF_INTERRUPT_NESTED_GUEST) )
689#endif
690
691/** VM Flags that cause the HM loops to go back to ring-3. */
692#define VM_FF_HM_TO_R3_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
693 | VM_FF_PDM_QUEUES | VM_FF_EMT_RENDEZVOUS)
694/** VMCPU Flags that cause the HM loops to go back to ring-3. */
695#define VMCPU_FF_HM_TO_R3_MASK ( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT \
696 | VMCPU_FF_IEM | VMCPU_FF_IOM)
697
698/** High priority ring-0 VM pre HM-mode execution mask. */
699#define VM_FF_HP_R0_PRE_HM_MASK (VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
700/** High priority ring-0 VMCPU pre HM-mode execution mask. */
701#define VMCPU_FF_HP_R0_PRE_HM_MASK ( VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 \
702 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST \
703 | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER)
704/** High priority ring-0 VM pre HM-mode execution mask, single stepping. */
705#define VM_FF_HP_R0_PRE_HM_STEP_MASK (VM_FF_HP_R0_PRE_HM_MASK & ~( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES \
706 | VM_FF_EMT_RENDEZVOUS | VM_FF_REQUEST \
707 | VM_FF_PDM_DMA) )
708/** High priority ring-0 VMCPU pre HM-mode execution mask, single stepping. */
709#define VMCPU_FF_HP_R0_PRE_HM_STEP_MASK (VMCPU_FF_HP_R0_PRE_HM_MASK & ~( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER \
710 | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_REQUEST) )
711
712/** All the VMX nested-guest flags. */
713#define VMCPU_FF_VMX_ALL_MASK ( VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
714 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW )
715
716/** All the forced VM flags. */
717#define VM_FF_ALL_MASK (UINT32_MAX)
718/** All the forced VMCPU flags. */
719#define VMCPU_FF_ALL_MASK (UINT32_MAX)
720
721/** All the forced VM flags except those related to raw-mode and hardware
722 * assisted execution. */
723#define VM_FF_ALL_REM_MASK (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
724/** All the forced VMCPU flags except those related to raw-mode and hardware
725 * assisted execution. */
726#define VMCPU_FF_ALL_REM_MASK (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_TLB_FLUSH))
727
728#ifndef VBOX_FOR_DTRACE_LIB
729AssertCompile( ((VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK | VM_FF_YIELD_REPSTR_MASK)
730 & (VM_FF_HIGH_PRIORITY_PRE_RAW_MASK & ~VM_FF_ALL_REM_MASK)) == 0);
731AssertCompile((VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK & (VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK & ~VMCPU_FF_ALL_REM_MASK)) == 0);
732#endif
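/* Illustrative only, assuming valid pVM and pVCpu pointers: the halted masks
 * above are what an idle/halt loop polls to decide whether it must stop
 * waiting. A sketch, not code from the VMM sources:
 * @code
 *      while (   !VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
 *             && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_HALTED_MASK))
 *      {
 *          // ... block on the halt event semaphore ...
 *      }
 * @endcode */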
733
734/** @} */
735
736/** @def VM_FF_SET
737 * Sets a single force action flag.
738 *
739 * @param pVM The cross context VM structure.
740 * @param fFlag The flag to set.
741 */
742#define VM_FF_SET(pVM, fFlag) do { \
743 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
744 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
745 ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
746 } while (0)
747
748/** @def VMCPU_FF_SET
749 * Sets a single force action flag for the given VCPU.
750 *
751 * @param pVCpu The cross context virtual CPU structure.
752 * @param fFlag The flag to set.
753 * @sa VMCPU_FF_SET_MASK
754 */
755#ifdef VMCPU_WITH_64_BIT_FFS
756# define VMCPU_FF_SET(pVCpu, fFlag) do { \
757 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
758 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
759 ASMAtomicBitSet(&(pVCpu)->fLocalForcedActions, fFlag##_BIT); \
760 } while (0)
761#else
762# define VMCPU_FF_SET(pVCpu, fFlag) do { \
763 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
764 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
765 ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag)); \
766 } while (0)
767#endif
768
769/** @def VMCPU_FF_SET_MASK
 770 * Sets two or more force action flags for the given VCPU.
771 *
772 * @param pVCpu The cross context virtual CPU structure.
773 * @param fFlags The flags to set.
774 * @sa VMCPU_FF_SET
775 */
776#ifdef VMCPU_WITH_64_BIT_FFS
777# if ARCH_BITS > 32
778# define VMCPU_FF_SET_MASK(pVCpu, fFlags) \
779 do { ASMAtomicOrU64(&pVCpu->fLocalForcedActions, (fFlags)); } while (0)
780# else
781# define VMCPU_FF_SET_MASK(pVCpu, fFlags) do { \
782 if (!((fFlags) >> 32)) ASMAtomicOrU32((uint32_t volatile *)&pVCpu->fLocalForcedActions, (uint32_t)(fFlags)); \
783 else ASMAtomicOrU64(&pVCpu->fLocalForcedActions, (fFlags)); \
784 } while (0)
785# endif
786#else
787# define VMCPU_FF_SET_MASK(pVCpu, fFlags) \
788 do { ASMAtomicOrU32(&pVCpu->fLocalForcedActions, (fFlags)); } while (0)
789#endif
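/* Usage sketch (illustrative, assuming a valid pVCpu): setting several flags
 * in one atomic operation; note that on 32-bit hosts the macro above falls
 * back to a 32-bit OR when the high dword of the mask is zero.
 * @code
 *      VMCPU_FF_SET_MASK(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI);
 * @endcode */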
790
791/** @def VM_FF_CLEAR
792 * Clears a single force action flag.
793 *
794 * @param pVM The cross context VM structure.
795 * @param fFlag The flag to clear.
796 */
797#define VM_FF_CLEAR(pVM, fFlag) do { \
798 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
799 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
800 ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
801 } while (0)
802
803/** @def VMCPU_FF_CLEAR
804 * Clears a single force action flag for the given VCPU.
805 *
806 * @param pVCpu The cross context virtual CPU structure.
807 * @param fFlag The flag to clear.
808 */
809#ifdef VMCPU_WITH_64_BIT_FFS
810# define VMCPU_FF_CLEAR(pVCpu, fFlag) do { \
811 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
812 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
813 ASMAtomicBitClear(&(pVCpu)->fLocalForcedActions, fFlag##_BIT); \
814 } while (0)
815#else
816# define VMCPU_FF_CLEAR(pVCpu, fFlag) do { \
817 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
818 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
819 ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag)); \
820 } while (0)
821#endif
822
823/** @def VMCPU_FF_CLEAR_MASK
824 * Clears two or more force action flags for the given VCPU.
825 *
826 * @param pVCpu The cross context virtual CPU structure.
827 * @param fFlags The flags to clear.
828 */
829#ifdef VMCPU_WITH_64_BIT_FFS
830# if ARCH_BITS > 32
831# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) \
832 do { ASMAtomicAndU64(&(pVCpu)->fLocalForcedActions, ~(fFlags)); } while (0)
833# else
834# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) do { \
835 if (!((fFlags) >> 32)) ASMAtomicAndU32((uint32_t volatile *)&(pVCpu)->fLocalForcedActions, ~(uint32_t)(fFlags)); \
836 else ASMAtomicAndU64(&(pVCpu)->fLocalForcedActions, ~(fFlags)); \
837 } while (0)
838# endif
839#else
840# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) \
841 do { ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlags)); } while (0)
842#endif
843
844/** @def VM_FF_IS_SET
 845 * Checks if a single force action flag is set.
846 *
847 * @param pVM The cross context VM structure.
848 * @param fFlag The flag to check.
849 * @sa VM_FF_IS_ANY_SET
850 */
851#if !defined(VBOX_STRICT) || !defined(RT_COMPILER_SUPPORTS_LAMBDA)
852# define VM_FF_IS_SET(pVM, fFlag) RT_BOOL((pVM)->fGlobalForcedActions & (fFlag))
853#else
854# define VM_FF_IS_SET(pVM, fFlag) \
855 ([](PVM a_pVM) -> bool \
856 { \
857 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
858 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
859 return RT_BOOL(a_pVM->fGlobalForcedActions & (fFlag)); \
860 }(pVM))
861#endif
862
863/** @def VMCPU_FF_IS_SET
864 * Checks if a single force action flag is set for the given VCPU.
865 *
866 * @param pVCpu The cross context virtual CPU structure.
867 * @param fFlag The flag to check.
868 * @sa VMCPU_FF_IS_ANY_SET
869 */
870#if !defined(VBOX_STRICT) || !defined(RT_COMPILER_SUPPORTS_LAMBDA)
871# define VMCPU_FF_IS_SET(pVCpu, fFlag) RT_BOOL((pVCpu)->fLocalForcedActions & (fFlag))
872#else
873# define VMCPU_FF_IS_SET(pVCpu, fFlag) \
874 ([](PCVMCPU a_pVCpu) -> bool \
875 { \
876 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
877 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
878 return RT_BOOL(a_pVCpu->fLocalForcedActions & (fFlag)); \
879 }(pVCpu))
880#endif
881
882/** @def VM_FF_IS_ANY_SET
 883 * Checks if one or more force actions in the specified set are pending.
884 *
885 * @param pVM The cross context VM structure.
886 * @param fFlags The flags to check for.
887 * @sa VM_FF_IS_SET
888 */
889#define VM_FF_IS_ANY_SET(pVM, fFlags) RT_BOOL((pVM)->fGlobalForcedActions & (fFlags))
890
891/** @def VMCPU_FF_IS_ANY_SET
 892 * Checks if one or more force action flags in the specified set are set for the given VCPU.
893 *
894 * @param pVCpu The cross context virtual CPU structure.
895 * @param fFlags The flags to check for.
896 * @sa VMCPU_FF_IS_SET
897 */
898#define VMCPU_FF_IS_ANY_SET(pVCpu, fFlags) RT_BOOL((pVCpu)->fLocalForcedActions & (fFlags))
899
900/** @def VM_FF_TEST_AND_CLEAR
 901 * Checks if one (!) force action flag is pending and clears it atomically.
902 *
903 * @returns true if the bit was set.
904 * @returns false if the bit was clear.
905 * @param pVM The cross context VM structure.
906 * @param fFlag Flag constant to check and clear (_BIT is appended).
907 */
908#define VM_FF_TEST_AND_CLEAR(pVM, fFlag) (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, fFlag##_BIT))
909
910/** @def VMCPU_FF_TEST_AND_CLEAR
 911 * Checks if one (!) force action flag is pending and clears it atomically.
912 *
913 * @returns true if the bit was set.
914 * @returns false if the bit was clear.
915 * @param pVCpu The cross context virtual CPU structure.
916 * @param fFlag Flag constant to check and clear (_BIT is appended).
917 */
918#define VMCPU_FF_TEST_AND_CLEAR(pVCpu, fFlag) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, fFlag##_BIT))
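/* A hedged sketch of the typical producer/consumer pattern for these flags,
 * assuming valid pVM and pVCpu pointers; none of this code is part of the
 * header itself.
 * @code
 *      VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);              // producer side
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))       // consumer side (EMT)
 *      {
 *          // ... process the pending requests ...
 *          VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_REQUEST);
 *      }
 *
 *      if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))         // check and consume in one go
 *      {
 *          // ... carry out the postponed reset ...
 *      }
 * @endcode */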
919
920/** @def VM_FF_IS_PENDING_EXCEPT
 921 * Checks if one or more force actions in the specified set are pending while one
922 * or more other ones are not.
923 *
924 * @param pVM The cross context VM structure.
925 * @param fFlags The flags to check for.
926 * @param fExcpt The flags that should not be set.
927 */
928#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt) \
929 ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )
930
931/** @def VM_IS_EMT
932 * Checks if the current thread is the emulation thread (EMT).
933 *
934 * @remark The ring-0 variation will need attention if we expand the ring-0
935 * code to let threads other than EMT mess around with the VM.
936 */
937#ifdef IN_RC
938# define VM_IS_EMT(pVM) true
939#else
940# define VM_IS_EMT(pVM) (VMMGetCpu(pVM) != NULL)
941#endif
942
943/** @def VMCPU_IS_EMT
944 * Checks if the current thread is the emulation thread (EMT) for the specified
945 * virtual CPU.
946 */
947#ifdef IN_RC
948# define VMCPU_IS_EMT(pVCpu) true
949#else
950# define VMCPU_IS_EMT(pVCpu) ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
951#endif
952
953/** @def VM_ASSERT_EMT
954 * Asserts that the current thread IS the emulation thread (EMT).
955 */
956#ifdef IN_RC
957# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
958#elif defined(IN_RING0)
959# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
960#else
961# define VM_ASSERT_EMT(pVM) \
962 AssertMsg(VM_IS_EMT(pVM), \
963 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
964#endif
965
966/** @def VMCPU_ASSERT_EMT
967 * Asserts that the current thread IS the emulation thread (EMT) of the
968 * specified virtual CPU.
969 */
970#ifdef IN_RC
971# define VMCPU_ASSERT_EMT(pVCpu) Assert(VMCPU_IS_EMT(pVCpu))
972#elif defined(IN_RING0)
973# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
974 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%u\n", \
975 RTThreadNativeSelf(), (pVCpu) ? (pVCpu)->hNativeThreadR0 : 0, \
976 (pVCpu) ? (pVCpu)->idCpu : 0))
977#else
978# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
979 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
980 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
981#endif
982
983/** @def VM_ASSERT_EMT_RETURN
984 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
985 */
986#ifdef IN_RC
987# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
988#elif defined(IN_RING0)
989# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
990#else
991# define VM_ASSERT_EMT_RETURN(pVM, rc) \
992 AssertMsgReturn(VM_IS_EMT(pVM), \
993 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
994 (rc))
995#endif
996
997/** @def VMCPU_ASSERT_EMT_RETURN
998 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
999 */
1000#ifdef IN_RC
1001# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
1002#elif defined(IN_RING0)
1003# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
1004#else
1005# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
1006 AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
1007 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
1008 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
1009 (rc))
1010#endif
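/* Sketch of how an EMT-only API typically guards itself with these macros.
 * The function name is hypothetical; VERR_VM_THREAD_NOT_EMT is the customary
 * failure status for this situation.
 * @code
 *      VMMR3DECL(int) vmR3SomeEmtOnlyOperation(PVM pVM, PVMCPU pVCpu)
 *      {
 *          VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
 *          VMCPU_ASSERT_EMT(pVCpu);
 *          // ... do the EMT-only work ...
 *          return VINF_SUCCESS;
 *      }
 * @endcode */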
1011
1012/** @def VMCPU_ASSERT_EMT_OR_GURU
 1013 * Asserts that the current thread IS the emulation thread (EMT) of the
 1014 * specified virtual CPU or that the VM is in the guru meditation state.
1015 */
1016#if defined(IN_RC) || defined(IN_RING0)
1017# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) Assert( VMCPU_IS_EMT(pVCpu) \
1018 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
1019 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS )
1020#else
1021# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) \
1022 AssertMsg( VMCPU_IS_EMT(pVCpu) \
1023 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
1024 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS, \
1025 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
1026 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
1027#endif
1028
1029/** @def VMCPU_ASSERT_EMT_OR_NOT_RUNNING
1030 * Asserts that the current thread IS the emulation thread (EMT) of the
1031 * specified virtual CPU or the VM is not running.
1032 */
1033#if defined(IN_RC) || defined(IN_RING0)
1034# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
1035 Assert( VMCPU_IS_EMT(pVCpu) \
1036 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)) )
1037#else
1038# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
1039 AssertMsg( VMCPU_IS_EMT(pVCpu) \
1040 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)), \
1041 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
1042 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
1043#endif
1044
1045/** @def VMSTATE_IS_RUNNING
1046 * Checks if the given state indicates a running VM.
1047 */
1048#define VMSTATE_IS_RUNNING(a_enmVMState) \
1049 ( (a_enmVMState) == VMSTATE_RUNNING \
1050 || (a_enmVMState) == VMSTATE_RUNNING_LS )
1051
1052/** @def VM_IS_RUNNING_FOR_ASSERTIONS_ONLY
1053 * Checks if the VM is running.
1054 * @note This is only for pure debug assertions. No AssertReturn or similar!
1055 * @sa VMSTATE_IS_RUNNING
1056 */
1057#define VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM) \
1058 ( (pVM)->enmVMState == VMSTATE_RUNNING \
1059 || (pVM)->enmVMState == VMSTATE_RUNNING_LS )
1060
1061
1062/** @def VMSTATE_IS_POWERED_ON
1063 * Checks if the given state indicates the VM is powered on.
1064 *
1065 * @note Excludes all error states, so a powered on VM that hit a fatal error,
1066 * guru meditation, state load failure or similar will not be considered
1067 * powered on by this test.
1068 */
1069#define VMSTATE_IS_POWERED_ON(a_enmVMState) \
1070 ( (a_enmVMState) >= VMSTATE_RESUMING && (a_enmVMState) < VMSTATE_POWERING_OFF )
1071
1072/** @def VM_ASSERT_IS_NOT_RUNNING
1073 * Asserts that the VM is not running.
1074 */
1075#if defined(IN_RC) || defined(IN_RING0)
1076#define VM_ASSERT_IS_NOT_RUNNING(pVM) Assert(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM))
1077#else
1078#define VM_ASSERT_IS_NOT_RUNNING(pVM) AssertMsg(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM), \
1079 ("VM is running. enmVMState=%d\n", (pVM)->enmVMState))
1080#endif
1081
1082/** @def VM_ASSERT_EMT0
1083 * Asserts that the current thread IS emulation thread \#0 (EMT0).
1084 */
1085#ifdef IN_RING3
1086# define VM_ASSERT_EMT0(a_pVM) VMCPU_ASSERT_EMT((a_pVM)->apCpusR3[0])
1087#else
1088# define VM_ASSERT_EMT0(a_pVM) VMCPU_ASSERT_EMT(&(a_pVM)->aCpus[0])
1089#endif
1090
1091/** @def VM_ASSERT_EMT0_RETURN
1092 * Asserts that the current thread IS emulation thread \#0 (EMT0) and returns if
1093 * it isn't.
1094 */
1095#ifdef IN_RING3
1096# define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN((pVM)->apCpusR3[0], (rc))
1097#else
1098# define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN(&(pVM)->aCpus[0], (rc))
1099#endif
1100
1101
1102/**
1103 * Asserts that the current thread is NOT the emulation thread.
1104 */
1105#define VM_ASSERT_OTHER_THREAD(pVM) \
1106 AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))
1107
1108
1109/** @def VM_ASSERT_STATE
1110 * Asserts a certain VM state.
1111 */
1112#define VM_ASSERT_STATE(pVM, _enmState) \
1113 AssertMsg((pVM)->enmVMState == (_enmState), \
1114 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))
1115
1116/** @def VM_ASSERT_STATE_RETURN
1117 * Asserts a certain VM state and returns if it doesn't match.
1118 */
1119#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
1120 AssertMsgReturn((pVM)->enmVMState == (_enmState), \
1121 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
1122 (rc))
1123
1124/** @def VM_IS_VALID_EXT
 1125 * Checks that the VM handle is valid for external access, i.e. not being
 1126 * destroyed or terminated. */
1127#define VM_IS_VALID_EXT(pVM) \
1128 ( RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
1129 && ( (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
1130 || ( (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
1131 && VM_IS_EMT(pVM))) )
1132
1133/** @def VM_ASSERT_VALID_EXT_RETURN
 1134 * Asserts that the VM handle is valid for external access, i.e. not being
 1135 * destroyed or terminated.
1136 */
1137#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
1138 AssertMsgReturn(VM_IS_VALID_EXT(pVM), \
1139 ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
1140 ? VMGetStateName(pVM->enmVMState) : ""), \
1141 (rc))
1142
1143/** @def VMCPU_ASSERT_VALID_EXT_RETURN
 1144 * Asserts that the VMCPU handle is valid for external access, i.e. not being
 1145 * destroyed or terminated.
1146 */
1147#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
1148 AssertMsgReturn( RT_VALID_ALIGNED_PTR(pVCpu, 64) \
1149 && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
1150 && (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
1151 ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
1152 RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
1153 ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
1154 (rc))
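/* Sketch of the standard prologue of an externally callable API. The function
 * name is hypothetical; VERR_INVALID_VM_HANDLE and VERR_INVALID_CPU_ID are the
 * usual statuses for bad handles and CPU ids.
 * @code
 *      VMMR3DECL(int) SomeExternalApi(PVM pVM, VMCPUID idCpu)
 *      {
 *          VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
 *          AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
 *          // ... safe to touch the VM structure now ...
 *          return VINF_SUCCESS;
 *      }
 * @endcode */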
1155
1156#endif /* !VBOX_FOR_DTRACE_LIB */
1157
1158
1159/**
 1160 * Helper that HM and NEM use for safely modifying VM::bMainExecutionEngine.
1161 *
1162 * ONLY HM and NEM MAY USE THIS!
1163 *
1164 * @param a_pVM The cross context VM structure.
1165 * @param a_bValue The new value.
1166 * @internal
1167 */
1168#define VM_SET_MAIN_EXECUTION_ENGINE(a_pVM, a_bValue) \
1169 do { \
1170 *const_cast<uint8_t *>(&(a_pVM)->bMainExecutionEngine) = (a_bValue); \
1171 ASMCompilerBarrier(); /* just to be on the safe side */ \
1172 } while (0)
1173
1174/**
1175 * Checks whether iem-executes-all-mode is used.
1176 *
1177 * @retval true if IEM is used.
1178 * @retval false if not.
1179 *
1180 * @param a_pVM The cross context VM structure.
1181 * @sa VM_IS_HM_OR_NEM_ENABLED, VM_IS_HM_ENABLED, VM_IS_NEM_ENABLED.
1182 * @internal
1183 */
1184#define VM_IS_EXEC_ENGINE_IEM(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_IEM)
1185
1186/**
1187 * Checks whether HM (VT-x/AMD-V) or NEM is being used by this VM.
1188 *
1189 * @retval true if either is used.
1190 * @retval false if software virtualization (raw-mode) is used.
1191 *
1192 * @param a_pVM The cross context VM structure.
1193 * @sa VM_IS_EXEC_ENGINE_IEM, VM_IS_HM_ENABLED, VM_IS_NEM_ENABLED.
1194 * @internal
1195 */
1196#define VM_IS_HM_OR_NEM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine != VM_EXEC_ENGINE_IEM)
1197
1198/**
1199 * Checks whether HM is being used by this VM.
1200 *
1201 * @retval true if HM (VT-x/AMD-v) is used.
1202 * @retval false if not.
1203 *
1204 * @param a_pVM The cross context VM structure.
1205 * @sa VM_IS_NEM_ENABLED, VM_IS_EXEC_ENGINE_IEM, VM_IS_HM_OR_NEM_ENABLED.
1206 * @internal
1207 */
1208#define VM_IS_HM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT)
1209
1210/**
1211 * Checks whether NEM is being used by this VM.
1212 *
1213 * @retval true if a native hypervisor API is used.
1214 * @retval false if not.
1215 *
1216 * @param a_pVM The cross context VM structure.
1217 * @sa VM_IS_HM_ENABLED, VM_IS_EXEC_ENGINE_IEM, VM_IS_HM_OR_NEM_ENABLED.
1218 * @internal
1219 */
1220#define VM_IS_NEM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
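/* Illustrative dispatch on the main execution engine, assuming a valid pVM;
 * a rough sketch only, the real callers live in EM/HM/NEM:
 * @code
 *      if (VM_IS_HM_ENABLED(pVM))
 *          rc = SomeHmPath(pVM);           // VT-x / AMD-V (hypothetical helper)
 *      else if (VM_IS_NEM_ENABLED(pVM))
 *          rc = SomeNemPath(pVM);          // native hypervisor API (hypothetical helper)
 *      else
 *          rc = SomeIemPath(pVM);          // VM_IS_EXEC_ENGINE_IEM (hypothetical helper)
 * @endcode */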
1221
1222
1223/**
1224 * The cross context VM structure.
1225 *
 1226 * It contains all the VM data which has to be available in all contexts.
 1227 * Even though it contains all the data, the idea is to use APIs rather than
 1228 * modifying the members all around the place. Therefore we make use of unions to hide
1229 * everything which isn't local to the current source module. This means we'll
1230 * have to pay a little bit of attention when adding new members to structures
1231 * in the unions and make sure to keep the padding sizes up to date.
1232 *
1233 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
1234 */
1235typedef struct VM
1236{
1237 /** The state of the VM.
1238 * This field is read only to everyone except the VM and EM. */
1239 VMSTATE volatile enmVMState;
1240 /** Forced action flags.
1241 * See the VM_FF_* \#defines. Updated atomically.
1242 */
1243 volatile uint32_t fGlobalForcedActions;
1244 /** Pointer to the array of page descriptors for the VM structure allocation. */
1245 R3PTRTYPE(PSUPPAGE) paVMPagesR3;
1246 /** Session handle. For use when calling SUPR0 APIs. */
1247#ifdef IN_RING0
1248 PSUPDRVSESSION pSessionUnsafe;
1249#else
1250 PSUPDRVSESSION pSession;
1251#endif
1252 /** Pointer to the ring-3 VM structure. */
1253 PUVM pUVM;
1254 /** Ring-3 Host Context VM Pointer. */
1255#ifdef IN_RING0
1256 R3PTRTYPE(struct VM *) pVMR3Unsafe;
1257#else
1258 R3PTRTYPE(struct VM *) pVMR3;
1259#endif
1260 /** Ring-0 Host Context VM pointer for making ring-0 calls. */
1261 R0PTRTYPE(struct VM *) pVMR0ForCall;
1262 /** Raw-mode Context VM Pointer. */
1263 uint32_t pVMRC;
1264 /** Padding for new raw-mode (long mode). */
1265 uint32_t pVMRCPadding;
1266
1267 /** The GVM VM handle. Only the GVM should modify this field. */
1268#ifdef IN_RING0
1269 uint32_t hSelfUnsafe;
1270#else
1271 uint32_t hSelf;
1272#endif
1273 /** Number of virtual CPUs. */
1274#ifdef IN_RING0
1275 uint32_t cCpusUnsafe;
1276#else
1277 uint32_t cCpus;
1278#endif
 1279 /** CPU execution cap (1-100). */
1280 uint32_t uCpuExecutionCap;
1281
1282 /** Size of the VM structure. */
1283 uint32_t cbSelf;
1284 /** Size of the VMCPU structure. */
1285 uint32_t cbVCpu;
1286 /** Structure version number (TBD). */
1287 uint32_t uStructVersion;
1288
1289 /** @name Various items that are frequently accessed.
1290 * @{ */
1291 /** The main execution engine, VM_EXEC_ENGINE_XXX.
1292 * This is set early during vmR3InitRing3 by HM or NEM. */
1293 uint8_t const bMainExecutionEngine;
1294
1295 /** Hardware VM support is available and enabled.
1296 * Determined very early during init.
1297 * This is placed here for performance reasons.
1298 * @todo obsoleted by bMainExecutionEngine, eliminate. */
1299 bool fHMEnabled;
1300 /** @} */
1301
1302 /** Alignment padding. */
1303 uint8_t uPadding1[6];
1304
1305 /** @name Debugging
1306 * @{ */
 1307 /** Ring-3 trace buffer handle. */
 1308 R3PTRTYPE(RTTRACEBUF) hTraceBufR3;
 1309 /** Ring-0 trace buffer handle. */
 1310 R0PTRTYPE(RTTRACEBUF) hTraceBufR0;
1311 /** @} */
1312
1313 /** Max EMT hash lookup collisions (in GVMM). */
1314 uint8_t cMaxEmtHashCollisions;
1315
1316 /** Padding - the unions must be aligned on a 64 bytes boundary. */
1317 uint8_t abAlignment3[HC_ARCH_BITS == 64 ? 23 : 51];
1318
1319 /** CPUM part. */
1320 union
1321 {
1322#if defined(VMM_INCLUDED_SRC_include_CPUMInternal_h) || defined(VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h)
1323 struct CPUM s;
1324#endif
1325#ifdef VBOX_INCLUDED_vmm_cpum_h
1326 /** Read only info exposed about the host and guest CPUs. */
1327 struct
1328 {
1329 /** Padding for hidden fields. */
1330 uint8_t abHidden0[64 + 48];
1331 /** Guest CPU feature information. */
1332 CPUMFEATURES GuestFeatures;
1333 } const ro;
1334#endif
1335 /** @todo this is rather bloated because of static MSR range allocation.
1336 * Probably a good idea to move it to a separate R0 allocation... */
1337 uint8_t padding[8832 + 128*8192 + 0x1d00]; /* multiple of 64 */
1338 } cpum;
1339
1340 /** PGM part.
1341 * @note 16384 aligned for zero and mmio page storage. */
1342 union
1343 {
1344#ifdef VMM_INCLUDED_SRC_include_PGMInternal_h
1345 struct PGM s;
1346#endif
1347 uint8_t padding[53888]; /* multiple of 64 */
1348 } pgm;
1349
1350 /** VMM part. */
1351 union
1352 {
1353#ifdef VMM_INCLUDED_SRC_include_VMMInternal_h
1354 struct VMM s;
1355#endif
1356 uint8_t padding[1600]; /* multiple of 64 */
1357 } vmm;
1358
1359 /** HM part. */
1360 union
1361 {
1362#ifdef VMM_INCLUDED_SRC_include_HMInternal_h
1363 struct HM s;
1364#endif
1365 uint8_t padding[5504]; /* multiple of 64 */
1366 } hm;
1367
1368 /** TRPM part. */
1369 union
1370 {
1371#ifdef VMM_INCLUDED_SRC_include_TRPMInternal_h
1372 struct TRPM s;
1373#endif
1374 uint8_t padding[2048]; /* multiple of 64 */
1375 } trpm;
1376
1377 /** SELM part. */
1378 union
1379 {
1380#ifdef VMM_INCLUDED_SRC_include_SELMInternal_h
1381 struct SELM s;
1382#endif
1383 uint8_t padding[768]; /* multiple of 64 */
1384 } selm;
1385
1386 /** MM part. */
1387 union
1388 {
1389#ifdef VMM_INCLUDED_SRC_include_MMInternal_h
1390 struct MM s;
1391#endif
1392 uint8_t padding[192]; /* multiple of 64 */
1393 } mm;
1394
1395 /** PDM part. */
1396 union
1397 {
1398#ifdef VMM_INCLUDED_SRC_include_PDMInternal_h
1399 struct PDM s;
1400#endif
1401 uint8_t padding[22400]; /* multiple of 64 */
1402 } pdm;
1403
1404 /** IOM part. */
1405 union
1406 {
1407#ifdef VMM_INCLUDED_SRC_include_IOMInternal_h
1408 struct IOM s;
1409#endif
1410 uint8_t padding[1152]; /* multiple of 64 */
1411 } iom;
1412
1413 /** EM part. */
1414 union
1415 {
1416#ifdef VMM_INCLUDED_SRC_include_EMInternal_h
1417 struct EM s;
1418#endif
1419 uint8_t padding[256]; /* multiple of 64 */
1420 } em;
1421
1422 /** NEM part. */
1423 union
1424 {
1425#ifdef VMM_INCLUDED_SRC_include_NEMInternal_h
1426 struct NEM s;
1427#endif
1428 uint8_t padding[4608]; /* multiple of 64 */
1429 } nem;
1430
1431 /** TM part. */
1432 union
1433 {
1434#ifdef VMM_INCLUDED_SRC_include_TMInternal_h
1435 struct TM s;
1436#endif
1437 uint8_t padding[10112]; /* multiple of 64 */
1438 } tm;
1439
1440 /** DBGF part. */
1441 union
1442 {
1443#ifdef VMM_INCLUDED_SRC_include_DBGFInternal_h
1444 struct DBGF s;
1445#endif
1446#ifdef VBOX_INCLUDED_vmm_dbgf_h
1447 /** Read only info exposed about interrupt breakpoints and selected events. */
1448 struct
1449 {
1450 /** Bitmap of enabled hardware interrupt breakpoints. */
1451 uint32_t bmHardIntBreakpoints[256 / 32];
1452 /** Bitmap of enabled software interrupt breakpoints. */
1453 uint32_t bmSoftIntBreakpoints[256 / 32];
1454 /** Bitmap of selected events.
1455 * This includes non-selectable events too for simplicity, we maintain the
1456 * state for some of these, as it may come in handy. */
1457 uint64_t bmSelectedEvents[(DBGFEVENT_END + 63) / 64];
1458 /** Enabled hardware interrupt breakpoints. */
1459 uint32_t cHardIntBreakpoints;
1460 /** Enabled software interrupt breakpoints. */
1461 uint32_t cSoftIntBreakpoints;
1462 /** The number of selected events. */
1463 uint32_t cSelectedEvents;
1464 /** The number of enabled hardware breakpoints. */
1465 uint8_t cEnabledHwBreakpoints;
1466 /** The number of enabled hardware I/O breakpoints. */
1467 uint8_t cEnabledHwIoBreakpoints;
1468 uint8_t au8Alignment1[2]; /**< Alignment padding. */
1469 /** The number of enabled INT3 breakpoints. */
1470 uint32_t volatile cEnabledInt3Breakpoints;
1471 } const ro;
1472#endif
1473 uint8_t padding[2432]; /* multiple of 64 */
1474 } dbgf;
1475
1476 /** SSM part. */
1477 union
1478 {
1479#ifdef VMM_INCLUDED_SRC_include_SSMInternal_h
1480 struct SSM s;
1481#endif
1482 uint8_t padding[128]; /* multiple of 64 */
1483 } ssm;
1484
1485 union
1486 {
1487#ifdef VMM_INCLUDED_SRC_include_GIMInternal_h
1488 struct GIM s;
1489#endif
1490 uint8_t padding[448]; /* multiple of 64 */
1491 } gim;
1492
1493#if defined(VBOX_VMM_TARGET_ARMV8)
1494 union
1495 {
1496# ifdef VMM_INCLUDED_SRC_include_GICInternal_h
1497 struct GIC s;
1498# endif
1499 uint8_t padding[128]; /* multiple of 8 */
1500 } gic;
1501#else
1502 union
1503 {
1504# ifdef VMM_INCLUDED_SRC_include_APICInternal_h
1505 struct APIC s;
1506# endif
1507 uint8_t padding[128]; /* multiple of 8 */
1508 } apic;
1509#endif
1510
1511 /* ---- begin small stuff ---- */
1512
1513 /** VM part. */
1514 union
1515 {
1516#ifdef VMM_INCLUDED_SRC_include_VMInternal_h
1517 struct VMINT s;
1518#endif
1519 uint8_t padding[32]; /* multiple of 8 */
1520 } vm;
1521
1522 /** CFGM part. */
1523 union
1524 {
1525#ifdef VMM_INCLUDED_SRC_include_CFGMInternal_h
1526 struct CFGM s;
1527#endif
1528 uint8_t padding[8]; /* multiple of 8 */
1529 } cfgm;
1530
1531 /** IEM part. */
1532 union
1533 {
1534#ifdef VMM_INCLUDED_SRC_include_IEMInternal_h
1535 struct IEM s;
1536#endif
1537 uint8_t padding[16]; /* multiple of 8 */
1538 } iem;
1539
1540 /** Statistics for ring-0 only components. */
1541 struct
1542 {
1543 /** GMMR0 stats. */
1544 struct
1545 {
1546 /** Chunk TLB hits. */
1547 uint64_t cChunkTlbHits;
1548 /** Chunk TLB misses. */
1549 uint64_t cChunkTlbMisses;
1550 } gmm;
 1551 uint64_t au64Padding[6]; /* probably more coming here... */
1552 } R0Stats;
1553
1554 union
1555 {
1556#ifdef VMM_INCLUDED_SRC_include_GCMInternal_h
1557 struct GCM s;
1558#endif
1559 uint8_t padding[32]; /* multiple of 8 */
1560 } gcm;
1561
 1562 /** Padding for aligning the structure size on a page boundary. */
1563 uint8_t abAlignment2[8872 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
1564
1565 /* ---- end small stuff ---- */
1566
1567 /** Array of VMCPU ring-3 pointers. */
1568 PVMCPUR3 apCpusR3[VMM_MAX_CPU_COUNT];
1569
 1570 /* This point is aligned on a 16384 boundary (for arm64 purposes). */
1571} VM;
1572#ifndef VBOX_FOR_DTRACE_LIB
1573//AssertCompileSizeAlignment(VM, 16384);
1574#endif
1575
1576
1577#ifdef IN_RC
1578RT_C_DECLS_BEGIN
1579
1580/** The VM structure.
 1581 * This is imported from the VMMRCBuiltin module, i.e. it's one of those magic
1582 * globals which we should avoid using.
1583 */
1584extern DECLIMPORT(VM) g_VM;
1585
1586/** The VMCPU structure for virtual CPU \#0.
 1587 * This is imported from the VMMRCBuiltin module, i.e. it's one of those magic
1588 * globals which we should avoid using.
1589 */
1590extern DECLIMPORT(VMCPU) g_VCpu0;
1591
1592RT_C_DECLS_END
1593#endif
1594
1595/** @} */
1596
1597#endif /* !VBOX_INCLUDED_vmm_vm_h */
1598