VirtualBox

source: vbox/trunk/include/VBox/vmm/vm.h@ 108994

Last change on this file since 108994 was 108968, checked in by vboxsync, 6 days ago

VMM,Main,Devices: Respect VBOX_VMM_TARGET_ARMV8 correctly on amd64 hosts (for IEM debugging purposes). jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 66.6 KB
1/** @file
2 * VM - The Virtual Machine, data.
3 */
4
5/*
6 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
7 *
8 * This file is part of VirtualBox base platform packages, as
9 * available from https://www.virtualbox.org.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, in version 3 of the
14 * License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <https://www.gnu.org/licenses>.
23 *
24 * The contents of this file may alternatively be used under the terms
25 * of the Common Development and Distribution License Version 1.0
26 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
27 * in the VirtualBox distribution, in which case the provisions of the
28 * CDDL are applicable instead of those of the GPL.
29 *
30 * You may elect to license modified versions of this file under the
31 * terms and conditions of either the GPL or the CDDL or both.
32 *
33 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
34 */
35
36#ifndef VBOX_INCLUDED_vmm_vm_h
37#define VBOX_INCLUDED_vmm_vm_h
38#ifndef RT_WITHOUT_PRAGMA_ONCE
39# pragma once
40#endif
41
42#ifndef VBOX_FOR_DTRACE_LIB
43# ifndef USING_VMM_COMMON_DEFS
44# error "Compile job does not include VMM_COMMON_DEFS from src/VBox/VMM/Config.kmk - make sure you really need to include this file!"
45# endif
46# include <iprt/param.h>
47# include <VBox/param.h>
48# include <VBox/types.h>
49# include <VBox/vmm/cpum.h>
50# include <VBox/vmm/stam.h>
51# include <VBox/vmm/vmapi.h>
52# include <VBox/vmm/vmm.h>
53# include <VBox/param.h>
54# include <VBox/sup.h>
55#else
56# pragma D depends_on library vbox-types.d
57# pragma D depends_on library CPUMInternal.d
58# define VMM_INCLUDED_SRC_include_CPUMInternal_h
59# define VBOX_VMM_TARGET_AGNOSTIC
60#endif
61
62#if !defined(VBOX_VMM_TARGET_AGNOSTIC) \
63 && !defined(VBOX_VMM_TARGET_X86) \
64 && !defined(VBOX_VMM_TARGET_ARMV8)
65# error "VMM target not defined"
66#endif
67
68
69
70/** @defgroup grp_vm The Virtual Machine
71 * @ingroup grp_vmm
72 * @{
73 */
74
75/**
76 * The state of a Virtual CPU.
77 *
78 * The basic state indicated here is whether the CPU has been started or not. In
79 * addition, there are sub-states when started for assisting scheduling (GVMM
80 * mostly).
81 *
82 * The transition out of the STOPPED state is done by vmR3PowerOn.
83 * The transition back to the STOPPED state is done by vmR3PowerOff.
84 *
85 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
86 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
87 */
88typedef enum VMCPUSTATE
89{
90 /** The customary invalid zero. */
91 VMCPUSTATE_INVALID = 0,
92
93 /** Virtual CPU has not yet been started. */
94 VMCPUSTATE_STOPPED,
95
96 /** CPU started. */
97 VMCPUSTATE_STARTED,
98 /** CPU started in HM context. */
99 VMCPUSTATE_STARTED_HM,
100 /** Executing guest code and can be poked (RC or STI bits of HM). */
101 VMCPUSTATE_STARTED_EXEC,
102 /** Executing guest code using NEM. */
103 VMCPUSTATE_STARTED_EXEC_NEM,
104 VMCPUSTATE_STARTED_EXEC_NEM_WAIT, /**< Executing guest code using NEM, blocked in a wait state that can be poked. */
105 VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, /**< Executing guest code using NEM, but the wait has been canceled/poked. */
106 /** Halted. */
107 VMCPUSTATE_STARTED_HALTED,
108
109 /** The end of valid virtual CPU states. */
110 VMCPUSTATE_END,
111
112 /** Ensure 32-bit type. */
113 VMCPUSTATE_32BIT_HACK = 0x7fffffff
114} VMCPUSTATE;
115
116/** Enables 64-bit FFs. */
117#define VMCPU_WITH_64_BIT_FFS
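/* With VMCPU_WITH_64_BIT_FFS defined, VMCPU::fLocalForcedActions is a uint64_t,
 * which is what allows force action flags above bit 31 (e.g. the VMX ones) to
 * exist at all. A minimal compile-time illustration of the invariant the FF
 * macros further down rely on:
 * @code
 *     AssertCompile(VMCPU_FF_VMX_MTF == RT_BIT_64(VMCPU_FF_VMX_MTF_BIT)); // bit 32, needs 64-bit FFs
 * @endcode
 */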
118
119
120/**
121 * The cross context virtual CPU structure.
122 *
123 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
124 */
125typedef struct VMCPU
126{
127 /** @name Volatile per-cpu data.
128 * @{ */
129 /** Per CPU forced action.
130 * See the VMCPU_FF_* \#defines. Updated atomically. */
131#ifdef VMCPU_WITH_64_BIT_FFS
132 uint64_t volatile fLocalForcedActions;
133#else
134 uint32_t volatile fLocalForcedActions;
135 uint32_t fForLocalForcedActionsExpansion;
136#endif
137 /** The CPU state. */
138 VMCPUSTATE volatile enmState;
139
140#ifdef VBOX_VMM_TARGET_ARMV8
141 uint32_t u32Alignment0;
142 /** The nanosecond timestamp at which the vTimer of the associated vCPU is supposed to activate,
143 * required to get out of a halt (due to wfi/wfe).
144 *
145 * @note This actually should go into TMCPU but this drags in a whole lot of padding changes
146 * and I'm not sure yet whether this will remain in this form anyway.
147 */
148 uint64_t cNsVTimerActivate;
149 /** Padding up to 64 bytes. */
150 uint8_t abAlignment0[64 - 12 - 8 - 4];
151#else
152 /** Padding up to 64 bytes. */
153 uint8_t abAlignment0[64 - 12];
154#endif
155 /** @} */
156
157 /** IEM part.
158 * @remarks This comes first as it allows the use of 8-bit immediates for the
159 * first 64 bytes of the structure, reducing code size a wee bit. */
160#if defined(VMM_INCLUDED_SRC_include_IEMInternal_h) || defined(VMM_INCLUDED_SRC_include_IEMInternal_armv8_h) /* For PDB hacking. */
161 union VMCPUUNIONIEMFULL
162#else
163 union VMCPUUNIONIEMSTUB
164#endif
165 {
166#if defined(VMM_INCLUDED_SRC_include_IEMInternal_h) || defined(VMM_INCLUDED_SRC_include_IEMInternal_armv8_h)
167 struct IEMCPU s;
168#endif
169 uint8_t padding[ 129984 /* The common base size. */
170#ifdef RT_ARCH_AMD64
171 + 32768 /* For 256 entries per TLBs. */
172#else
173 + 1048576 /* For 8192 entries per TLBs. */
174#endif
175 ]; /* multiple of 64 */
176 } iem;
177
178 /** @name Static per-cpu data.
179 * (Putting this after IEM in the hope that it is used less frequently than the IEM data.)
180 * @{ */
181 /** Ring-3 Host Context VM Pointer. */
182 PVMR3 pVMR3;
183 /** Ring-0 Host Context VM Pointer, currently used by VTG/dtrace. */
184 RTR0PTR pVCpuR0ForVtg;
185 /** Raw-mode Context VM Pointer. */
186 uint32_t pVMRC;
187 /** Padding for new raw-mode (long mode). */
188 uint32_t pVMRCPadding;
189 /** Pointer to the ring-3 UVMCPU structure. */
190 PUVMCPU pUVCpu;
191 /** The native thread handle. */
192 RTNATIVETHREAD hNativeThread;
193 /** The native R0 thread handle. (different from the R3 handle!) */
194 RTNATIVETHREAD hNativeThreadR0;
195 /** The IPRT thread handle (for VMMDevTesting). */
196 RTTHREAD hThread;
197 /** The CPU ID.
198 * This is the index into the VM::aCpu array. */
199#ifdef IN_RING0
200 VMCPUID idCpuUnsafe;
201#else
202 VMCPUID idCpu;
203#endif
204 /** The VM target platform architecture.
205 * Same as VM::enmTarget, GVM::enmTarget and GVMCPU::enmTarget. */
206#ifdef IN_RING0
207 VMTARGET enmTargetUnsafe;
208#else
209 VMTARGET enmTarget;
210#endif
211
212#if HC_ARCH_BITS != 64
213 /** Align the structures below on a 64-byte boundary and make sure they start
214 * at the same offset in both 64-bit and 32-bit builds.
215 *
216 * @remarks The alignments of the members that are larger than 48 bytes should be
217 * 64-byte for cache line reasons. Structs containing small amounts of
218 * data could be lumped together at the end with a < 64 byte padding
219 * following it (to grow into and align the struct size).
220 */
221 uint8_t abAlignment1[64 - 6 * (HC_ARCH_BITS == 32 ? 4 : 8) - 8 - 4 - 4];
222#endif
223 /** @} */
224
225 /** HM part. */
226 union VMCPUUNIONHM
227 {
228#ifdef VMM_INCLUDED_SRC_include_HMInternal_h
229 struct HMCPU s;
230#endif
231 uint8_t padding[9984]; /* multiple of 64 */
232 } hm;
233
234 /** NEM part. */
235 union VMCPUUNIONNEM
236 {
237#ifdef VMM_INCLUDED_SRC_include_NEMInternal_h
238 struct NEMCPU s;
239#endif
240 uint8_t padding[4608]; /* multiple of 64 */
241 } nem;
242
243 /** TRPM part. */
244 union VMCPUUNIONTRPM
245 {
246#ifdef VMM_INCLUDED_SRC_include_TRPMInternal_h
247 struct TRPMCPU s;
248#endif
249 uint8_t padding[128]; /* multiple of 64 */
250 } trpm;
251
252 /** TM part. */
253 union VMCPUUNIONTM
254 {
255#ifdef VMM_INCLUDED_SRC_include_TMInternal_h
256 struct TMCPU s;
257#endif
258 uint8_t padding[5760]; /* multiple of 64 */
259 } tm;
260
261 /** VMM part. */
262 union VMCPUUNIONVMM
263 {
264#ifdef VMM_INCLUDED_SRC_include_VMMInternal_h
265 struct VMMCPU s;
266#endif
267 uint8_t padding[9536]; /* multiple of 64 */
268 } vmm;
269
270 /** PDM part. */
271 union VMCPUUNIONPDM
272 {
273#ifdef VMM_INCLUDED_SRC_include_PDMInternal_h
274 struct PDMCPU s;
275#endif
276 uint8_t padding[256]; /* multiple of 64 */
277 } pdm;
278
279 /** IOM part. */
280 union VMCPUUNIONIOM
281 {
282#ifdef VMM_INCLUDED_SRC_include_IOMInternal_h
283 struct IOMCPU s;
284#endif
285 uint8_t padding[512]; /* multiple of 64 */
286 } iom;
287
288 /** DBGF part.
289 * @todo Combine this with other tiny structures. */
290 union VMCPUUNIONDBGF
291 {
292#ifdef VMM_INCLUDED_SRC_include_DBGFInternal_h
293 struct DBGFCPU s;
294#endif
295 uint8_t padding[512]; /* multiple of 64 */
296 } dbgf;
297
298 /** GIM part. */
299 union VMCPUUNIONGIM
300 {
301#ifdef VMM_INCLUDED_SRC_include_GIMInternal_h
302 struct GIMCPU s;
303#endif
304 uint8_t padding[512]; /* multiple of 64 */
305 } gim;
306
307 /* Interrupt controller, target specific. */
308 RT_GCC_EXTENSION
309 union
310 {
311#if defined(VBOX_VMM_TARGET_ARMV8) || defined(VBOX_VMM_TARGET_AGNOSTIC)
312 /** GIC part. */
313 union
314 {
315# ifdef VMM_INCLUDED_SRC_include_GICInternal_h
316 struct GICCPU s;
317# endif
318 uint8_t padding[3840]; /* multiple of 64 */
319 } gic;
320#endif
321#if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
322 /** APIC part. */
323 union
324 {
325# ifdef VMM_INCLUDED_SRC_include_APICInternal_h
326 struct APICCPU s;
327# endif
328 uint8_t padding[3840]; /* multiple of 64 */
329 } apic;
330#endif
331 };
332
333 /*
334 * Some less frequently used global members that don't need to take up
335 * precious space at the head of the structure.
336 */
337
338 /** Trace groups enable flags. */
339 uint32_t fTraceGroups; /* 64 / 44 */
340 /** Number of collisions hashing the ring-0 EMT handle. */
341 uint8_t cEmtHashCollisions;
342 uint8_t abAdHoc[3];
343 /** Profiling samples for use by ad hoc profiling. */
344 STAMPROFILEADV aStatAdHoc[8]; /* size: 40*8 = 320 */
345
346 /** Align the following members on a page boundary. */
347 uint8_t abAlignment2[1848];
348
349 /** PGM part. */
350 union VMCPUUNIONPGM
351 {
352#ifdef VMM_INCLUDED_SRC_include_PGMInternal_h
353 struct PGMCPU s;
354#endif
355 uint8_t padding[36864]; /* multiple of 4096 */
356 } pgm;
357
358 /** CPUM part. */
359 union VMCPUUNIONCPUM
360 {
361#if defined(VMM_INCLUDED_SRC_include_CPUMInternal_h) || defined(VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h)
362 struct CPUMCPU s;
363#endif
364#ifdef VMCPU_INCL_CPUM_GST_CTX
365 /** The guest CPUM context for direct use by execution engines.
366 * This is not for general consumption, but for HM, REM, IEM, and maybe a few
367 * others. The rest will use the function based CPUM API. */
368 CPUMCTX GstCtx;
369#endif
370 uint8_t padding[102400]; /* multiple of 4096 */
371 } cpum;
372
373 /** EM part. */
374 union VMCPUUNIONEM
375 {
376#ifdef VMM_INCLUDED_SRC_include_EMInternal_h
377 struct EMCPU s;
378#endif
379 uint8_t padding[40960]; /* multiple of 4096 */
380 } em;
381 uint8_t abPadding[12288];
382} VMCPU;
383
384
385#ifndef VBOX_FOR_DTRACE_LIB
386# ifndef IN_TSTVMSTRUCT
387/* Make sure the structure size is aligned on a 16384 boundary for arm64 purposes. */
388AssertCompileSizeAlignment(VMCPU, 16384);
389# endif
390
391/** @name Operations on VMCPU::enmState
392 * @{ */
393/** Gets the VMCPU state. */
394#define VMCPU_GET_STATE(pVCpu) ( (pVCpu)->enmState )
395/** Sets the VMCPU state. */
396#define VMCPU_SET_STATE(pVCpu, enmNewState) \
397 ASMAtomicWriteU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState))
398/** Compares and sets the VMCPU state. */
399#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
400 ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
401/** Checks the VMCPU state. */
402#ifdef VBOX_STRICT
403# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
404 do { \
405 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
406 AssertMsg(enmState == (enmExpectedState), \
407 ("enmState=%d enmExpectedState=%d idCpu=%u\n", \
408 enmState, enmExpectedState, (pVCpu)->idCpu)); \
409 } while (0)
410
411# define VMCPU_ASSERT_STATE_2(pVCpu, enmExpectedState, a_enmExpectedState2) \
412 do { \
413 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
414 AssertMsg( enmState == (enmExpectedState) \
415 || enmState == (a_enmExpectedState2), \
416 ("enmState=%d enmExpectedState=%d enmExpectedState2=%d idCpu=%u\n", \
417 enmState, enmExpectedState, a_enmExpectedState2, (pVCpu)->idCpu)); \
418 } while (0)
419#else
420# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) do { } while (0)
421# define VMCPU_ASSERT_STATE_2(pVCpu, enmExpectedState, a_enmExpectedState2) do { } while (0)
422#endif
423/** Tests if the state means that the CPU is started. */
424#define VMCPUSTATE_IS_STARTED(enmState) ( (enmState) > VMCPUSTATE_STOPPED )
425/** Tests if the state means that the CPU is stopped. */
426#define VMCPUSTATE_IS_STOPPED(enmState) ( (enmState) == VMCPUSTATE_STOPPED )
427/** @} */
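/* Usage sketch for the state operations above (illustrative only; the
 * surrounding halt logic is hypothetical, the macros and states are the ones
 * defined in this file):
 * @code
 *     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
 *     if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED))
 *     {
 *         // ... block until woken up ...
 *         VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
 *     }
 * @endcode
 */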
428
429
430/** The name of the raw-mode context VMM Core module. */
431#define VMMRC_MAIN_MODULE_NAME "VMMRC.rc"
432/** The name of the ring-0 context VMM Core module. */
433#define VMMR0_MAIN_MODULE_NAME "VMMR0.r0"
434
435
436/** VM Forced Action Flags.
437 *
438 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
439 * action mask of a VM.
440 *
441 * Available VM bits:
442 * 5, 6, 7, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30
443 *
444 *
445 * Available VMCPU bits:
446 * 14, 15, 36 to 63
447 *
448 * @todo If we run low on VMCPU bits, we may consider merging the SELM bits.
449 *
450 * @{
451 */
452/* Bit 0, bit 1: Reserved and must not be reused. The recompiler ASSUMES it
453 can OR the local and global FFs together and keep the two
454 VMCPU_FF_INTERRUPT_XXX flags uncorrupted. */
455/** The virtual sync clock has been stopped, go to TM until it has been
456 * restarted... */
457#define VM_FF_TM_VIRTUAL_SYNC RT_BIT_32(VM_FF_TM_VIRTUAL_SYNC_BIT)
458#define VM_FF_TM_VIRTUAL_SYNC_BIT 2
459/** PDM Queues are pending. */
460#define VM_FF_PDM_QUEUES RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
461/** The bit number for VM_FF_PDM_QUEUES. */
462#define VM_FF_PDM_QUEUES_BIT 3
463/** PDM DMA transfers are pending. */
464#define VM_FF_PDM_DMA RT_BIT_32(VM_FF_PDM_DMA_BIT)
465/** The bit number for VM_FF_PDM_DMA. */
466#define VM_FF_PDM_DMA_BIT 4
467/** This action forces the VM to call DBGF so DBGF can service debugger
468 * requests in the emulation thread.
469 * This action flag stays asserted until DBGF clears it. */
470#define VM_FF_DBGF RT_BIT_32(VM_FF_DBGF_BIT)
471/** The bit number for VM_FF_DBGF. */
472#define VM_FF_DBGF_BIT 8
473/** This action forces the VM to service pending requests from other
474 * thread or requests which must be executed in another context. */
475#define VM_FF_REQUEST RT_BIT_32(VM_FF_REQUEST_BIT)
476#define VM_FF_REQUEST_BIT 9
477/** Check for VM state changes and take appropriate action. */
478#define VM_FF_CHECK_VM_STATE RT_BIT_32(VM_FF_CHECK_VM_STATE_BIT)
479/** The bit number for VM_FF_CHECK_VM_STATE. */
480#define VM_FF_CHECK_VM_STATE_BIT 10
481/** Reset the VM. (postponed) */
482#define VM_FF_RESET RT_BIT_32(VM_FF_RESET_BIT)
483/** The bit number for VM_FF_RESET. */
484#define VM_FF_RESET_BIT 11
485/** EMT rendezvous in VMM. */
486#define VM_FF_EMT_RENDEZVOUS RT_BIT_32(VM_FF_EMT_RENDEZVOUS_BIT)
487/** The bit number for VM_FF_EMT_RENDEZVOUS. */
488#define VM_FF_EMT_RENDEZVOUS_BIT 12
489
490/** PGM needs to allocate handy pages. */
491#define VM_FF_PGM_NEED_HANDY_PAGES RT_BIT_32(VM_FF_PGM_NEED_HANDY_PAGES_BIT)
492#define VM_FF_PGM_NEED_HANDY_PAGES_BIT 18
493/** PGM is out of memory.
494 * Abandon all loops and code paths which can be resumed and get up to the EM
495 * loops. */
496#define VM_FF_PGM_NO_MEMORY RT_BIT_32(VM_FF_PGM_NO_MEMORY_BIT)
497#define VM_FF_PGM_NO_MEMORY_BIT 19
498 /** PGM is about to perform a lightweight pool flush.
499 * Guest SMP: all EMT threads should return to ring-3.
500 */
501#define VM_FF_PGM_POOL_FLUSH_PENDING RT_BIT_32(VM_FF_PGM_POOL_FLUSH_PENDING_BIT)
502#define VM_FF_PGM_POOL_FLUSH_PENDING_BIT 20
503/** Suspend the VM - debug only. */
504#define VM_FF_DEBUG_SUSPEND RT_BIT_32(VM_FF_DEBUG_SUSPEND_BIT)
505#define VM_FF_DEBUG_SUSPEND_BIT 31
506
507
508#if defined(VBOX_VMM_TARGET_ARMV8) || defined(VBOX_VMM_TARGET_AGNOSTIC)
509/** This action forces the VM to inject an IRQ into the guest. */
510# define VMCPU_FF_INTERRUPT_IRQ RT_BIT_64(VMCPU_FF_INTERRUPT_IRQ_BIT)
511# define VMCPU_FF_INTERRUPT_IRQ_BIT 0
512/** This action forces the VM to inject an FIQ into the guest. */
513# define VMCPU_FF_INTERRUPT_FIQ RT_BIT_64(VMCPU_FF_INTERRUPT_FIQ_BIT)
514# define VMCPU_FF_INTERRUPT_FIQ_BIT 1
515#endif
516#if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
517/** This action forces the VM to check any pending interrupts on the APIC. */
518# define VMCPU_FF_INTERRUPT_APIC RT_BIT_64(VMCPU_FF_INTERRUPT_APIC_BIT)
519# define VMCPU_FF_INTERRUPT_APIC_BIT 0
520/** This action forces the VM to check any pending interrupts on the PIC. */
521# define VMCPU_FF_INTERRUPT_PIC RT_BIT_64(VMCPU_FF_INTERRUPT_PIC_BIT)
522# define VMCPU_FF_INTERRUPT_PIC_BIT 1
523#endif
524/** This action forces the VM to schedule and run pending timers (TM).
525 * @remarks Don't move - PATM compatibility. */
526#define VMCPU_FF_TIMER RT_BIT_64(VMCPU_FF_TIMER_BIT)
527#define VMCPU_FF_TIMER_BIT 2
528/** This action forces the VM to check any pending NMIs. */
529#define VMCPU_FF_INTERRUPT_NMI RT_BIT_64(VMCPU_FF_INTERRUPT_NMI_BIT)
530#define VMCPU_FF_INTERRUPT_NMI_BIT 3
531/** This action forces the VM to check any pending SMIs. */
532#define VMCPU_FF_INTERRUPT_SMI RT_BIT_64(VMCPU_FF_INTERRUPT_SMI_BIT)
533#define VMCPU_FF_INTERRUPT_SMI_BIT 4
534/** PDM critical section unlocking is pending, process promptly upon return to R3. */
535#define VMCPU_FF_PDM_CRITSECT RT_BIT_64(VMCPU_FF_PDM_CRITSECT_BIT)
536#define VMCPU_FF_PDM_CRITSECT_BIT 5
537/** Special EM internal force flag that is used by EMUnhaltAndWakeUp() to force
538 * the virtual CPU out of the next (/current) halted state. It is not processed
539 * nor cleared by emR3ForcedActions (similar to VMCPU_FF_BLOCK_NMIS), instead it
540 * is cleared the next time EM leaves the HALTED state. */
541#define VMCPU_FF_UNHALT RT_BIT_64(VMCPU_FF_UNHALT_BIT)
542#define VMCPU_FF_UNHALT_BIT 6
543/** Pending IEM action (mask). */
544#define VMCPU_FF_IEM RT_BIT_64(VMCPU_FF_IEM_BIT)
545/** Pending IEM action (bit number). */
546#define VMCPU_FF_IEM_BIT 7
547/** Pending APIC action (bit number). */
548#define VMCPU_FF_UPDATE_APIC_BIT 8
549/** This action forces the VM to update APIC's asynchronously arrived
550 * interrupts as pending interrupts. */
551#define VMCPU_FF_UPDATE_APIC RT_BIT_64(VMCPU_FF_UPDATE_APIC_BIT)
552/** This action forces the VM to service pending requests from other
553 * thread or requests which must be executed in another context. */
554#define VMCPU_FF_REQUEST RT_BIT_64(VMCPU_FF_REQUEST_BIT)
555#define VMCPU_FF_REQUEST_BIT 9
556/** Pending DBGF event (alternative to passing VINF_EM_DBG_EVENT around). */
557#define VMCPU_FF_DBGF RT_BIT_64(VMCPU_FF_DBGF_BIT)
558/** The bit number for VMCPU_FF_DBGF. */
559#define VMCPU_FF_DBGF_BIT 10
560/** Hardware virtualized nested-guest interrupt pending. */
561#define VMCPU_FF_INTERRUPT_NESTED_GUEST RT_BIT_64(VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT)
562#define VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT 11
563/** This action forces PGM to update changes to CR3 when the guest was in HM mode
564 * (when using nested paging). */
565#define VMCPU_FF_HM_UPDATE_CR3 RT_BIT_64(VMCPU_FF_HM_UPDATE_CR3_BIT)
566#define VMCPU_FF_HM_UPDATE_CR3_BIT 12
567#if defined(VBOX_VMM_TARGET_ARMV8) || defined(VBOX_VMM_TARGET_AGNOSTIC)
568# define VMCPU_FF_VTIMER_ACTIVATED RT_BIT_64(VMCPU_FF_VTIMER_ACTIVATED_BIT) /**< The vTimer of the vCPU has activated (ARMv8); see cNsVTimerActivate. */
569# define VMCPU_FF_VTIMER_ACTIVATED_BIT 13
570#endif
571#if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
572/* Bit 13 used to be VMCPU_FF_HM_UPDATE_PAE_PDPES. */
573#endif
574/** This action forces the VM to resync the page tables before going
575 * back to execute guest code. (GLOBAL FLUSH) */
576#define VMCPU_FF_PGM_SYNC_CR3 RT_BIT_64(VMCPU_FF_PGM_SYNC_CR3_BIT)
577#define VMCPU_FF_PGM_SYNC_CR3_BIT 16
578/** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
579 * (NON-GLOBAL FLUSH) */
580#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL RT_BIT_64(VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL_BIT)
581#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL_BIT 17
582/** Check for pending TLB shootdown actions (deprecated).
583 * Reserved for future HM re-use if necessary / safe.
584 * Consumer: HM */
585#define VMCPU_FF_TLB_SHOOTDOWN_UNUSED RT_BIT_64(VMCPU_FF_TLB_SHOOTDOWN_UNUSED_BIT)
586#define VMCPU_FF_TLB_SHOOTDOWN_UNUSED_BIT 18
587/** Check for pending TLB flush action.
588 * Consumer: HM
589 * @todo rename to VMCPU_FF_HM_TLB_FLUSH */
590#define VMCPU_FF_TLB_FLUSH RT_BIT_64(VMCPU_FF_TLB_FLUSH_BIT)
591/** The bit number for VMCPU_FF_TLB_FLUSH. */
592#define VMCPU_FF_TLB_FLUSH_BIT 19
593/* 20 used to be VMCPU_FF_TRPM_SYNC_IDT (raw-mode only). */
594/* 21 used to be VMCPU_FF_SELM_SYNC_TSS (raw-mode only). */
595/* 22 used to be VMCPU_FF_SELM_SYNC_GDT (raw-mode only). */
596/* 23 used to be VMCPU_FF_SELM_SYNC_LDT (raw-mode only). */
597/* 24 used to be VMCPU_FF_INHIBIT_INTERRUPTS, which moved to CPUMCTX::eflags.uBoth in v7.0.4. */
598/* 25 used to be VMCPU_FF_BLOCK_NMIS, which moved to CPUMCTX::eflags.uBoth in v7.0.4. */
599/** Force return to Ring-3. */
600#define VMCPU_FF_TO_R3 RT_BIT_64(VMCPU_FF_TO_R3_BIT)
601#define VMCPU_FF_TO_R3_BIT 28
602/** Force return to ring-3 to service pending I/O or MMIO write.
603 * This is a backup for mechanism VINF_IOM_R3_IOPORT_COMMIT_WRITE and
604 * VINF_IOM_R3_MMIO_COMMIT_WRITE, allowing VINF_EM_DBG_BREAKPOINT and similar
605 * status codes to be propagated at the same time without loss. */
606#define VMCPU_FF_IOM RT_BIT_64(VMCPU_FF_IOM_BIT)
607#define VMCPU_FF_IOM_BIT 29
608/* 30 used to be VMCPU_FF_CPUM */
609/** VMX-preemption timer expired. */
610#define VMCPU_FF_VMX_PREEMPT_TIMER RT_BIT_64(VMCPU_FF_VMX_PREEMPT_TIMER_BIT)
611#define VMCPU_FF_VMX_PREEMPT_TIMER_BIT 31
612/** Pending MTF (Monitor Trap Flag) event. */
613#define VMCPU_FF_VMX_MTF RT_BIT_64(VMCPU_FF_VMX_MTF_BIT)
614#define VMCPU_FF_VMX_MTF_BIT 32
615/** VMX APIC-write emulation pending.
616 * @todo possible candidate for internal EFLAGS, or maybe just a summary bit
617 * (see also VMCPU_FF_VMX_INT_WINDOW). */
618#define VMCPU_FF_VMX_APIC_WRITE RT_BIT_64(VMCPU_FF_VMX_APIC_WRITE_BIT)
619#define VMCPU_FF_VMX_APIC_WRITE_BIT 33
620/** VMX interrupt-window event pending.
621 *
622 * "Pending" is misleading here, it would be better to say that the event need
623 * to be generated at the next opportunity and that this flag causes it to be
624 * polled for on every instruction boundrary and such.
625 *
626 * @todo Change the IEM side of this to not poll but to track down the places
627 * where it can be generated and set an internal EFLAGS bit that causes it
628 * to be checked out when finishing the current instruction. */
629#define VMCPU_FF_VMX_INT_WINDOW RT_BIT_64(VMCPU_FF_VMX_INT_WINDOW_BIT)
630#define VMCPU_FF_VMX_INT_WINDOW_BIT 34
631/** VMX NMI-window event pending.
632 * Same "pending" comment and @todo as in VMCPU_FF_VMX_INT_WINDOW. */
633#define VMCPU_FF_VMX_NMI_WINDOW RT_BIT_64(VMCPU_FF_VMX_NMI_WINDOW_BIT)
634#define VMCPU_FF_VMX_NMI_WINDOW_BIT 35
635
636
637/** Externally forced VM actions. Used to quit the idle/wait loop. */
638#define VM_FF_EXTERNAL_SUSPENDED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_EMT_RENDEZVOUS )
639/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
640#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK ( VMCPU_FF_REQUEST | VMCPU_FF_DBGF )
641
642/** Externally forced VM actions. Used to quit the idle/wait loop. */
643#define VM_FF_EXTERNAL_HALTED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST \
644 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS )
645
646#ifndef VBOX_VMM_TARGET_AGNOSTIC
647/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
648# if defined(VBOX_VMM_TARGET_ARMV8)
649# define VMCPU_FF_EXTERNAL_HALTED_MASK ( VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
650 | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
651 | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF \
652 | VMCPU_FF_VTIMER_ACTIVATED)
653# else
654# define VMCPU_FF_EXTERNAL_HALTED_MASK ( VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
655 | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
656 | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF \
657 | VMCPU_FF_INTERRUPT_NESTED_GUEST)
658# endif
659#endif
660
661/** High priority VM pre-execution actions. */
662#define VM_FF_HIGH_PRIORITY_PRE_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC \
663 | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
664 | VM_FF_EMT_RENDEZVOUS )
665#ifndef VBOX_VMM_TARGET_AGNOSTIC
666/** High priority VMCPU pre-execution actions. */
667# if defined(VBOX_VMM_TARGET_ARMV8)
668# define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
669 | VMCPU_FF_DBGF )
670# else
671# define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
672 | VMCPU_FF_UPDATE_APIC | VMCPU_FF_DBGF \
673 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
674 | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
675 | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW )
676# endif
677#endif
678
679/** High priority VM pre raw-mode execution mask. */
680#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY )
681/** High priority VMCPU pre raw-mode execution mask. */
682#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL )
683
684/** High priority post-execution actions. */
685#define VM_FF_HIGH_PRIORITY_POST_MASK ( VM_FF_PGM_NO_MEMORY )
686/** High priority post-execution actions. */
687#define VMCPU_FF_HIGH_PRIORITY_POST_MASK ( VMCPU_FF_PDM_CRITSECT | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_IEM | VMCPU_FF_IOM )
688
689/** Normal priority VM post-execution actions. */
690#define VM_FF_NORMAL_PRIORITY_POST_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET \
691 | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
692/** Normal priority VMCPU post-execution actions. */
693#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK ( VMCPU_FF_DBGF )
694
695/** Normal priority VM actions. */
696#define VM_FF_NORMAL_PRIORITY_MASK ( VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS)
697/** Normal priority VMCPU actions. */
698#define VMCPU_FF_NORMAL_PRIORITY_MASK ( VMCPU_FF_REQUEST )
699
700/** Flags to clear before resuming guest execution. */
701#define VMCPU_FF_RESUME_GUEST_MASK ( VMCPU_FF_TO_R3 )
702
703
704/** VM flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
705#define VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
706 | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_RESET)
707/** VM flags that cause the REP[|NE|E] STRINS loops to yield. */
708#define VM_FF_YIELD_REPSTR_MASK ( VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
709 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_DBGF | VM_FF_DEBUG_SUSPEND )
710/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
711#ifdef IN_RING3
712# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK (VMCPU_FF_DBGF | VMCPU_FF_VMX_MTF)
713#else
714# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK (VMCPU_FF_TO_R3 | VMCPU_FF_IEM | VMCPU_FF_IOM | VMCPU_FF_DBGF | VMCPU_FF_VMX_MTF)
715#endif
716
717#if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
718/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
719 * enabled. */
720# define VMCPU_FF_YIELD_REPSTR_MASK ( VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
721 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC \
722 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_PDM_CRITSECT \
723 | VMCPU_FF_TIMER | VMCPU_FF_REQUEST \
724 | VMCPU_FF_INTERRUPT_NESTED_GUEST )
725/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
726 * disabled. */
727# define VMCPU_FF_YIELD_REPSTR_NOINT_MASK ( VMCPU_FF_YIELD_REPSTR_MASK \
728 & ~( VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC \
729 | VMCPU_FF_INTERRUPT_NESTED_GUEST) )
730#endif
731
732/** VM Flags that cause the HM loops to go back to ring-3. */
733#define VM_FF_HM_TO_R3_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
734 | VM_FF_PDM_QUEUES | VM_FF_EMT_RENDEZVOUS)
735/** VMCPU Flags that cause the HM loops to go back to ring-3. */
736#define VMCPU_FF_HM_TO_R3_MASK ( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT \
737 | VMCPU_FF_IEM | VMCPU_FF_IOM)
738
739/** High priority ring-0 VM pre HM-mode execution mask. */
740#define VM_FF_HP_R0_PRE_HM_MASK (VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
741/** High priority ring-0 VMCPU pre HM-mode execution mask. */
742#define VMCPU_FF_HP_R0_PRE_HM_MASK ( VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 \
743 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST \
744 | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER)
745/** High priority ring-0 VM pre HM-mode execution mask, single stepping. */
746#define VM_FF_HP_R0_PRE_HM_STEP_MASK (VM_FF_HP_R0_PRE_HM_MASK & ~( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES \
747 | VM_FF_EMT_RENDEZVOUS | VM_FF_REQUEST \
748 | VM_FF_PDM_DMA) )
749/** High priority ring-0 VMCPU pre HM-mode execution mask, single stepping. */
750#define VMCPU_FF_HP_R0_PRE_HM_STEP_MASK (VMCPU_FF_HP_R0_PRE_HM_MASK & ~( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER \
751 | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_REQUEST) )
752
753/** All the VMX nested-guest flags. */
754#define VMCPU_FF_VMX_ALL_MASK ( VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
755 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW )
756
757/** All the forced VM flags. */
758#define VM_FF_ALL_MASK (UINT32_MAX)
759/** All the forced VMCPU flags. */
760#define VMCPU_FF_ALL_MASK ( UINT32_MAX \
761 | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_INT_WINDOW \
762 | VMCPU_FF_VMX_NMI_WINDOW )
763
764/** All the forced VM flags except those related to raw-mode and hardware
765 * assisted execution. */
766#define VM_FF_ALL_REM_MASK (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
767/** All the forced VMCPU flags except those related to raw-mode and hardware
768 * assisted execution. */
769#define VMCPU_FF_ALL_REM_MASK (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_TLB_FLUSH))
770
771#ifndef VBOX_FOR_DTRACE_LIB
772AssertCompile( ((VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK | VM_FF_YIELD_REPSTR_MASK)
773 & (VM_FF_HIGH_PRIORITY_PRE_RAW_MASK & ~VM_FF_ALL_REM_MASK)) == 0);
774AssertCompile((VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK & (VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK & ~VMCPU_FF_ALL_REM_MASK)) == 0);
775#endif
776
777/** @} */
778
779/** @def VM_FF_SET
780 * Sets a single force action flag.
781 *
782 * @param pVM The cross context VM structure.
783 * @param fFlag The flag to set.
784 */
785#define VM_FF_SET(pVM, fFlag) do { \
786 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
787 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
788 ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
789 } while (0)
790
791/** @def VMCPU_FF_SET
792 * Sets a single force action flag for the given VCPU.
793 *
794 * @param pVCpu The cross context virtual CPU structure.
795 * @param fFlag The flag to set.
796 * @sa VMCPU_FF_SET_MASK
797 */
798#ifdef VMCPU_WITH_64_BIT_FFS
799# define VMCPU_FF_SET(pVCpu, fFlag) do { \
800 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
801 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
802 ASMAtomicBitSet(&(pVCpu)->fLocalForcedActions, fFlag##_BIT); \
803 } while (0)
804#else
805# define VMCPU_FF_SET(pVCpu, fFlag) do { \
806 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
807 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
808 ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag)); \
809 } while (0)
810#endif
811
812/** @def VMCPU_FF_SET_MASK
813 * Sets two or more force action flags for the given VCPU.
814 *
815 * @param pVCpu The cross context virtual CPU structure.
816 * @param fFlags The flags to set.
817 * @sa VMCPU_FF_SET
818 */
819#ifdef VMCPU_WITH_64_BIT_FFS
820# if ARCH_BITS > 32
821# define VMCPU_FF_SET_MASK(pVCpu, fFlags) \
822 do { ASMAtomicOrU64(&pVCpu->fLocalForcedActions, (fFlags)); } while (0)
823# else
824# define VMCPU_FF_SET_MASK(pVCpu, fFlags) do { \
825 if (!((fFlags) >> 32)) ASMAtomicOrU32((uint32_t volatile *)&pVCpu->fLocalForcedActions, (uint32_t)(fFlags)); \
826 else ASMAtomicOrU64(&pVCpu->fLocalForcedActions, (fFlags)); \
827 } while (0)
828# endif
829#else
830# define VMCPU_FF_SET_MASK(pVCpu, fFlags) \
831 do { ASMAtomicOrU32(&pVCpu->fLocalForcedActions, (fFlags)); } while (0)
832#endif
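/* Usage sketch (illustrative only): raising a single FF and a pair of FFs on a
 * vCPU; both forms update VMCPU::fLocalForcedActions atomically:
 * @code
 *     VMCPU_FF_SET(pVCpu, VMCPU_FF_TIMER);
 *     VMCPU_FF_SET_MASK(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
 * @endcode
 */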
833
834/** @def VM_FF_CLEAR
835 * Clears a single force action flag.
836 *
837 * @param pVM The cross context VM structure.
838 * @param fFlag The flag to clear.
839 */
840#define VM_FF_CLEAR(pVM, fFlag) do { \
841 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
842 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
843 ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
844 } while (0)
845
846/** @def VMCPU_FF_CLEAR
847 * Clears a single force action flag for the given VCPU.
848 *
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param fFlag The flag to clear.
851 */
852#ifdef VMCPU_WITH_64_BIT_FFS
853# define VMCPU_FF_CLEAR(pVCpu, fFlag) do { \
854 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
855 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
856 ASMAtomicBitClear(&(pVCpu)->fLocalForcedActions, fFlag##_BIT); \
857 } while (0)
858#else
859# define VMCPU_FF_CLEAR(pVCpu, fFlag) do { \
860 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
861 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
862 ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag)); \
863 } while (0)
864#endif
865
866/** @def VMCPU_FF_CLEAR_MASK
867 * Clears two or more force action flags for the given VCPU.
868 *
869 * @param pVCpu The cross context virtual CPU structure.
870 * @param fFlags The flags to clear.
871 */
872#ifdef VMCPU_WITH_64_BIT_FFS
873# if ARCH_BITS > 32
874# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) \
875 do { ASMAtomicAndU64(&(pVCpu)->fLocalForcedActions, ~(fFlags)); } while (0)
876# else
877# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) do { \
878 if (!((fFlags) >> 32)) ASMAtomicAndU32((uint32_t volatile *)&(pVCpu)->fLocalForcedActions, ~(uint32_t)(fFlags)); \
879 else ASMAtomicAndU64(&(pVCpu)->fLocalForcedActions, ~(fFlags)); \
880 } while (0)
881# endif
882#else
883# define VMCPU_FF_CLEAR_MASK(pVCpu, fFlags) \
884 do { ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlags)); } while (0)
885#endif
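/* Usage sketch (illustrative only): clearing FFs, e.g. right before resuming
 * guest execution:
 * @code
 *     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
 *     VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
 * @endcode
 */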
886
887/** @def VM_FF_IS_SET
888 * Checks if a single force action flag is set.
889 *
890 * @param pVM The cross context VM structure.
891 * @param fFlag The flag to check.
892 * @sa VM_FF_IS_ANY_SET
893 */
894#if !defined(VBOX_STRICT) || !defined(RT_COMPILER_SUPPORTS_LAMBDA)
895# define VM_FF_IS_SET(pVM, fFlag) RT_BOOL((pVM)->fGlobalForcedActions & (fFlag))
896#else
897# define VM_FF_IS_SET(pVM, fFlag) \
898 ([](PVM a_pVM) -> bool \
899 { \
900 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
901 AssertCompile((fFlag) == RT_BIT_32(fFlag##_BIT)); \
902 return RT_BOOL(a_pVM->fGlobalForcedActions & (fFlag)); \
903 }(pVM))
904#endif
905
906/** @def VMCPU_FF_IS_SET
907 * Checks if a single force action flag is set for the given VCPU.
908 *
909 * @param pVCpu The cross context virtual CPU structure.
910 * @param fFlag The flag to check.
911 * @sa VMCPU_FF_IS_ANY_SET
912 */
913#if !defined(VBOX_STRICT) || !defined(RT_COMPILER_SUPPORTS_LAMBDA)
914# define VMCPU_FF_IS_SET(pVCpu, fFlag) RT_BOOL((pVCpu)->fLocalForcedActions & (fFlag))
915#else
916# define VMCPU_FF_IS_SET(pVCpu, fFlag) \
917 ([](PCVMCPU a_pVCpu) -> bool \
918 { \
919 AssertCompile(RT_IS_POWER_OF_TWO(fFlag)); \
920 AssertCompile((fFlag) == RT_BIT_64(fFlag##_BIT)); \
921 return RT_BOOL(a_pVCpu->fLocalForcedActions & (fFlag)); \
922 }(pVCpu))
923#endif
924
925/** @def VM_FF_IS_ANY_SET
926 * Checks if one or more force actions in the specified set are pending.
927 *
928 * @param pVM The cross context VM structure.
929 * @param fFlags The flags to check for.
930 * @sa VM_FF_IS_SET
931 */
932#define VM_FF_IS_ANY_SET(pVM, fFlags) RT_BOOL((pVM)->fGlobalForcedActions & (fFlags))
933
934/** @def VMCPU_FF_IS_ANY_SET
935 * Checks if one or more force action flags in the specified set are set for the given VCPU.
936 *
937 * @param pVCpu The cross context virtual CPU structure.
938 * @param fFlags The flags to check for.
939 * @sa VMCPU_FF_IS_SET
940 */
941#define VMCPU_FF_IS_ANY_SET(pVCpu, fFlags) RT_BOOL((pVCpu)->fLocalForcedActions & (fFlags))
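/* Usage sketch (illustrative only; the early-return context is hypothetical):
 * a halted EMT typically wakes up when any externally forced action is pending:
 * @code
 *     if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
 *         || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_HALTED_MASK))
 *         return VINF_SUCCESS; // leave the halted state
 * @endcode
 */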
942
943/** @def VM_FF_TEST_AND_CLEAR
944 * Checks if one (!) force action in the specified set is pending and clears it atomically.
945 *
946 * @returns true if the bit was set.
947 * @returns false if the bit was clear.
948 * @param pVM The cross context VM structure.
949 * @param fFlag Flag constant to check and clear (_BIT is appended).
950 */
951#define VM_FF_TEST_AND_CLEAR(pVM, fFlag) (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, fFlag##_BIT))
952
953/** @def VMCPU_FF_TEST_AND_CLEAR
954 * Checks if one (!) force action in the specified set is pending and clears it atomically.
955 *
956 * @returns true if the bit was set.
957 * @returns false if the bit was clear.
958 * @param pVCpu The cross context virtual CPU structure.
959 * @param fFlag Flag constant to check and clear (_BIT is appended).
960 */
961#define VMCPU_FF_TEST_AND_CLEAR(pVCpu, fFlag) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, fFlag##_BIT))
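/* Usage sketch (illustrative only): consuming a postponed reset request in one
 * atomic step:
 * @code
 *     if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
 *     {
 *         // ... perform the postponed VM reset ...
 *     }
 * @endcode
 */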
962
963/** @def VM_FF_IS_PENDING_EXCEPT
964 * Checks if one or more force actions in the specified set are pending while one
965 * or more other ones are not.
966 *
967 * @param pVM The cross context VM structure.
968 * @param fFlags The flags to check for.
969 * @param fExcpt The flags that should not be set.
970 */
971#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt) \
972 ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )
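/* Usage sketch (illustrative only): check for normal priority work unless PGM
 * has run out of memory:
 * @code
 *     if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
 *     {
 *         // ... service queues, requests, DMA, rendezvous ...
 *     }
 * @endcode
 */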
973
974/** @def VM_IS_EMT
975 * Checks if the current thread is the emulation thread (EMT).
976 *
977 * @remark The ring-0 variation will need attention if we expand the ring-0
978 * code to let threads other than EMT mess around with the VM.
979 */
980#ifdef IN_RC
981# define VM_IS_EMT(pVM) true
982#else
983# define VM_IS_EMT(pVM) (VMMGetCpu(pVM) != NULL)
984#endif
985
986/** @def VMCPU_IS_EMT
987 * Checks if the current thread is the emulation thread (EMT) for the specified
988 * virtual CPU.
989 */
990#ifdef IN_RC
991# define VMCPU_IS_EMT(pVCpu) true
992#else
993# define VMCPU_IS_EMT(pVCpu) ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
994#endif
995
996/** @def VM_ASSERT_EMT
997 * Asserts that the current thread IS the emulation thread (EMT).
998 */
999#ifdef IN_RC
1000# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
1001#elif defined(IN_RING0)
1002# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
1003#else
1004# define VM_ASSERT_EMT(pVM) \
1005 AssertMsg(VM_IS_EMT(pVM), \
1006 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
1007#endif
1008
1009/** @def VMCPU_ASSERT_EMT
1010 * Asserts that the current thread IS the emulation thread (EMT) of the
1011 * specified virtual CPU.
1012 */
1013#ifdef IN_RC
1014# define VMCPU_ASSERT_EMT(pVCpu) Assert(VMCPU_IS_EMT(pVCpu))
1015#elif defined(IN_RING0)
1016# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
1017 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%u\n", \
1018 RTThreadNativeSelf(), (pVCpu) ? (pVCpu)->hNativeThreadR0 : 0, \
1019 (pVCpu) ? (pVCpu)->idCpu : 0))
1020#else
1021# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
1022 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
1023 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
1024#endif
1025
1026/** @def VM_ASSERT_EMT_RETURN
1027 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
1028 */
1029#ifdef IN_RC
1030# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
1031#elif defined(IN_RING0)
1032# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
1033#else
1034# define VM_ASSERT_EMT_RETURN(pVM, rc) \
1035 AssertMsgReturn(VM_IS_EMT(pVM), \
1036 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
1037 (rc))
1038#endif
1039
1040/** @def VMCPU_ASSERT_EMT_RETURN
1041 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
1042 */
1043#ifdef IN_RC
1044# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
1045#elif defined(IN_RING0)
1046# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
1047#else
1048# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
1049 AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
1050 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
1051 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
1052 (rc))
1053#endif
1054
1055/** @def VMCPU_ASSERT_EMT_OR_GURU
1056 * Asserts that the current thread IS the emulation thread (EMT) of the
1057 * specified virtual CPU.
1058 */
1059#if defined(IN_RC) || defined(IN_RING0)
1060# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) Assert( VMCPU_IS_EMT(pVCpu) \
1061 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
1062 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS )
1063#else
1064# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) \
1065 AssertMsg( VMCPU_IS_EMT(pVCpu) \
1066 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
1067 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS, \
1068 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
1069 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
1070#endif
1071
1072/** @def VMCPU_ASSERT_EMT_OR_NOT_RUNNING
1073 * Asserts that the current thread IS the emulation thread (EMT) of the
1074 * specified virtual CPU or the VM is not running.
1075 */
1076#if defined(IN_RC) || defined(IN_RING0)
1077# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
1078 Assert( VMCPU_IS_EMT(pVCpu) \
1079 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)) )
1080#else
1081# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
1082 AssertMsg( VMCPU_IS_EMT(pVCpu) \
1083 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)), \
1084 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
1085 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
1086#endif
1087
1088/** @def VMSTATE_IS_RUNNING
1089 * Checks if the given state indicates a running VM.
1090 */
1091#define VMSTATE_IS_RUNNING(a_enmVMState) \
1092 ( (a_enmVMState) == VMSTATE_RUNNING \
1093 || (a_enmVMState) == VMSTATE_RUNNING_LS )
1094
1095/** @def VM_IS_RUNNING_FOR_ASSERTIONS_ONLY
1096 * Checks if the VM is running.
1097 * @note This is only for pure debug assertions. No AssertReturn or similar!
1098 * @sa VMSTATE_IS_RUNNING
1099 */
1100#define VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM) \
1101 ( (pVM)->enmVMState == VMSTATE_RUNNING \
1102 || (pVM)->enmVMState == VMSTATE_RUNNING_LS )
1103
1104
1105/** @def VMSTATE_IS_POWERED_ON
1106 * Checks if the given state indicates the VM is powered on.
1107 *
1108 * @note Excludes all error states, so a powered on VM that hit a fatal error,
1109 * guru meditation, state load failure or similar will not be considered
1110 * powered on by this test.
1111 */
1112#define VMSTATE_IS_POWERED_ON(a_enmVMState) \
1113 ( (a_enmVMState) >= VMSTATE_RESUMING && (a_enmVMState) < VMSTATE_POWERING_OFF )
1114
1115/** @def VM_ASSERT_IS_NOT_RUNNING
1116 * Asserts that the VM is not running.
1117 */
1118#if defined(IN_RC) || defined(IN_RING0)
1119#define VM_ASSERT_IS_NOT_RUNNING(pVM) Assert(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM))
1120#else
1121#define VM_ASSERT_IS_NOT_RUNNING(pVM) AssertMsg(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM), \
1122 ("VM is running. enmVMState=%d\n", (pVM)->enmVMState))
1123#endif
1124
1125/** @def VM_ASSERT_EMT0
1126 * Asserts that the current thread IS emulation thread \#0 (EMT0).
1127 */
1128#ifdef IN_RING3
1129# define VM_ASSERT_EMT0(a_pVM) VMCPU_ASSERT_EMT((a_pVM)->apCpusR3[0])
1130#else
1131# define VM_ASSERT_EMT0(a_pVM) VMCPU_ASSERT_EMT(&(a_pVM)->aCpus[0])
1132#endif
1133
1134/** @def VM_ASSERT_EMT0_RETURN
1135 * Asserts that the current thread IS emulation thread \#0 (EMT0) and returns if
1136 * it isn't.
1137 */
1138#ifdef IN_RING3
1139# define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN((pVM)->apCpusR3[0], (rc))
1140#else
1141# define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN(&(pVM)->aCpus[0], (rc))
1142#endif
1143
1144
1145/**
1146 * Asserts that the current thread is NOT the emulation thread.
1147 */
1148#define VM_ASSERT_OTHER_THREAD(pVM) \
1149 AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))
1150
1151
1152/** @def VM_ASSERT_STATE
1153 * Asserts a certain VM state.
1154 */
1155#define VM_ASSERT_STATE(pVM, _enmState) \
1156 AssertMsg((pVM)->enmVMState == (_enmState), \
1157 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))
1158
1159/** @def VM_ASSERT_STATE_RETURN
1160 * Asserts a certain VM state and returns if it doesn't match.
1161 */
1162#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
1163 AssertMsgReturn((pVM)->enmVMState == (_enmState), \
1164 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
1165 (rc))
1166
1167/** @def VM_IS_VALID_EXT
1168 * Check that a VM handle is valid for external access, i.e. not being destroyed
1169 * or terminated and matching the target platform architecture (ring-3). */
1170#ifdef VMTARGET_DEFAULT
1171# define VM_IS_VALID_EXT(pVM) \
1172 ( RT_VALID_ALIGNED_PTR(pVM, HOST_PAGE_SIZE_DYNAMIC) \
1173 && ( (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
1174 || ( (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
1175 && VM_IS_EMT(pVM))) \
1176 && (pVM)->enmTarget == VMTARGET_DEFAULT)
1177#else
1178# define VM_IS_VALID_EXT(pVM) \
1179 ( RT_VALID_ALIGNED_PTR(pVM, HOST_PAGE_SIZE_DYNAMIC) \
1180 && ( (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
1181 || ( (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
1182 && VM_IS_EMT(pVM))) )
1183#endif
1184
1185/** @def VM_ASSERT_VALID_EXT_RETURN
1186 * Asserts that a VM handle is valid for external access, i.e. not being destroyed
1187 * or terminated.
1188 */
1189#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
1190 AssertMsgReturn(VM_IS_VALID_EXT(pVM), \
1191 ("pVM=%p state %s enmTarget=%#x\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, HOST_PAGE_SIZE_DYNAMIC) \
1192 ? VMGetStateName(pVM->enmVMState) : "", (pVM)->enmTarget), \
1193 (rc))
1194
1195/** @def VMCPU_IS_VALID_EXT
1196 * Checks that a VMCPU handle is valid for external access, i.e. not being
1197 * destroyed or terminated and matching the target platform architecture (r3). */
1198#ifdef VMTARGET_DEFAULT
1199# define VMCPU_IS_VALID_EXT(a_pVCpu) \
1200 ( RT_VALID_ALIGNED_PTR(a_pVCpu, 64) \
1201 && RT_VALID_ALIGNED_PTR((a_pVCpu)->CTX_SUFF(pVM), HOST_PAGE_SIZE_DYNAMIC) \
1202 && (unsigned)(a_pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
1203 && (a_pVCpu)->CTX_SUFF(pVM)->enmTarget == VMTARGET_DEFAULT)
1204#else
1205# define VMCPU_IS_VALID_EXT(a_pVCpu) \
1206 ( RT_VALID_ALIGNED_PTR(a_pVCpu, 64) \
1207 && RT_VALID_ALIGNED_PTR((a_pVCpu)->CTX_SUFF(pVM), HOST_PAGE_SIZE_DYNAMIC) \
1208 && (unsigned)(a_pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING)
1209#endif
1210
1211/** @def VMCPU_ASSERT_VALID_EXT_RETURN
1212 * Asserts that a VMCPU handle is valid for external access, i.e. not being
1213 * destroyed or terminated and matching the target platform architecture (r3).
1214 */
1215#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
1216 AssertMsgReturn(VMCPU_IS_VALID_EXT(pVCpu), \
1217 ("pVCpu=%p pVM=%p state %s enmTarget=%#x\n", (pVCpu), \
1218 RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
1219 RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), HOST_PAGE_SIZE_DYNAMIC) \
1220 ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : "", (pVCpu)->enmTarget), \
1221 (rc))
1222
1223#if defined(USING_VMM_COMMON_DEFS) || defined(DOXYGEN_RUNNING)
1224/* Some VMM_COMMON_DEFS defines that actively change the VM/VMCPU structures
1225 that we bake into the VM_STRUCT_VERSION value. */
1226# ifdef VBOX_WITH_MINIMAL_R0
1227# define VM_STRUCT_VERSION_F_31 RT_BIT_32(31)
1228# else
1229# define VM_STRUCT_VERSION_F_31 UINT32_C(0)
1230# endif
1231# ifdef VBOX_WITH_ONLY_PGM_NEM_MODE
1232# define VM_STRUCT_VERSION_F_30 RT_BIT_32(30)
1233# else
1234# define VM_STRUCT_VERSION_F_30 UINT32_C(0)
1235# endif
1236# ifdef VBOX_WITH_PGM_NEM_MODE
1237# define VM_STRUCT_VERSION_F_29 RT_BIT_32(29)
1238# else
1239# define VM_STRUCT_VERSION_F_29 UINT32_C(0)
1240# endif
1241# ifdef VBOX_WITH_HWVIRT
1242# define VM_STRUCT_VERSION_F_28 RT_BIT_32(28)
1243# else
1244# define VM_STRUCT_VERSION_F_28 UINT32_C(0)
1245# endif
1246
1247/** @def VM_STRUCT_VERSION
1248 * The current VM structure version number. */
1249# define VM_STRUCT_VERSION ( UINT32_C(2) \
1250 | VM_STRUCT_VERSION_F_31 \
1251 | VM_STRUCT_VERSION_F_30 \
1252 | VM_STRUCT_VERSION_F_29 \
1253 | VM_STRUCT_VERSION_F_28 )
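/* Usage sketch (illustrative only; the check site and status code choice are
 * hypothetical): a separately built module can verify structure layout
 * compatibility by comparing against VM::uStructVersion:
 * @code
 *     AssertReturn(pVM->uStructVersion == VM_STRUCT_VERSION, VERR_VERSION_MISMATCH);
 * @endcode
 */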
1254
1255# if (defined(RT_ARCH_AMD64) && defined(VBOX_WITH_VIRT_ARMV8) && defined(IN_RING0)) || defined(DOXYGEN_RUNNING)
1256/** @def VM_STRUCT_VERSION_NON_NATIVE_TARGETS
1257 * The current VM structure version for the other architecture (hack).
1258 *
1259 * Currently the VBoxVMMArm.dll/so/dylib on x86 differs from VM_STRUCT_VERSION
1260 * in that it will have VBOX_WITH_ONLY_PGM_NEM_MODE & VBOX_WITH_MINIMAL_R0
1261 * defined but not VBOX_WITH_HWVIRT. This is to get the stuff off the ground
1262 * quickly by emulating how it's built on win.arm64 hosts. */
1263# define VM_STRUCT_VERSION_NON_NATIVE_TARGETS \
1264 ( ( VM_STRUCT_VERSION \
1265 | RT_BIT_32(31) /*VBOX_WITH_MINIMAL_R0*/ \
1266 | RT_BIT_32(30) /*VBOX_WITH_ONLY_PGM_NEM_MODE*/ ) \
1267 & ~RT_BIT_32(28) /*VBOX_WITH_HWVIRT*/ )
1268# endif
1269
1270/** @def VM_IS_NON_NATIVE_WITH_LIMITED_R0
1271 * Whether this is a non-default-target VM that should have the limited ring-0
1272 * presence hack applied.
1273 *
1274 * This is typically used in ring-0 code to skip VM init and termination code.
1275 *
1276 * @param a_pGVM The ring-0 VM structure. */
1277# ifdef VM_STRUCT_VERSION_NON_NATIVE_TARGETS
1278# define VM_IS_NON_NATIVE_WITH_LIMITED_R0(a_pGVM) ((a_pGVM)->enmTarget != VMTARGET_NATIVE)
1279# else
1280# define VM_IS_NON_NATIVE_WITH_LIMITED_R0(a_pGVM) (false)
1281# endif
1281# endif
1282
1283#endif
1284#endif /* !VBOX_FOR_DTRACE_LIB */
1285
1286
1287/**
1288 * Helper that HM and NEM use for safely modifying VM::bMainExecutionEngine.
1289 *
1290 * ONLY HM and NEM MAY USE THIS!
1291 *
1292 * @param a_pVM The cross context VM structure.
1293 * @param a_bValue The new value.
1294 * @internal
1295 */
1296#define VM_SET_MAIN_EXECUTION_ENGINE(a_pVM, a_bValue) \
1297 do { \
1298 *const_cast<uint8_t *>(&(a_pVM)->bMainExecutionEngine) = (a_bValue); \
1299 ASMCompilerBarrier(); /* just to be on the safe side */ \
1300 } while (0)
1301
1302/**
1303 * Checks whether iem-executes-all-mode is used.
1304 *
1305 * @retval true if IEM is used.
1306 * @retval false if not.
1307 *
1308 * @param a_pVM The cross context VM structure.
1309 * @sa VM_IS_HM_OR_NEM_ENABLED, VM_IS_HM_ENABLED, VM_IS_NEM_ENABLED.
1310 * @internal
1311 */
1312#define VM_IS_EXEC_ENGINE_IEM(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_IEM)
1313
1314/**
1315 * Checks whether HM (VT-x/AMD-V) or NEM is being used by this VM.
1316 *
1317 * @retval true if either is used.
1318 * @retval false if IEM is used.
1319 *
1320 * @param a_pVM The cross context VM structure.
1321 * @sa VM_IS_EXEC_ENGINE_IEM, VM_IS_HM_ENABLED, VM_IS_NEM_ENABLED.
1322 * @internal
1323 */
1324#define VM_IS_HM_OR_NEM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine != VM_EXEC_ENGINE_IEM)
1325
1326/**
1327 * Checks whether HM is being used by this VM.
1328 *
1329 * @retval true if HM (VT-x/AMD-V) is used.
1330 * @retval false if not.
1331 *
1332 * @param a_pVM The cross context VM structure.
1333 * @sa VM_IS_NEM_ENABLED, VM_IS_EXEC_ENGINE_IEM, VM_IS_HM_OR_NEM_ENABLED.
1334 * @internal
1335 */
1336#define VM_IS_HM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT)
1337
1338/**
1339 * Checks whether NEM is being used by this VM.
1340 *
1341 * @retval true if a native hypervisor API is used.
1342 * @retval false if not.
1343 *
1344 * @param a_pVM The cross context VM structure.
1345 * @sa VM_IS_HM_ENABLED, VM_IS_EXEC_ENGINE_IEM, VM_IS_HM_OR_NEM_ENABLED.
1346 * @internal
1347 */
1348#define VM_IS_NEM_ENABLED(a_pVM) ((a_pVM)->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
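/* Usage sketch (illustrative only): dispatching on the main execution engine
 * with the predicates above:
 * @code
 *     if (VM_IS_HM_ENABLED(pVM))
 *     {
 *         // ... VT-x/AMD-V specific path ...
 *     }
 *     else if (VM_IS_NEM_ENABLED(pVM))
 *     {
 *         // ... native hypervisor API path ...
 *     }
 *     else
 *         Assert(VM_IS_EXEC_ENGINE_IEM(pVM));
 * @endcode
 */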
1349
1350
1351/**
1352 * The cross context VM structure.
1353 *
1354 * It contains all the VM data that has to be available in all contexts.
1355 * Even though it contains all the data, the idea is to use APIs rather than
1356 * modifying the members directly all over the place. Therefore we make use of
1357 * unions to hide everything which isn't local to the current source module. This means we'll
1358 * have to pay a little bit of attention when adding new members to structures
1359 * in the unions and make sure to keep the padding sizes up to date.
1360 *
1361 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
1362 */
1363typedef struct VM
1364{
1365 /** The state of the VM.
1366 * This field is read-only to everyone except the VM and EM. */
1367 VMSTATE volatile enmVMState;
1368 /** Forced action flags.
1369 * See the VM_FF_* \#defines. Updated atomically.
1370 */
1371 volatile uint32_t fGlobalForcedActions;
1372 /** Pointer to the array of page descriptors for the VM structure allocation. */
1373 R3PTRTYPE(PSUPPAGE) paVMPagesR3;
1374 /** Session handle. For use when calling SUPR0 APIs. */
1375#ifdef IN_RING0
1376 PSUPDRVSESSION pSessionUnsafe;
1377#else
1378 PSUPDRVSESSION pSession;
1379#endif
1380 /** Pointer to the ring-3 VM structure. */
1381 PUVM pUVM;
1382 /** Ring-3 Host Context VM Pointer. */
1383#ifdef IN_RING0
1384 R3PTRTYPE(struct VM *) pVMR3Unsafe;
1385#else
1386 R3PTRTYPE(struct VM *) pVMR3;
1387#endif
1388 /** Ring-0 Host Context VM pointer for making ring-0 calls. */
1389 R0PTRTYPE(struct VM *) pVMR0ForCall;
1390 /** Raw-mode Context VM Pointer. */
1391 uint32_t pVMRC;
1392 /** Padding for new raw-mode (long mode). */
1393 uint32_t pVMRCPadding;
1394
1395 /** The GVM VM handle. Only the GVM should modify this field. */
1396#ifdef IN_RING0
1397 uint32_t hSelfUnsafe;
1398#else
1399 uint32_t hSelf;
1400#endif
1401 /** Number of virtual CPUs. */
1402#ifdef IN_RING0
1403 uint32_t cCpusUnsafe;
1404#else
1405 uint32_t cCpus;
1406#endif
1407 /** The VM target platform architecture. */
1408#ifdef IN_RING0
1409 VMTARGET enmTargetUnsafe;
1410#else
1411 VMTARGET enmTarget;
1412#endif
1413 /** CPU execution cap (1-100). */
1414 uint32_t uCpuExecutionCap;
1415
1416 /** Size of the VM structure. */
1417 uint32_t cbSelf;
1418 /** Size of the VMCPU structure. */
1419 uint32_t cbVCpu;
1420 /** Structure version number (VM_STRUCT_VERSION). */
1421 uint32_t uStructVersion;
1422
1423 /** @name Various items that are frequently accessed.
1424 * @{ */
1425 /** The main execution engine, VM_EXEC_ENGINE_XXX.
1426 * This is set early during vmR3InitRing3 by HM or NEM. */
1427 uint8_t const bMainExecutionEngine;
1428
1429 /** Hardware VM support is available and enabled.
1430 * Determined very early during init.
1431 * This is placed here for performance reasons.
1432 * @todo obsoleted by bMainExecutionEngine, eliminate. */
1433 bool fHMEnabled;
1434 /** @} */
1435
1436 /** Alignment padding. */
1437 uint8_t uPadding1[2];
1438
1439 /** @name Debugging
1440 * @{ */
1441    /** Ring-3 host context trace buffer handle. */
1442    R3PTRTYPE(RTTRACEBUF)       hTraceBufR3;
1443    /** Ring-0 host context trace buffer handle. */
1444    R0PTRTYPE(RTTRACEBUF)       hTraceBufR0;
1445 /** @} */
1446
1447 /** Max EMT hash lookup collisions (in GVMM). */
1448 uint8_t cMaxEmtHashCollisions;
1449
1450 /** Padding - the unions must be aligned on a 64 bytes boundary. */
1451 uint8_t abAlignment3[HC_ARCH_BITS == 64 ? 23 : 51];
1452
1453 /** CPUM part. */
1454 union
1455 {
1456#if defined(VMM_INCLUDED_SRC_include_CPUMInternal_h) || defined(VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h)
1457 struct CPUM s;
1458#endif
1459#ifdef VBOX_INCLUDED_vmm_cpum_h
1460 /** Read only info exposed about the host and guest CPUs. */
1461 struct
1462 {
1463 /** Guest CPU feature information. */
1464 CPUMFEATURES GuestFeatures;
1465 } const ro;
1466#endif
1467 /** @todo this is rather bloated because of static MSR range allocation.
1468 * Probably a good idea to move it to a separate R0 allocation... */
1469 uint8_t padding[8832 + 128*8192 + 0x1d00]; /* multiple of 64 */
1470 } cpum;
1471
1472 /** PGM part.
1473     * @note Aligned on 16384 boundary for zero and MMIO page storage. */
1474 union
1475 {
1476#ifdef VMM_INCLUDED_SRC_include_PGMInternal_h
1477 struct PGM s;
1478#endif
1479 uint8_t padding[129728]; /* multiple of 64 */
1480 } pgm;
1481
1482 /** VMM part. */
1483 union
1484 {
1485#ifdef VMM_INCLUDED_SRC_include_VMMInternal_h
1486 struct VMM s;
1487#endif
1488 uint8_t padding[1600]; /* multiple of 64 */
1489 } vmm;
1490
1491 /** HM part. */
1492 union
1493 {
1494#ifdef VMM_INCLUDED_SRC_include_HMInternal_h
1495 struct HM s;
1496#endif
1497 uint8_t padding[5504]; /* multiple of 64 */
1498 } hm;
1499
1500 /** TRPM part. */
1501 union
1502 {
1503#ifdef VMM_INCLUDED_SRC_include_TRPMInternal_h
1504 struct TRPM s;
1505#endif
1506 uint8_t padding[2048]; /* multiple of 64 */
1507 } trpm;
1508
1509 /** SELM part. */
1510 union
1511 {
1512#ifdef VMM_INCLUDED_SRC_include_SELMInternal_h
1513 struct SELM s;
1514#endif
1515 uint8_t padding[768]; /* multiple of 64 */
1516 } selm;
1517
1518 /** MM part. */
1519 union
1520 {
1521#ifdef VMM_INCLUDED_SRC_include_MMInternal_h
1522 struct MM s;
1523#endif
1524 uint8_t padding[192]; /* multiple of 64 */
1525 } mm;
1526
1527 /** PDM part. */
1528 union
1529 {
1530#ifdef VMM_INCLUDED_SRC_include_PDMInternal_h
1531 struct PDM s;
1532#endif
1533 uint8_t padding[22784]; /* multiple of 64 */
1534 } pdm;
1535
1536 /** IOM part. */
1537 union
1538 {
1539#ifdef VMM_INCLUDED_SRC_include_IOMInternal_h
1540 struct IOM s;
1541#endif
1542 uint8_t padding[1152]; /* multiple of 64 */
1543 } iom;
1544
1545 /** EM part. */
1546 union
1547 {
1548#ifdef VMM_INCLUDED_SRC_include_EMInternal_h
1549 struct EM s;
1550#endif
1551 uint8_t padding[256]; /* multiple of 64 */
1552 } em;
1553
1554 /** NEM part. */
1555 union
1556 {
1557#ifdef VMM_INCLUDED_SRC_include_NEMInternal_h
1558 struct NEM s;
1559#endif
1560 uint8_t padding[4608]; /* multiple of 64 */
1561 } nem;
1562
1563 /** TM part. */
1564 union
1565 {
1566#ifdef VMM_INCLUDED_SRC_include_TMInternal_h
1567 struct TM s;
1568#endif
1569 uint8_t padding[10112]; /* multiple of 64 */
1570 } tm;
1571
1572 /** DBGF part. */
1573 union
1574 {
1575#ifdef VMM_INCLUDED_SRC_include_DBGFInternal_h
1576 struct DBGF s;
1577#endif
1578#ifdef VBOX_INCLUDED_vmm_dbgf_h
1579 /** Read only info exposed about interrupt breakpoints and selected events. */
1580 struct
1581 {
1582 /** Bitmap of enabled hardware interrupt breakpoints. */
1583 uint32_t bmHardIntBreakpoints[256 / 32];
1584 /** Bitmap of enabled software interrupt breakpoints. */
1585 uint32_t bmSoftIntBreakpoints[256 / 32];
1586 /** Bitmap of selected events.
1587             * This includes non-selectable events too for simplicity; we maintain the
1588             * state for some of these, as it may come in handy. */
1589 uint64_t bmSelectedEvents[(DBGFEVENT_END + 63) / 64];
1590 /** Enabled hardware interrupt breakpoints. */
1591 uint32_t cHardIntBreakpoints;
1592 /** Enabled software interrupt breakpoints. */
1593 uint32_t cSoftIntBreakpoints;
1594 /** The number of selected events. */
1595 uint32_t cSelectedEvents;
1596 /** The number of enabled hardware breakpoints. */
1597 uint8_t cEnabledHwBreakpoints;
1598 /** The number of enabled hardware I/O breakpoints. */
1599 uint8_t cEnabledHwIoBreakpoints;
1600 uint8_t au8Alignment1[2]; /**< Alignment padding. */
1601 /** The number of enabled software breakpoints. */
1602 uint32_t volatile cEnabledSwBreakpoints;
1603 } const ro;
1604#endif
1605 uint8_t padding[2432]; /* multiple of 64 */
1606 } dbgf;
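    /* Illustrative note (a sketch, assuming vmm/dbgf.h is included): the
     * read-only view above can be queried with the IPRT bitmap helpers from
     * iprt/asm.h, e.g.
     *     bool fArmed = ASMBitTest(&pVM->dbgf.ro.bmHardIntBreakpoints[0], uVector);
     * where uVector is a caller-supplied interrupt vector (0..255). */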
1607
1608 /** SSM part. */
1609 union
1610 {
1611#ifdef VMM_INCLUDED_SRC_include_SSMInternal_h
1612 struct SSM s;
1613#endif
1614 uint8_t padding[128]; /* multiple of 64 */
1615 } ssm;
1616    /** GIM part. */
1617    union
1618 {
1619#ifdef VMM_INCLUDED_SRC_include_GIMInternal_h
1620 struct GIM s;
1621#endif
1622 uint8_t padding[448]; /* multiple of 64 */
1623 } gim;
1624
1625 /** Interrupt controller, target specific. */
1626 RT_GCC_EXTENSION
1627 union
1628 {
1629#if defined(VBOX_VMM_TARGET_ARMV8) || defined(VBOX_VMM_TARGET_AGNOSTIC)
1630 union
1631 {
1632# ifdef VMM_INCLUDED_SRC_include_GICInternal_h
1633 struct GIC s;
1634# endif
1635 uint8_t padding[128]; /* multiple of 8 */
1636 } gic;
1637#endif
1638#if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
1639 union
1640 {
1641# ifdef VMM_INCLUDED_SRC_include_APICInternal_h
1642 struct APIC s;
1643# endif
1644 uint8_t padding[128]; /* multiple of 8 */
1645 } apic;
1646#endif
1647 };
1648
1649 /* ---- begin small stuff ---- */
1650
1651 /** VM part. */
1652 union
1653 {
1654#ifdef VMM_INCLUDED_SRC_include_VMInternal_h
1655 struct VMINT s;
1656#endif
1657 uint8_t padding[32]; /* multiple of 8 */
1658 } vm;
1659
1660 /** CFGM part. */
1661 union
1662 {
1663#ifdef VMM_INCLUDED_SRC_include_CFGMInternal_h
1664 struct CFGM s;
1665#endif
1666 uint8_t padding[8]; /* multiple of 8 */
1667 } cfgm;
1668
1669 /** IEM part. */
1670 union
1671 {
1672#ifdef VMM_INCLUDED_SRC_include_IEMInternal_h
1673 struct IEM s;
1674#endif
1675 uint8_t padding[16]; /* multiple of 8 */
1676 } iem;
1677
1678 /** Statistics for ring-0 only components. */
1679 struct
1680 {
1681 /** GMMR0 stats. */
1682 struct
1683 {
1684 /** Chunk TLB hits. */
1685 uint64_t cChunkTlbHits;
1686 /** Chunk TLB misses. */
1687 uint64_t cChunkTlbMisses;
1688 } gmm;
1689        uint64_t    au64Padding[6];  /* probably more coming here... */
1690 } R0Stats;
1691    /** GCM part. */
1692    union
1693 {
1694#ifdef VMM_INCLUDED_SRC_include_GCMInternal_h
1695 struct GCM s;
1696#endif
1697 uint8_t padding[8]; /* multiple of 8 */
1698 } gcm;
1699
1700    /** Padding for aligning the structure size on a page boundary. */
1701 uint8_t abAlignment2[0x3900 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];
1702
1703 /* ---- end small stuff ---- */
1704
1705 /** Array of VMCPU ring-3 pointers. */
1706 PVMCPUR3 apCpusR3[VMM_MAX_CPU_COUNT];
1707
1708    /* This point is aligned on a 16384 boundary (for arm64 purposes). */
1709} VM;
1710#ifndef VBOX_FOR_DTRACE_LIB
1711//AssertCompileSizeAlignment(VM, 16384);
1712#endif
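
/*
 * A minimal sketch (not from this source) of the union hiding pattern the VM
 * structure relies on: a translation unit that has included its own internal
 * header sees the real sub-structure through the 's' member, while every other
 * module only sees opaque padding and must use the public APIs.
 *
 *      // Inside PGM code (PGMInternal.h included, guard macro defined):
 *      pVM->pgm.s.SomeMember = 42;    // 'SomeMember' is hypothetical
 *
 *      // Anywhere else only pVM->pgm.padding is visible; go through the
 *      // public PGM interfaces instead of poking at the bytes.
 */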
1713
1714
1715#ifdef IN_RC
1716RT_C_DECLS_BEGIN
1717
1718/** The VM structure.
1719 * This is imported from the VMMRCBuiltin module, i.e. it's one of those magic
1720 * globals which we should avoid using.
1721 */
1722extern DECLIMPORT(VM) g_VM;
1723
1724/** The VMCPU structure for virtual CPU \#0.
1725 * This is imported from the VMMRCBuiltin module, i.e. it's one of those magic
1726 * globals which we should avoid using.
1727 */
1728extern DECLIMPORT(VMCPU) g_VCpu0;
1729
1730RT_C_DECLS_END
1731#endif
1732
1733/** @} */
1734
1735#endif /* !VBOX_INCLUDED_vmm_vm_h */
1736