VirtualBox

source: vbox/trunk/include/VBox/vmm/vm.h@ 68851

Last change on this file since 68851 was 68851, checked in by vboxsync, 7 years ago

vm.h: Renamed the VM_IS_RUNNING macro to something indicating that it is obviously only suitable for assertions. (The variable is volatile; we don't read it three times in release builds since we may get three different values.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 52.2 KB
1/** @file
2 * VM - The Virtual Machine, data.
3 */
4
5/*
6 * Copyright (C) 2006-2016 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_vm_h
27#define ___VBox_vmm_vm_h
28
29#ifndef VBOX_FOR_DTRACE_LIB
30# include <iprt/param.h>
31# include <VBox/types.h>
32# include <VBox/vmm/cpum.h>
33# include <VBox/vmm/stam.h>
34# include <VBox/vmm/vmapi.h>
35# include <VBox/vmm/vmm.h>
36# include <VBox/sup.h>
37#else
38# pragma D depends_on library vbox-types.d
39# pragma D depends_on library CPUMInternal.d
40# define ___CPUMInternal_h
41#endif
42
43
44
45/** @defgroup grp_vm The Virtual Machine
46 * @ingroup grp_vmm
47 * @{
48 */
49
50/**
51 * The state of a Virtual CPU.
52 *
53 * The basic state indicated here is whether the CPU has been started or not. In
54 * addition, there are sub-states when started for assisting scheduling (GVMM
55 * mostly).
56 *
57 * The transition out of the STOPPED state is done by a vmR3PowerOn.
58 * The transition back to the STOPPED state is done by vmR3PowerOff.
59 *
60 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
61 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
62 */
63typedef enum VMCPUSTATE
64{
65 /** The customary invalid zero. */
66 VMCPUSTATE_INVALID = 0,
67
68 /** Virtual CPU has not yet been started. */
69 VMCPUSTATE_STOPPED,
70
71 /** CPU started. */
72 VMCPUSTATE_STARTED,
73 /** CPU started in HM context. */
74 VMCPUSTATE_STARTED_HM,
75 /** Executing guest code and can be poked (RC or STI bits of HM). */
76 VMCPUSTATE_STARTED_EXEC,
77 /** Executing guest code in the recompiler. */
78 VMCPUSTATE_STARTED_EXEC_REM,
79 /** Halted. */
80 VMCPUSTATE_STARTED_HALTED,
81
82 /** The end of valid virtual CPU states. */
83 VMCPUSTATE_END,
84
85 /** Ensure 32-bit type. */
86 VMCPUSTATE_32BIT_HACK = 0x7fffffff
87} VMCPUSTATE;
88
89
90/**
91 * The cross context virtual CPU structure.
92 *
93 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
94 */
95typedef struct VMCPU
96{
97 /** Per CPU forced action.
98 * See the VMCPU_FF_* \#defines. Updated atomically. */
99 uint32_t volatile fLocalForcedActions; /* 0 */
100 /** The CPU state. */
101 VMCPUSTATE volatile enmState; /* 4 */
102
103 /** Pointer to the ring-3 UVMCPU structure. */
104 PUVMCPU pUVCpu; /* 8 */
105 /** Ring-3 Host Context VM Pointer. */
106 PVMR3 pVMR3; /* 16 / 12 */
107 /** Ring-0 Host Context VM Pointer. */
108 PVMR0 pVMR0; /* 24 / 16 */
109 /** Raw-mode Context VM Pointer. */
110 PVMRC pVMRC; /* 32 / 20 */
111 /** The CPU ID.
112 * This is the index into the VM::aCpu array. */
113 VMCPUID idCpu; /* 36 / 24 */
114 /** The native thread handle. */
115 RTNATIVETHREAD hNativeThread; /* 40 / 28 */
116 /** The native R0 thread handle. (different from the R3 handle!) */
117 RTNATIVETHREAD hNativeThreadR0; /* 48 / 32 */
118 /** Which host CPU ID is this EMT running on.
119 * Only valid when in RC or HMR0 with scheduling disabled. */
120 RTCPUID volatile idHostCpu; /* 56 / 36 */
121 /** The CPU set index corresponding to idHostCpu, UINT32_MAX if not valid.
122 * @remarks Best to make sure iHostCpuSet shares cache line with idHostCpu! */
123 uint32_t volatile iHostCpuSet; /* 60 / 40 */
124
125#if HC_ARCH_BITS == 32
126 /** Align the structures below on a 64-byte boundary and make sure they start
127 * at the same offset in both 64-bit and 32-bit builds.
128 *
129 * @remarks The alignments of the members that are larger than 48 bytes should be
130 * 64 bytes for cache line reasons. Structs containing small amounts of
131 * data could be lumped together at the end with a < 64 byte padding
132 * following it (to grow into and align the struct size).
133 */
134 uint8_t abAlignment1[HC_ARCH_BITS == 64 ? 0 : 20];
135#endif
136
137 /** IEM part.
138 * @remarks This comes first as it allows the use of 8-bit immediates for the
139 * first 64 bytes of the structure, reducing code size a wee bit. */
140#ifdef ___IEMInternal_h /* For PDB hacking. */
141 union VMCPUUNIONIEMFULL
142#else
143 union VMCPUUNIONIEMSTUB
144#endif
145 {
146#ifdef ___IEMInternal_h
147 struct IEMCPU s;
148#endif
149 uint8_t padding[18496]; /* multiple of 64 */
150 } iem;
151
152 /** HM part. */
153 union VMCPUUNIONHM
154 {
155#ifdef ___HMInternal_h
156 struct HMCPU s;
157#endif
158 uint8_t padding[5760]; /* multiple of 64 */
159 } hm;
160
161 /** EM part. */
162 union VMCPUUNIONEM
163 {
164#ifdef ___EMInternal_h
165 struct EMCPU s;
166#endif
167 uint8_t padding[1408]; /* multiple of 64 */
168 } em;
169
170 /** TRPM part. */
171 union VMCPUUNIONTRPM
172 {
173#ifdef ___TRPMInternal_h
174 struct TRPMCPU s;
175#endif
176 uint8_t padding[128]; /* multiple of 64 */
177 } trpm;
178
179 /** TM part. */
180 union VMCPUUNIONTM
181 {
182#ifdef ___TMInternal_h
183 struct TMCPU s;
184#endif
185 uint8_t padding[384]; /* multiple of 64 */
186 } tm;
187
188 /** VMM part. */
189 union VMCPUUNIONVMM
190 {
191#ifdef ___VMMInternal_h
192 struct VMMCPU s;
193#endif
194 uint8_t padding[704]; /* multiple of 64 */
195 } vmm;
196
197 /** PDM part. */
198 union VMCPUUNIONPDM
199 {
200#ifdef ___PDMInternal_h
201 struct PDMCPU s;
202#endif
203 uint8_t padding[256]; /* multiple of 64 */
204 } pdm;
205
206 /** IOM part. */
207 union VMCPUUNIONIOM
208 {
209#ifdef ___IOMInternal_h
210 struct IOMCPU s;
211#endif
212 uint8_t padding[512]; /* multiple of 64 */
213 } iom;
214
215 /** DBGF part.
216 * @todo Combine this with other tiny structures. */
217 union VMCPUUNIONDBGF
218 {
219#ifdef ___DBGFInternal_h
220 struct DBGFCPU s;
221#endif
222 uint8_t padding[256]; /* multiple of 64 */
223 } dbgf;
224
225 /** GIM part. */
226 union VMCPUUNIONGIM
227 {
228#ifdef ___GIMInternal_h
229 struct GIMCPU s;
230#endif
231 uint8_t padding[512]; /* multiple of 64 */
232 } gim;
233
234 /** APIC part. */
235 union VMCPUUNIONAPIC
236 {
237#ifdef ___APICInternal_h
238 struct APICCPU s;
239#endif
240 uint8_t padding[1792]; /* multiple of 64 */
241 } apic;
242
243 /*
244 * Some less frequently used global members that don't need to take up
245 * precious space at the head of the structure.
246 */
247
248 /** Trace groups enable flags. */
249 uint32_t fTraceGroups; /* 64 / 44 */
250 /** State data for use by ad hoc profiling. */
251 uint32_t uAdHoc;
252 /** Profiling samples for use by ad hoc profiling. */
253 STAMPROFILEADV aStatAdHoc[8]; /* size: 40*8 = 320 */
254
255 /** Align the following members on page boundary. */
256 uint8_t abAlignment2[2168];
257
258 /** PGM part. */
259 union VMCPUUNIONPGM
260 {
261#ifdef ___PGMInternal_h
262 struct PGMCPU s;
263#endif
264 uint8_t padding[4096]; /* multiple of 4096 */
265 } pgm;
266
267 /** CPUM part. */
268 union VMCPUUNIONCPUM
269 {
270#ifdef ___CPUMInternal_h
271 struct CPUMCPU s;
272#endif
273#ifdef VMCPU_INCL_CPUM_GST_CTX
274 /** The guest CPUM context for direct use by execution engines.
275 * This is not for general consumption, but for HM, REM, IEM, and maybe a few
276 * others. The rest will use the function-based CPUM API. */
277 CPUMCTX GstCtx;
278#endif
279 uint8_t padding[4096]; /* multiple of 4096 */
280 } cpum;
281} VMCPU;
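/* Illustrative sketch of the union-hiding pattern used throughout VMCPU above:
 * a module sees its own substructure through the 's' member only when its
 * internal header is included; all other code sees opaque, fixed-size padding.
 * The names MYMODCPU/mymod below are hypothetical stand-ins for a real module.
 *
 * @code
 * // In MYMODInternal.h (module-private):
 * typedef struct MYMODCPU { uint32_t uState; } MYMODCPU;
 *
 * // In the shared VMCPU structure:
 * union VMCPUUNIONMYMOD
 * {
 * #ifdef ___MYMODInternal_h
 *     struct MYMODCPU s;
 * #endif
 *     uint8_t padding[64];    // multiple of 64, must stay >= sizeof(MYMODCPU)
 * } mymod;
 *
 * // Code that included MYMODInternal.h can then do:
 * //     pVCpu->mymod.s.uState = 0;
 * @endcode
 */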
282
283
284#ifndef VBOX_FOR_DTRACE_LIB
285
286/** @name Operations on VMCPU::enmState
287 * @{ */
288/** Gets the VMCPU state. */
289#define VMCPU_GET_STATE(pVCpu) ( (pVCpu)->enmState )
290/** Sets the VMCPU state. */
291#define VMCPU_SET_STATE(pVCpu, enmNewState) \
292 ASMAtomicWriteU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState))
293/** Compares and sets the VMCPU state. */
294#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
295 ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
296/** Checks the VMCPU state. */
297#ifdef VBOX_STRICT
298# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
299 do { \
300 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
301 AssertMsg(enmState == (enmExpectedState), \
302 ("enmState=%d enmExpectedState=%d idCpu=%u\n", \
303 enmState, enmExpectedState, (pVCpu)->idCpu)); \
304 } while (0)
305#else
306# define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) do { } while (0)
307#endif
308/** Tests if the state means that the CPU is started. */
309#define VMCPUSTATE_IS_STARTED(enmState) ( (enmState) > VMCPUSTATE_STOPPED )
310/** Tests if the state means that the CPU is stopped. */
311#define VMCPUSTATE_IS_STOPPED(enmState) ( (enmState) == VMCPUSTATE_STOPPED )
312/** @} */
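/* Illustrative sketch (the helper name vmcpuDemoEnterExec is hypothetical) of
 * how the state macros above combine: the compare-and-exchange guards against
 * racing with a concurrent state change, and the plain set restores the base
 * state afterwards.
 *
 * @code
 * static void vmcpuDemoEnterExec(PVMCPU pVCpu)
 * {
 *     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
 *     if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC, VMCPUSTATE_STARTED))
 *     {
 *         // ... execute guest code; pokers may now observe STARTED_EXEC ...
 *         VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
 *     }
 * }
 * @endcode
 */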
313
314
315/** The name of the raw-mode context VMM Core module. */
316#define VMMRC_MAIN_MODULE_NAME "VMMRC.rc"
317/** The name of the ring-0 context VMM Core module. */
318#define VMMR0_MAIN_MODULE_NAME "VMMR0.r0"
319
320/**
321 * Wrapper macro for avoiding too much \#ifdef VBOX_WITH_RAW_MODE.
322 */
323#ifdef VBOX_WITH_RAW_MODE
324# define VM_WHEN_RAW_MODE(a_WithExpr, a_WithoutExpr) a_WithExpr
325#else
326# define VM_WHEN_RAW_MODE(a_WithExpr, a_WithoutExpr) a_WithoutExpr
327#endif
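/* Illustrative sketch: VM_WHEN_RAW_MODE() lets a flag mask include
 * raw-mode-only bits without sprinkling #ifdef VBOX_WITH_RAW_MODE everywhere
 * (DEMO_MASK is a made-up name; the real masks further down use the same
 * trick):
 *
 * @code
 * #define DEMO_MASK ( VMCPU_FF_TIMER | VM_WHEN_RAW_MODE(VMCPU_FF_TRPM_SYNC_IDT, 0) )
 * @endcode
 *
 * With raw mode compiled out, the second argument (0) is substituted and the
 * raw-mode-only flag never has to be defined. */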
328
329
330/** VM Forced Action Flags.
331 *
332 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
333 * action mask of a VM.
334 *
335 * Available VM bits:
336 * 0, 1, 5, 6, 7, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 30
337 *
338 *
339 * Available VMCPU bits:
340 * 11, 14, 15, 31
341 *
342 * @todo If we run low on VMCPU bits, we may consider merging the SELM bits
343 *
344 * @{
345 */
346/** The virtual sync clock has been stopped, go to TM until it has been
347 * restarted... */
348#define VM_FF_TM_VIRTUAL_SYNC RT_BIT_32(2)
349/** PDM Queues are pending. */
350#define VM_FF_PDM_QUEUES RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
351/** The bit number for VM_FF_PDM_QUEUES. */
352#define VM_FF_PDM_QUEUES_BIT 3
353/** PDM DMA transfers are pending. */
354#define VM_FF_PDM_DMA RT_BIT_32(VM_FF_PDM_DMA_BIT)
355/** The bit number for VM_FF_PDM_DMA. */
356#define VM_FF_PDM_DMA_BIT 4
357/** This action forces the VM to call DBGF so DBGF can service debugger
358 * requests in the emulation thread.
359 * This action flag stays asserted till DBGF clears it.*/
360#define VM_FF_DBGF RT_BIT_32(VM_FF_DBGF_BIT)
361/** The bit number for VM_FF_DBGF. */
362#define VM_FF_DBGF_BIT 8
363/** This action forces the VM to service pending requests from other
364 * threads or requests which must be executed in another context. */
365#define VM_FF_REQUEST RT_BIT_32(9)
366/** Check for VM state changes and take appropriate action. */
367#define VM_FF_CHECK_VM_STATE RT_BIT_32(VM_FF_CHECK_VM_STATE_BIT)
368/** The bit number for VM_FF_CHECK_VM_STATE. */
369#define VM_FF_CHECK_VM_STATE_BIT 10
370/** Reset the VM. (postponed) */
371#define VM_FF_RESET RT_BIT_32(VM_FF_RESET_BIT)
372/** The bit number for VM_FF_RESET. */
373#define VM_FF_RESET_BIT 11
374/** EMT rendezvous in VMM. */
375#define VM_FF_EMT_RENDEZVOUS RT_BIT_32(VM_FF_EMT_RENDEZVOUS_BIT)
376/** The bit number for VM_FF_EMT_RENDEZVOUS. */
377#define VM_FF_EMT_RENDEZVOUS_BIT 12
378
379/** PGM needs to allocate handy pages. */
380#define VM_FF_PGM_NEED_HANDY_PAGES RT_BIT_32(18)
381/** PGM is out of memory.
382 * Abandon all loops and code paths which can be resumed and get up to the EM
383 * loops. */
384#define VM_FF_PGM_NO_MEMORY RT_BIT_32(19)
385/** PGM is about to perform a lightweight pool flush.
386 * Guest SMP: all EMT threads should return to ring 3.
387 */
388#define VM_FF_PGM_POOL_FLUSH_PENDING RT_BIT_32(20)
389/** REM needs to be informed about handler changes. */
390#define VM_FF_REM_HANDLER_NOTIFY RT_BIT_32(VM_FF_REM_HANDLER_NOTIFY_BIT)
391/** The bit number for VM_FF_REM_HANDLER_NOTIFY. */
392#define VM_FF_REM_HANDLER_NOTIFY_BIT 29
393/** Suspend the VM - debug only. */
394#define VM_FF_DEBUG_SUSPEND RT_BIT_32(31)
395
396
397/** This action forces the VM to check any pending interrupts on the APIC. */
398#define VMCPU_FF_INTERRUPT_APIC RT_BIT_32(0)
399/** This action forces the VM to check any pending interrupts on the PIC. */
400#define VMCPU_FF_INTERRUPT_PIC RT_BIT_32(1)
401/** This action forces the VM to schedule and run pending timers (TM).
402 * @remarks Don't move - PATM compatibility. */
403#define VMCPU_FF_TIMER RT_BIT_32(2)
404/** This action forces the VM to check any pending NMIs. */
405#define VMCPU_FF_INTERRUPT_NMI_BIT 3
406#define VMCPU_FF_INTERRUPT_NMI RT_BIT_32(VMCPU_FF_INTERRUPT_NMI_BIT)
407/** This action forces the VM to check any pending SMIs. */
408#define VMCPU_FF_INTERRUPT_SMI_BIT 4
409#define VMCPU_FF_INTERRUPT_SMI RT_BIT_32(VMCPU_FF_INTERRUPT_SMI_BIT)
410/** PDM critical section unlocking is pending, process promptly upon return to R3. */
411#define VMCPU_FF_PDM_CRITSECT RT_BIT_32(5)
412/** Special EM internal force flag that is used by EMUnhaltAndWakeUp() to force
413 * the virtual CPU out of the next (/current) halted state. It is not processed
414 * nor cleared by emR3ForcedActions (similar to VMCPU_FF_BLOCK_NMIS), instead it
415 * is cleared the next time EM leaves the HALTED state. */
416#define VMCPU_FF_UNHALT RT_BIT_32(6)
417/** Pending IEM action (bit number). */
418#define VMCPU_FF_IEM_BIT 7
419/** Pending IEM action (mask). */
420#define VMCPU_FF_IEM RT_BIT_32(VMCPU_FF_IEM_BIT)
421/** Pending APIC action (bit number). */
422#define VMCPU_FF_UPDATE_APIC_BIT 8
423/** This action forces the VM to update the APIC's asynchronously arrived
424 * interrupts as pending interrupts. */
425#define VMCPU_FF_UPDATE_APIC RT_BIT_32(VMCPU_FF_UPDATE_APIC_BIT)
426/** This action forces the VM to service pending requests from other
427 * threads or requests which must be executed in another context. */
428#define VMCPU_FF_REQUEST RT_BIT_32(9)
429/** Pending DBGF event (alternative to passing VINF_EM_DBG_EVENT around). */
430#define VMCPU_FF_DBGF RT_BIT_32(VMCPU_FF_DBGF_BIT)
431/** The bit number for VMCPU_FF_DBGF. */
432#define VMCPU_FF_DBGF_BIT 10
433/** This action forces the VM to service any pending updates to CR3 (used only
434 * by HM). */
435#define VMCPU_FF_HM_UPDATE_CR3 RT_BIT_32(12)
436/** This action forces the VM to service any pending updates to PAE PDPEs (used
437 * only by HM). */
438#define VMCPU_FF_HM_UPDATE_PAE_PDPES RT_BIT_32(13)
439/** This action forces the VM to resync the page tables before going
440 * back to execute guest code. (GLOBAL FLUSH) */
441#define VMCPU_FF_PGM_SYNC_CR3 RT_BIT_32(16)
442/** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
443 * (NON-GLOBAL FLUSH) */
444#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL RT_BIT_32(17)
445/** Check for pending TLB shootdown actions (deprecated).
446 * Reserved for future HM re-use if necessary / safe.
447 * Consumer: HM */
448#define VMCPU_FF_TLB_SHOOTDOWN_UNUSED RT_BIT_32(18)
449/** Check for pending TLB flush action.
450 * Consumer: HM
451 * @todo rename to VMCPU_FF_HM_TLB_FLUSH */
452#define VMCPU_FF_TLB_FLUSH RT_BIT_32(VMCPU_FF_TLB_FLUSH_BIT)
453/** The bit number for VMCPU_FF_TLB_FLUSH. */
454#define VMCPU_FF_TLB_FLUSH_BIT 19
455#ifdef VBOX_WITH_RAW_MODE
456/** Check the interrupt and trap gates */
457# define VMCPU_FF_TRPM_SYNC_IDT RT_BIT_32(20)
458/** Check Guest's TSS ring 0 stack */
459# define VMCPU_FF_SELM_SYNC_TSS RT_BIT_32(21)
460/** Check Guest's GDT table */
461# define VMCPU_FF_SELM_SYNC_GDT RT_BIT_32(22)
462/** Check Guest's LDT table */
463# define VMCPU_FF_SELM_SYNC_LDT RT_BIT_32(23)
464#endif /* VBOX_WITH_RAW_MODE */
465/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
466#define VMCPU_FF_INHIBIT_INTERRUPTS RT_BIT_32(24)
467/** Block injection of non-maskable interrupts to the guest. */
468#define VMCPU_FF_BLOCK_NMIS RT_BIT_32(25)
469#ifdef VBOX_WITH_RAW_MODE
470/** CSAM needs to scan the page that's being executed */
471# define VMCPU_FF_CSAM_SCAN_PAGE RT_BIT_32(26)
472/** CSAM needs to do some homework. */
473# define VMCPU_FF_CSAM_PENDING_ACTION RT_BIT_32(27)
474#endif /* VBOX_WITH_RAW_MODE */
475/** Force return to Ring-3. */
476#define VMCPU_FF_TO_R3 RT_BIT_32(28)
477/** Force return to ring-3 to service pending I/O or MMIO write.
478 * This is a backup for mechanism VINF_IOM_R3_IOPORT_COMMIT_WRITE and
479 * VINF_IOM_R3_MMIO_COMMIT_WRITE, allowing VINF_EM_DBG_BREAKPOINT and similar
480 * status codes to be propagated at the same time without loss. */
481#define VMCPU_FF_IOM RT_BIT_32(29)
482#ifdef VBOX_WITH_RAW_MODE
483/** CPUM need to adjust CR0.TS/EM before executing raw-mode code again. */
484# define VMCPU_FF_CPUM RT_BIT_32(VMCPU_FF_CPUM_BIT)
485/** The bit number for VMCPU_FF_CPUM. */
486# define VMCPU_FF_CPUM_BIT 30
487#endif /* VBOX_WITH_RAW_MODE */
488/** Hardware virtualized nested-guest interrupt pending. */
489#define VMCPU_FF_INTERRUPT_NESTED_GUEST RT_BIT_32(31)
490
491/** Externally forced VM actions. Used to quit the idle/wait loop. */
492#define VM_FF_EXTERNAL_SUSPENDED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_EMT_RENDEZVOUS )
493/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
494#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK ( VMCPU_FF_REQUEST | VMCPU_FF_DBGF )
495
496/** Externally forced VM actions. Used to quit the idle/wait loop. */
497#define VM_FF_EXTERNAL_HALTED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST \
498 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS )
499/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
500#define VMCPU_FF_EXTERNAL_HALTED_MASK ( VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
501 | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
502 | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF )
503
504/** High priority VM pre-execution actions. */
505#define VM_FF_HIGH_PRIORITY_PRE_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC \
506 | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
507 | VM_FF_EMT_RENDEZVOUS )
508/** High priority VMCPU pre-execution actions. */
509#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
510 | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF \
511 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
512 | VM_WHEN_RAW_MODE( VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
513 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0 ) )
514
515/** High priority VM pre raw-mode execution mask. */
516#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY )
517/** High priority VMCPU pre raw-mode execution mask. */
518#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
519 | VMCPU_FF_INHIBIT_INTERRUPTS \
520 | VM_WHEN_RAW_MODE( VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
521 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0) )
522
523/** High priority post-execution actions. */
524#define VM_FF_HIGH_PRIORITY_POST_MASK ( VM_FF_PGM_NO_MEMORY )
525/** High priority post-execution actions. */
526#define VMCPU_FF_HIGH_PRIORITY_POST_MASK ( VMCPU_FF_PDM_CRITSECT | VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_PENDING_ACTION, 0) \
527 | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES \
528 | VMCPU_FF_IEM | VMCPU_FF_IOM )
529
530/** Normal priority VM post-execution actions. */
531#define VM_FF_NORMAL_PRIORITY_POST_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET \
532 | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
533/** Normal priority VMCPU post-execution actions. */
534#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK ( VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF )
535
536/** Normal priority VM actions. */
537#define VM_FF_NORMAL_PRIORITY_MASK ( VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA \
538 | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS)
539/** Normal priority VMCPU actions. */
540#define VMCPU_FF_NORMAL_PRIORITY_MASK ( VMCPU_FF_REQUEST )
541
542/** Flags to clear before resuming guest execution. */
543#define VMCPU_FF_RESUME_GUEST_MASK ( VMCPU_FF_TO_R3 )
544
545
546/** VM flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
547#define VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
548 | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_RESET)
549/** VM flags that cause the REP[|NE|E] STRINS loops to yield. */
550#define VM_FF_YIELD_REPSTR_MASK ( VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
551 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_DBGF | VM_FF_DEBUG_SUSPEND )
552/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield immediately. */
553#ifdef IN_RING3
554# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF )
555#else
556# define VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK ( VMCPU_FF_TO_R3 | VMCPU_FF_IEM | VMCPU_FF_IOM | VMCPU_FF_PGM_SYNC_CR3 \
557 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF )
558#endif
559/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
560 * enabled. */
561#define VMCPU_FF_YIELD_REPSTR_MASK ( VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK \
562 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC \
563 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_PDM_CRITSECT \
564 | VMCPU_FF_TIMER | VMCPU_FF_REQUEST )
565/** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
566 * disabled. */
567#define VMCPU_FF_YIELD_REPSTR_NOINT_MASK ( VMCPU_FF_YIELD_REPSTR_MASK \
568 & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC) )
569
570/** VM Flags that cause the HM loops to go back to ring-3. */
571#define VM_FF_HM_TO_R3_MASK ( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
572 | VM_FF_PDM_QUEUES | VM_FF_EMT_RENDEZVOUS)
573/** VMCPU Flags that cause the HM loops to go back to ring-3. */
574#define VMCPU_FF_HM_TO_R3_MASK ( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT \
575 | VMCPU_FF_IEM | VMCPU_FF_IOM)
576
577/** High priority ring-0 VM pre HM-mode execution mask. */
578#define VM_FF_HP_R0_PRE_HM_MASK (VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
579/** High priority ring-0 VMCPU pre HM-mode execution mask. */
580#define VMCPU_FF_HP_R0_PRE_HM_MASK ( VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 \
581 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST)
582/** High priority ring-0 VM pre HM-mode execution mask, single stepping. */
583#define VM_FF_HP_R0_PRE_HM_STEP_MASK (VM_FF_HP_R0_PRE_HM_MASK & ~( VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES \
584 | VM_FF_EMT_RENDEZVOUS | VM_FF_REQUEST \
585 | VM_FF_PDM_DMA) )
586/** High priority ring-0 VMCPU pre HM-mode execution mask, single stepping. */
587#define VMCPU_FF_HP_R0_PRE_HM_STEP_MASK (VMCPU_FF_HP_R0_PRE_HM_MASK & ~( VMCPU_FF_TO_R3 | VMCPU_FF_TIMER \
588 | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_REQUEST) )
589
590/** All the forced VM flags. */
591#define VM_FF_ALL_MASK (UINT32_MAX)
592/** All the forced VMCPU flags. */
593#define VMCPU_FF_ALL_MASK (UINT32_MAX)
594
595/** All the forced VM flags except those related to raw-mode and hardware
596 * assisted execution. */
597#define VM_FF_ALL_REM_MASK (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
598/** All the forced VMCPU flags except those related to raw-mode and hardware
599 * assisted execution. */
600#define VMCPU_FF_ALL_REM_MASK (~( VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_PDM_CRITSECT \
601 | VMCPU_FF_TLB_FLUSH | VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_PENDING_ACTION, 0) ))
602/** @} */
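/* Illustrative sketch (hypothetical function name) of how the halted-state
 * masks above are meant to be consumed: a halt loop blocks until an
 * externally visible force action needs attention. VM_FF_IS_PENDING and
 * VMCPU_FF_IS_PENDING are defined further down in this file.
 *
 * @code
 * static bool vmDemoHaltShouldWakeUp(PVM pVM, PVMCPU pVCpu)
 * {
 *     return VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
 *         || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_EXTERNAL_HALTED_MASK);
 * }
 * @endcode
 */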
603
604/** @def VM_FF_SET
605 * Sets a force action flag.
606 *
607 * @param pVM The cross context VM structure.
608 * @param fFlag The flag to set.
609 */
610#if 1
611# define VM_FF_SET(pVM, fFlag) ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
612#else
613# define VM_FF_SET(pVM, fFlag) \
614 do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
615 RTLogPrintf("VM_FF_SET : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
616 } while (0)
617#endif
618
619/** @def VMCPU_FF_SET
620 * Sets a force action flag for the given VCPU.
621 *
622 * @param pVCpu The cross context virtual CPU structure.
623 * @param fFlag The flag to set.
624 */
625#define VMCPU_FF_SET(pVCpu, fFlag) ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))
626
627/** @def VM_FF_CLEAR
628 * Clears a force action flag.
629 *
630 * @param pVM The cross context VM structure.
631 * @param fFlag The flag to clear.
632 */
633#if 1
634# define VM_FF_CLEAR(pVM, fFlag) ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
635#else
636# define VM_FF_CLEAR(pVM, fFlag) \
637 do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
638 RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
639 } while (0)
640#endif
641
642/** @def VMCPU_FF_CLEAR
643 * Clears a force action flag for the given VCPU.
644 *
645 * @param pVCpu The cross context virtual CPU structure.
646 * @param fFlag The flag to clear.
647 */
648#define VMCPU_FF_CLEAR(pVCpu, fFlag) ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))
649
650/** @def VM_FF_IS_SET
651 * Checks if a force action flag is set.
652 *
653 * @param pVM The cross context VM structure.
654 * @param fFlag The flag to check.
655 */
656#define VM_FF_IS_SET(pVM, fFlag) (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
657
658/** @def VMCPU_FF_IS_SET
659 * Checks if a force action flag is set for the given VCPU.
660 *
661 * @param pVCpu The cross context virtual CPU structure.
662 * @param fFlag The flag to check.
663 */
664#define VMCPU_FF_IS_SET(pVCpu, fFlag) (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
665
666/** @def VM_FF_IS_PENDING
667 * Checks if one or more force actions in the specified set are pending.
668 *
669 * @param pVM The cross context VM structure.
670 * @param fFlags The flags to check for.
671 */
672#define VM_FF_IS_PENDING(pVM, fFlags) RT_BOOL((pVM)->fGlobalForcedActions & (fFlags))
673
674/** @def VM_FF_TEST_AND_CLEAR
675 * Checks if one (!) force action in the specified set is pending and clears it atomically.
676 *
677 * @returns true if the bit was set.
678 * @returns false if the bit was clear.
679 * @param pVM The cross context VM structure.
680 * @param iBit Bit position to check and clear
681 */
682#define VM_FF_TEST_AND_CLEAR(pVM, iBit) (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))
683
684/** @def VMCPU_FF_TEST_AND_CLEAR
685 * Checks if one (!) force action in the specified set is pending and clears it atomically.
686 *
687 * @returns true if the bit was set.
688 * @returns false if the bit was clear.
689 * @param pVCpu The cross context virtual CPU structure.
690 * @param iBit Bit position to check and clear
691 */
692#define VMCPU_FF_TEST_AND_CLEAR(pVCpu, iBit) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
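/* Note on the iBit##_BIT token pasting in the two macros above: the caller
 * passes the flag name, not the bit number, and the macro appends _BIT to
 * locate the matching bit-number define. Illustrative sketch:
 *
 * @code
 * // Expands to ASMAtomicBitTestAndClear(..., VM_FF_RESET_BIT):
 * if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
 * {
 *     // ... perform the postponed reset ...
 * }
 * @endcode
 */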
693
694/** @def VMCPU_FF_IS_PENDING
695 * Checks if one or more force actions in the specified set are pending for the given VCPU.
696 *
697 * @param pVCpu The cross context virtual CPU structure.
698 * @param fFlags The flags to check for.
699 */
700#define VMCPU_FF_IS_PENDING(pVCpu, fFlags) RT_BOOL((pVCpu)->fLocalForcedActions & (fFlags))
701
702/** @def VM_FF_IS_PENDING_EXCEPT
703 * Checks if one or more force actions in the specified set are pending while one
704 * or more other ones are not.
705 *
706 * @param pVM The cross context VM structure.
707 * @param fFlags The flags to check for.
708 * @param fExcpt The flags that should not be set.
709 */
710#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt) ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )
711
712/** @def VMCPU_FF_IS_PENDING_EXCEPT
713 * Checks if one or more force actions in the specified set are pending for the
714 * given VCPU while one or more other ones are not.
715 *
716 * @param pVCpu The cross context virtual CPU structure.
717 * @param fFlags The flags to check for.
718 * @param fExcpt The flags that should not be set.
719 */
720#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
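/* Illustrative sketch of the usual force-action round trip (function names
 * hypothetical): any thread raises a flag, the EMT notices it in its inner
 * loop and clears it once serviced.
 *
 * @code
 * // Producer (any thread):
 * static void demoPostRequest(PVMCPU pVCpu)
 * {
 *     VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
 * }
 *
 * // Consumer (the EMT):
 * static void demoServiceRequests(PVMCPU pVCpu)
 * {
 *     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
 *     {
 *         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_REQUEST);
 *         // ... process the queued requests ...
 *     }
 * }
 * @endcode
 */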
721
722/** @def VM_IS_EMT
723 * Checks if the current thread is the emulation thread (EMT).
724 *
725 * @remark The ring-0 variation will need attention if we expand the ring-0
726 * code to let threads other than EMT mess around with the VM.
727 */
728#ifdef IN_RC
729# define VM_IS_EMT(pVM) true
730#else
731# define VM_IS_EMT(pVM) (VMMGetCpu(pVM) != NULL)
732#endif
733
734/** @def VMCPU_IS_EMT
735 * Checks if the current thread is the emulation thread (EMT) for the specified
736 * virtual CPU.
737 */
738#ifdef IN_RC
739# define VMCPU_IS_EMT(pVCpu) true
740#else
741# define VMCPU_IS_EMT(pVCpu) ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
742#endif
743
744/** @def VM_ASSERT_EMT
745 * Asserts that the current thread IS the emulation thread (EMT).
746 */
747#ifdef IN_RC
748# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
749#elif defined(IN_RING0)
750# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
751#else
752# define VM_ASSERT_EMT(pVM) \
753 AssertMsg(VM_IS_EMT(pVM), \
754 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
755#endif
756
757/** @def VMCPU_ASSERT_EMT
758 * Asserts that the current thread IS the emulation thread (EMT) of the
759 * specified virtual CPU.
760 */
761#ifdef IN_RC
762# define VMCPU_ASSERT_EMT(pVCpu) Assert(VMCPU_IS_EMT(pVCpu))
763#elif defined(IN_RING0)
764# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
765 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%u\n", \
766 RTThreadNativeSelf(), (pVCpu) ? (pVCpu)->hNativeThreadR0 : 0, \
767 (pVCpu) ? (pVCpu)->idCpu : 0))
768#else
769# define VMCPU_ASSERT_EMT(pVCpu) AssertMsg(VMCPU_IS_EMT(pVCpu), \
770 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
771 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
772#endif
773
774/** @def VM_ASSERT_EMT_RETURN
775 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
776 */
777#ifdef IN_RC
778# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
779#elif defined(IN_RING0)
780# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
781#else
782# define VM_ASSERT_EMT_RETURN(pVM, rc) \
783 AssertMsgReturn(VM_IS_EMT(pVM), \
784 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
785 (rc))
786#endif
787
788/** @def VMCPU_ASSERT_EMT_RETURN
789 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
790 */
791#ifdef IN_RC
792# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
793#elif defined(IN_RING0)
794# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
795#else
796# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
797 AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
798 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
799 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
800 (rc))
801#endif
802
803/** @def VMCPU_ASSERT_EMT_OR_GURU
804 * Asserts that the current thread IS the emulation thread (EMT) of the
805 * specified virtual CPU.
806 */
807#if defined(IN_RC) || defined(IN_RING0)
808# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) Assert( VMCPU_IS_EMT(pVCpu) \
809 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
810 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS )
811#else
812# define VMCPU_ASSERT_EMT_OR_GURU(pVCpu) \
813 AssertMsg( VMCPU_IS_EMT(pVCpu) \
814 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION \
815 || pVCpu->CTX_SUFF(pVM)->enmVMState == VMSTATE_GURU_MEDITATION_LS, \
816 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
817 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
818#endif
819
820/** @def VMCPU_ASSERT_EMT_OR_NOT_RUNNING
821 * Asserts that the current thread IS the emulation thread (EMT) of the
822 * specified virtual CPU or the VM is not running.
823 */
824#if defined(IN_RC) || defined(IN_RING0)
825# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
826 Assert( VMCPU_IS_EMT(pVCpu) \
827 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)) )
828#else
829# define VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu) \
830 AssertMsg( VMCPU_IS_EMT(pVCpu) \
831 || !VM_IS_RUNNING_FOR_ASSERTIONS_ONLY((pVCpu)->CTX_SUFF(pVM)), \
832 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
833 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
834#endif
835
836/** @def VM_IS_RUNNING_FOR_ASSERTIONS_ONLY
837 * Checks if the VM is running.
838 * @note This is only for pure debug assertions. No AssertReturn or similar!
839 */
840#define VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM) \
841 ( (pVM)->enmVMState == VMSTATE_RUNNING \
842 || (pVM)->enmVMState == VMSTATE_RUNNING_LS \
843 || (pVM)->enmVMState == VMSTATE_RUNNING_FT )
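/* Illustrative sketch of why the macro above is restricted to assertions (see
 * the renaming note in the change log): enmVMState is volatile, so the three
 * comparisons may each observe a different value. That is harmless inside a
 * debug assertion but not acceptable for real control flow.
 *
 * @code
 * // OK: pure debug check.
 * Assert(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM));
 *
 * // NOT OK for control flow; read the state once instead:
 * VMSTATE const enmVMState = pVM->enmVMState;   // single read, then test
 * @endcode
 */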
844
845/** @def VM_ASSERT_IS_NOT_RUNNING
846 * Asserts that the VM is not running.
847 */
848#if defined(IN_RC) || defined(IN_RING0)
849#define VM_ASSERT_IS_NOT_RUNNING(pVM) Assert(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM))
850#else
851#define VM_ASSERT_IS_NOT_RUNNING(pVM) AssertMsg(!VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM), \
852 ("VM is running. enmVMState=%d\n", (pVM)->enmVMState))
853#endif
854
855/** @def VM_ASSERT_EMT0
856 * Asserts that the current thread IS emulation thread \#0 (EMT0).
857 */
858#define VM_ASSERT_EMT0(pVM) VMCPU_ASSERT_EMT(&(pVM)->aCpus[0])
859
860/** @def VM_ASSERT_EMT0_RETURN
861 * Asserts that the current thread IS emulation thread \#0 (EMT0) and returns if
862 * it isn't.
863 */
864#define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN(&(pVM)->aCpus[0], (rc))
865
866
867/**
868 * Asserts that the current thread is NOT the emulation thread.
869 */
870#define VM_ASSERT_OTHER_THREAD(pVM) \
871 AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))
872
873
874/** @def VM_ASSERT_STATE
875 * Asserts a certain VM state.
876 */
877#define VM_ASSERT_STATE(pVM, _enmState) \
878 AssertMsg((pVM)->enmVMState == (_enmState), \
879 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))
880
881/** @def VM_ASSERT_STATE_RETURN
882 * Asserts a certain VM state and returns if it doesn't match.
883 */
884#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
885 AssertMsgReturn((pVM)->enmVMState == (_enmState), \
886 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
887 (rc))
888
889/** @def VM_IS_VALID_EXT
890 * Checks that the VM handle is valid for external access, i.e. not being
891 * destroyed or terminated. */
892#define VM_IS_VALID_EXT(pVM) \
893 ( RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
894 && ( (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
895 || ( (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
896 && VM_IS_EMT(pVM))) )
897
898/** @def VM_ASSERT_VALID_EXT_RETURN
899 * Asserts that the VM handle is valid for external access, i.e. not being
900 * destroyed or terminated.
901 */
902#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
903 AssertMsgReturn(VM_IS_VALID_EXT(pVM), \
904 ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
905 ? VMGetStateName(pVM->enmVMState) : ""), \
906 (rc))
907
908/** @def VMCPU_ASSERT_VALID_EXT_RETURN
909 * Asserts that the VMCPU handle is valid for external access, i.e. not being
910 * destroyed or terminated.
911 */
912#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
913 AssertMsgReturn( RT_VALID_ALIGNED_PTR(pVCpu, 64) \
914 && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
915 && (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
916 ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
917 RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
918 ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
919 (rc))
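/* Illustrative sketch (hypothetical API function) of the validate-then-proceed
 * pattern the _VALID_EXT_ macros above support at external entry points:
 *
 * @code
 * VMMR3DECL(int) vmDemoQuerySomething(PVM pVM, VMCPUID idCpu, uint32_t *puValue)
 * {
 *     VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
 *     AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
 *     VMCPU_ASSERT_VALID_EXT_RETURN(&pVM->aCpus[idCpu], VERR_INVALID_CPU_ID);
 *     *puValue = 0;   // ... do the actual work ...
 *     return VINF_SUCCESS;
 * }
 * @endcode
 */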
920
921#endif /* !VBOX_FOR_DTRACE_LIB */
922
923
924
925/**
926 * The cross context VM structure.
927 *
928 * It contains all the VM data which have to be available in all contexts.
929 * Even though it contains all the data, the idea is to use APIs rather than
930 * modify all the members all around the place. Therefore we make use of unions
931 * to hide everything which isn't local to the current source module. This means we'll
932 * have to pay a little bit of attention when adding new members to structures
933 * in the unions and make sure to keep the padding sizes up to date.
934 *
935 * Run 'kmk run-struct-tests' (from src/VBox/VMM if you like) after updating!
936 */
937typedef struct VM
938{
939 /** The state of the VM.
940 * This field is read only to everyone except the VM and EM. */
941 VMSTATE volatile enmVMState;
942 /** Forced action flags.
943 * See the VM_FF_* \#defines. Updated atomically.
944 */
945 volatile uint32_t fGlobalForcedActions;
946 /** Pointer to the array of page descriptors for the VM structure allocation. */
947 R3PTRTYPE(PSUPPAGE) paVMPagesR3;
948 /** Session handle. For use when calling SUPR0 APIs. */
949 PSUPDRVSESSION pSession;
950 /** Pointer to the ring-3 VM structure. */
951 PUVM pUVM;
952 /** Ring-3 Host Context VM Pointer. */
953 R3PTRTYPE(struct VM *) pVMR3;
954 /** Ring-0 Host Context VM Pointer. */
955 R0PTRTYPE(struct VM *) pVMR0;
956 /** Raw-mode Context VM Pointer. */
957 RCPTRTYPE(struct VM *) pVMRC;
958
959 /** The GVM VM handle. Only the GVM should modify this field. */
960 uint32_t hSelf;
961 /** Number of virtual CPUs. */
962 uint32_t cCpus;
963 /** CPU execution cap (1-100). */
964 uint32_t uCpuExecutionCap;
965
966 /** Size of the VM structure including the VMCPU array. */
967 uint32_t cbSelf;
968
969 /** Offset to the VMCPU array starting from beginning of this structure. */
970 uint32_t offVMCPU;
971
972 /**
973 * VMMSwitcher assembly entry point returning to host context.
974 *
975 * Depending on how the host handles the rc status given in @a eax, this may
976 * return and let the caller resume whatever it was doing prior to the call.
977 *
978 *
979 * @param eax The return code, register.
980 * @remark Assume interrupts disabled.
981 * @remark This method pointer lives here because TRPM needs it.
982 */
983 RTRCPTR pfnVMMRCToHostAsm/*(int32_t eax)*/;
984
985 /**
986 * VMMSwitcher assembly entry point returning to host context without saving the
987 * raw-mode context (hyper) registers.
988 *
989 * Unlike pfnVMMRC2HCAsm, this will not return to the caller. Instead it
990 * expects the caller to save an RC context in CPUM to which one might return if the
991 * return code indicates that this is possible.
992 *
993 * This method pointer lives here because TRPM needs it.
994 *
995 * @param eax The return code, register.
996 * @remark Assume interrupts disabled.
997 * @remark This method pointer lives here because TRPM needs it.
998 */
999 RTRCPTR pfnVMMRCToHostAsmNoReturn/*(int32_t eax)*/;
1000
1001 /** @name Various items that are frequently accessed.
1002 * @{ */
1003 /** Whether to recompile user mode code or run it raw/hm. */
1004 bool fRecompileUser;
1005 /** Whether to recompile supervisor mode code or run it raw/hm. */
1006 bool fRecompileSupervisor;
1007 /** Whether raw mode supports ring-1 code or not. */
1008 bool fRawRing1Enabled;
1009 /** PATM enabled flag.
1010 * This is placed here for performance reasons. */
1011 bool fPATMEnabled;
1012 /** CSAM enabled flag.
1013 * This is placed here for performance reasons. */
1014 bool fCSAMEnabled;
1015 /** Hardware VM support is available and enabled.
1016 * Determined very early during init.
1017 * This is placed here for performance reasons. */
1018 bool fHMEnabled;
1020 /** For asserting on fHMEnabled usage. */
1020 bool fHMEnabledFixed;
1021 /** Hardware VM support requires a minimal raw-mode context.
1022 * This is never set on 64-bit hosts, only 32-bit hosts require it. */
1023 bool fHMNeedRawModeCtx;
1024 /** Set when this VM is the master FT node.
1025 * @todo This doesn't need to be here, FTM should store it in its own
1026 * structures instead. */
1027 bool fFaultTolerantMaster;
1028 /** Large page enabled flag.
1029 * @todo This doesn't need to be here, PGM should store it in its own
1030 * structures instead. */
1031 bool fUseLargePages;
1032 /** @} */
1033
1034 /** Alignment padding. */
1035 uint8_t uPadding1[2];
1036
1037 /** @name Debugging
1038 * @{ */
1039 /** Raw-mode Context VM Pointer. */
1040 RCPTRTYPE(RTTRACEBUF) hTraceBufRC;
1041 /** Ring-3 Host Context VM Pointer. */
1042 R3PTRTYPE(RTTRACEBUF) hTraceBufR3;
1043 /** Ring-0 Host Context VM Pointer. */
1044 R0PTRTYPE(RTTRACEBUF) hTraceBufR0;
1045 /** @} */
1046
1047#if HC_ARCH_BITS == 32
1048 /** Alignment padding. */
1049 uint32_t uPadding2;
1050#endif
1051
1052 /** @name Switcher statistics (remove)
1053 * @{ */
1054 /** Profiling the total time from Qemu to GC. */
1055 STAMPROFILEADV StatTotalQemuToGC;
1056 /** Profiling the total time from GC to Qemu. */
1057 STAMPROFILEADV StatTotalGCToQemu;
1058 /** Profiling the total time spent in GC. */
1059 STAMPROFILEADV StatTotalInGC;
1060 /** Profiling the total time spent not in Qemu. */
1061 STAMPROFILEADV StatTotalInQemu;
1062 /** Profiling the VMMSwitcher code for going to GC. */
1063 STAMPROFILEADV StatSwitcherToGC;
1064 /** Profiling the VMMSwitcher code for going to HC. */
1065 STAMPROFILEADV StatSwitcherToHC;
1066 STAMPROFILEADV StatSwitcherSaveRegs;
1067 STAMPROFILEADV StatSwitcherSysEnter;
1068 STAMPROFILEADV StatSwitcherDebug;
1069 STAMPROFILEADV StatSwitcherCR0;
1070 STAMPROFILEADV StatSwitcherCR4;
1071 STAMPROFILEADV StatSwitcherJmpCR3;
1072 STAMPROFILEADV StatSwitcherRstrRegs;
1073 STAMPROFILEADV StatSwitcherLgdt;
1074 STAMPROFILEADV StatSwitcherLidt;
1075 STAMPROFILEADV StatSwitcherLldt;
1076 STAMPROFILEADV StatSwitcherTSS;
1077 /** @} */
1078
1079 /** Padding - the unions must be aligned on a 64 bytes boundary and the unions
1080 * must start at the same offset on both 64-bit and 32-bit hosts. */
1081 uint8_t abAlignment3[(HC_ARCH_BITS == 32 ? 24 : 0) + 40];
1082
1083 /** CPUM part. */
1084 union
1085 {
1086#ifdef ___CPUMInternal_h
1087 struct CPUM s;
1088#endif
1089#ifdef ___VBox_vmm_cpum_h
1090 /** Read only info exposed about the host and guest CPUs. */
1091 struct
1092 {
1093 /** Padding for hidden fields. */
1094 uint8_t abHidden0[64];
1095 /** Host CPU feature information. */
1096 CPUMFEATURES HostFeatures;
1097 /** Guest CPU feature information. */
1098 CPUMFEATURES GuestFeatures;
1099 } const ro;
1100#endif
1101 uint8_t padding[1536]; /* multiple of 64 */
1102 } cpum;
1103
1104 /** VMM part. */
1105 union
1106 {
1107#ifdef ___VMMInternal_h
1108 struct VMM s;
1109#endif
1110 uint8_t padding[1600]; /* multiple of 64 */
1111 } vmm;
1112
1113 /** PGM part. */
1114 union
1115 {
1116#ifdef ___PGMInternal_h
1117 struct PGM s;
1118#endif
1119 uint8_t padding[4096*2+6080]; /* multiple of 64 */
1120 } pgm;
1121
1122 /** HM part. */
1123 union
1124 {
1125#ifdef ___HMInternal_h
1126 struct HM s;
1127#endif
1128 uint8_t padding[5440]; /* multiple of 64 */
1129 } hm;
1130
1131 /** TRPM part. */
1132 union
1133 {
1134#ifdef ___TRPMInternal_h
1135 struct TRPM s;
1136#endif
1137 uint8_t padding[5248]; /* multiple of 64 */
1138 } trpm;
1139
1140 /** SELM part. */
1141 union
1142 {
1143#ifdef ___SELMInternal_h
1144 struct SELM s;
1145#endif
1146 uint8_t padding[768]; /* multiple of 64 */
1147 } selm;
1148
1149 /** MM part. */
1150 union
1151 {
1152#ifdef ___MMInternal_h
1153 struct MM s;
1154#endif
1155 uint8_t padding[192]; /* multiple of 64 */
1156 } mm;
1157
1158 /** PDM part. */
1159 union
1160 {
1161#ifdef ___PDMInternal_h
1162 struct PDM s;
1163#endif
1164 uint8_t padding[1920]; /* multiple of 64 */
1165 } pdm;
1166
1167 /** IOM part. */
1168 union
1169 {
1170#ifdef ___IOMInternal_h
1171 struct IOM s;
1172#endif
1173 uint8_t padding[896]; /* multiple of 64 */
1174 } iom;
1175
1176 /** EM part. */
1177 union
1178 {
1179#ifdef ___EMInternal_h
1180 struct EM s;
1181#endif
1182 uint8_t padding[256]; /* multiple of 64 */
1183 } em;
1184
1185 /** TM part. */
1186 union
1187 {
1188#ifdef ___TMInternal_h
1189 struct TM s;
1190#endif
1191 uint8_t padding[2496]; /* multiple of 64 */
1192 } tm;
1193
1194 /** DBGF part. */
1195 union
1196 {
1197#ifdef ___DBGFInternal_h
1198 struct DBGF s;
1199#endif
1200#ifdef ___VBox_vmm_dbgf_h
1201 /** Read only info exposed about interrupt breakpoints and selected events. */
1202 struct
1203 {
1204 /** Bitmap of enabled hardware interrupt breakpoints. */
1205 uint32_t bmHardIntBreakpoints[256 / 32];
1206 /** Bitmap of enabled software interrupt breakpoints. */
1207 uint32_t bmSoftIntBreakpoints[256 / 32];
1208 /** Bitmap of selected events.
1209 * This includes non-selectable events too for simplicity, we maintain the
1210 * state for some of these, as it may come in handy. */
1211 uint64_t bmSelectedEvents[(DBGFEVENT_END + 63) / 64];
1212 /** Enabled hardware interrupt breakpoints. */
1213 uint32_t cHardIntBreakpoints;
1214 /** Enabled software interrupt breakpoints. */
1215 uint32_t cSoftIntBreakpoints;
1216 /** The number of selected events. */
1217 uint32_t cSelectedEvents;
1218 /** The number of enabled hardware breakpoints. */
1219 uint8_t cEnabledHwBreakpoints;
1220 /** The number of enabled hardware I/O breakpoints. */
1221 uint8_t cEnabledHwIoBreakpoints;
1222 /** The number of enabled INT3 breakpoints. */
1223 uint8_t cEnabledInt3Breakpoints;
1224 uint8_t abPadding[1]; /**< Unused padding space up for grabs. */
1225 } const ro;
1226#endif
1227 uint8_t padding[2368]; /* multiple of 64 */
1228 } dbgf;
1229
1230 /** SSM part. */
1231 union
1232 {
1233#ifdef ___SSMInternal_h
1234 struct SSM s;
1235#endif
1236 uint8_t padding[128]; /* multiple of 64 */
1237 } ssm;
1238
1239 /** FTM part. */
1240 union
1241 {
1242#ifdef ___FTMInternal_h
1243 struct FTM s;
1244#endif
1245 uint8_t padding[512]; /* multiple of 64 */
1246 } ftm;
1247
1248#ifdef VBOX_WITH_RAW_MODE
1249 /** PATM part. */
1250 union
1251 {
1252# ifdef ___PATMInternal_h
1253 struct PATM s;
1254# endif
1255 uint8_t padding[768]; /* multiple of 64 */
1256 } patm;
1257
1258 /** CSAM part. */
1259 union
1260 {
1261# ifdef ___CSAMInternal_h
1262 struct CSAM s;
1263# endif
1264 uint8_t padding[1088]; /* multiple of 64 */
1265 } csam;
1266#endif
1267
1268#ifdef VBOX_WITH_REM
1269 /** REM part. */
1270 union
1271 {
1272# ifdef ___REMInternal_h
1273 struct REM s;
1274# endif
1275 uint8_t padding[0x11100]; /* multiple of 64 */
1276 } rem;
1277#endif
1278
1279 union
1280 {
1281#ifdef ___GIMInternal_h
1282 struct GIM s;
1283#endif
1284 uint8_t padding[448]; /* multiple of 64 */
1285 } gim;
1286
1287 union
1288 {
1289#ifdef ___APICInternal_h
1290 struct APIC s;
1291#endif
1292 uint8_t padding[128]; /* multiple of 8 */
1293 } apic;
1294
1295 /* ---- begin small stuff ---- */
1296
1297 /** VM part. */
1298 union
1299 {
1300#ifdef ___VMInternal_h
1301 struct VMINT s;
1302#endif
1303 uint8_t padding[24]; /* multiple of 8 */
1304 } vm;
1305
1306 /** CFGM part. */
1307 union
1308 {
1309#ifdef ___CFGMInternal_h
1310 struct CFGM s;
1311#endif
1312 uint8_t padding[8]; /* multiple of 8 */
1313 } cfgm;
1314
1315 /** Padding for aligning the cpu array on a page boundary. */
1316#if defined(VBOX_WITH_REM) && defined(VBOX_WITH_RAW_MODE)
1317 uint8_t abAlignment2[3870];
1318#elif defined(VBOX_WITH_REM) && !defined(VBOX_WITH_RAW_MODE)
1319 uint8_t abAlignment2[1630];
1320#elif !defined(VBOX_WITH_REM) && defined(VBOX_WITH_RAW_MODE)
1321 uint8_t abAlignment2[30];
1322#else
1323 uint8_t abAlignment2[1886];
1324#endif
1325
1326 /* ---- end small stuff ---- */
1327
1328 /** VMCPU array for the configured number of virtual CPUs.
1329 * Must be aligned on a page boundary for TLB hit reasons as well as
1330 * alignment of VMCPU members. */
1331 VMCPU aCpus[1];
1332} VM;
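/* Illustrative sketch: aCpus is a variable-length trailer; the VM structure
 * is allocated with room for cCpus entries even though only one is declared
 * (VM_ASSERT_EMT0 above indexes it the same way). Per-CPU data is therefore
 * reached by plain indexing, with idCpu < pVM->cCpus:
 *
 * @code
 * PVMCPU pVCpu = &pVM->aCpus[idCpu];   // page-aligned, see the comment above
 * @endcode
 */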
1333
1334
1335#ifdef IN_RC
1336RT_C_DECLS_BEGIN
1337
1338/** The VM structure.
1339 * This is imported from the VMMRCBuiltin module, i.e. it's one of those magic
1340 * globals which we should avoid using.
1341 */
1342extern DECLIMPORT(VM) g_VM;
1343
1344RT_C_DECLS_END
1345#endif
1346
1347/** @} */
1348
1349#endif
1350