VirtualBox

source: vbox/trunk/include/VBox/vm.h@ 31485

Last change on this file since 31485 was 31359, checked in by vboxsync, 15 years ago

Keep track of the native R0 thread handle for each EMT too. Use that to find the right VCPU

1/** @file
2 * VM - The Virtual Machine, data. (VMM)
3 */
4
5/*
6 * Copyright (C) 2006-2007 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vm_h
27#define ___VBox_vm_h
28
29#include <VBox/cdefs.h>
30#include <VBox/types.h>
31#include <VBox/cpum.h>
32#include <VBox/stam.h>
33#include <VBox/vmapi.h>
34#include <VBox/sup.h>
35#include <VBox/vmm.h>
36
37
38/** @defgroup grp_vm The Virtual Machine
39 * @{
40 */
41
42/**
43 * The state of a Virtual CPU.
44 *
45 * The basic state indicated here is whether the CPU has been started or not. In
46 * addition, there are sub-states when it is started, used mostly for assisting
47 * scheduling (GVMM).
48 *
49 * The transition out of the STOPPED state is done by vmR3PowerOn.
50 * The transition back to the STOPPED state is done by vmR3PowerOff.
51 *
52 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
53 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
54 */
55typedef enum VMCPUSTATE
56{
57 /** The customary invalid zero. */
58 VMCPUSTATE_INVALID = 0,
59
60 /** Virtual CPU has not yet been started. */
61 VMCPUSTATE_STOPPED,
62
63 /** CPU started. */
64 VMCPUSTATE_STARTED,
65 /** Executing guest code and can be poked. */
66 VMCPUSTATE_STARTED_EXEC,
67 /** Executing guest code in the recompiler. */
68 VMCPUSTATE_STARTED_EXEC_REM,
69 /** Halted. */
70 VMCPUSTATE_STARTED_HALTED,
71
72 /** The end of valid virtual CPU states. */
73 VMCPUSTATE_END,
74
75 /** Ensure 32-bit type. */
76 VMCPUSTATE_32BIT_HACK = 0x7fffffff
77} VMCPUSTATE;
78
79
80/**
81 * Per virtual CPU data.
82 */
83typedef struct VMCPU
84{
85 /** Per CPU forced action.
86 * See the VMCPU_FF_* \#defines. Updated atomically. */
87 uint32_t volatile fLocalForcedActions;
88 /** The CPU state. */
89 VMCPUSTATE volatile enmState;
90
91 /** Pointer to the ring-3 UVMCPU structure. */
92 PUVMCPU pUVCpu;
93 /** Ring-3 Host Context VM Pointer. */
94 PVMR3 pVMR3;
95 /** Ring-0 Host Context VM Pointer. */
96 PVMR0 pVMR0;
97 /** Raw-mode Context VM Pointer. */
98 PVMRC pVMRC;
99 /** The CPU ID.
100 * This is the index into the VM::aCpus array. */
101 VMCPUID idCpu;
102 /** The native thread handle. */
103 RTNATIVETHREAD hNativeThread;
104 /** The native R0 thread handle. (different from the R3 handle!) */
105 RTNATIVETHREAD hNativeThreadR0;
106 /** The host CPU ID this EMT is running on.
107 * Only valid when in RC or HWACCMR0 with scheduling disabled. */
108 RTCPUID volatile idHostCpu;
109
110 /** Align the next member on a 64-byte boundary and make sure it starts at the same
111 * offset in both 64-bit and 32-bit builds.
112 *
113 * @remarks The alignments of the members that are larger than 48 bytes should be
114 * 64 bytes for cache-line reasons. Structs containing small amounts of
115 * data could be lumped together at the end with a < 64 byte padding
116 * following it (to grow into and align the struct size).
117 */
118 uint8_t abAlignment1[HC_ARCH_BITS == 32 ? 24 : 4];
119
120 /** CPUM part. */
121 union
122 {
123#ifdef ___CPUMInternal_h
124 struct CPUMCPU s;
125#endif
126 uint8_t padding[3456]; /* multiple of 64 */
127 } cpum;
128
129 /** HWACCM part. */
130 union
131 {
132#ifdef ___HWACCMInternal_h
133 struct HWACCMCPU s;
134#endif
135 uint8_t padding[5312]; /* multiple of 64 */
136 } hwaccm;
137
138 /** EM part. */
139 union
140 {
141#ifdef ___EMInternal_h
142 struct EMCPU s;
143#endif
144 uint8_t padding[1408]; /* multiple of 64 */
145 } em;
146
147 /** TRPM part. */
148 union
149 {
150#ifdef ___TRPMInternal_h
151 struct TRPMCPU s;
152#endif
153 uint8_t padding[128]; /* multiple of 64 */
154 } trpm;
155
156 /** TM part. */
157 union
158 {
159#ifdef ___TMInternal_h
160 struct TMCPU s;
161#endif
162 uint8_t padding[256]; /* multiple of 64 */
163 } tm;
164
165 /** VMM part. */
166 union
167 {
168#ifdef ___VMMInternal_h
169 struct VMMCPU s;
170#endif
171 uint8_t padding[384]; /* multiple of 64 */
172 } vmm;
173
174 /** PDM part. */
175 union
176 {
177#ifdef ___PDMInternal_h
178 struct PDMCPU s;
179#endif
180 uint8_t padding[128]; /* multiple of 64 */
181 } pdm;
182
183 /** IOM part. */
184 union
185 {
186#ifdef ___IOMInternal_h
187 struct IOMCPU s;
188#endif
189 uint8_t padding[512]; /* multiple of 64 */
190 } iom;
191
192 /** DBGF part.
193 * @todo Combine this with other tiny structures. */
194 union
195 {
196#ifdef ___DBGFInternal_h
197 struct DBGFCPU s;
198#endif
199 uint8_t padding[64]; /* multiple of 64 */
200 } dbgf;
201
202 /** Align the following members on a page boundary. */
203 uint8_t abAlignment2[576];
204
205 /** PGM part. */
206 union
207 {
208#ifdef ___PGMInternal_h
209 struct PGMCPU s;
210#endif
211 uint8_t padding[4096]; /* multiple of 4096 */
212 } pgm;
213
214} VMCPU;
215
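/* A minimal sketch of the union-with-padding pattern used for the members
 * above: only code that has included a module's internal header (which
 * defines the ___*Internal_h guard) sees the typed 's' view; everything else
 * sees just the padded blob, so the layout stays identical for all modules.
 * The two functions below are hypothetical illustrations. */
#if 0 /* illustration only */
/* In a CPUM source file (CPUMInternal.h included before this header): */
static void cpumExamplePrivateAccess(PVMCPU pVCpu)
{
    struct CPUMCPU *pCpumCpu = &pVCpu->cpum.s;  /* typed view is available */
    NOREF(pCpumCpu); /* ... CPUM-private state manipulation ... */
}

/* In any other module the same member is just an opaque, padded blob: */
static size_t otherModuleExampleOpaqueSize(PVMCPU pVCpu)
{
    return sizeof(pVCpu->cpum.padding);         /* no CPUMCPU access here  */
}
#endif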
216
217/** @name Operations on VMCPU::enmState
218 * @{ */
219/** Gets the VMCPU state. */
220#define VMCPU_GET_STATE(pVCpu) ( (pVCpu)->enmState )
221/** Sets the VMCPU state. */
222#define VMCPU_SET_STATE(pVCpu, enmNewState) \
223 ASMAtomicWriteU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState))
224/** Compares and sets the VMCPU state. */
225#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
226 ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
227/** Asserts a certain VMCPU state. */
228#define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
229 do { \
230 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
231 AssertMsg(enmState == (enmExpectedState), \
232 ("enmState=%d enmExpectedState=%d idCpu=%u\n", \
233 enmState, enmExpectedState, (pVCpu)->idCpu)); \
234 } while (0)
235/** Tests if the state means that the CPU is started. */
236#define VMCPUSTATE_IS_STARTED(enmState) ( (enmState) > VMCPUSTATE_STOPPED )
237/** Tests if the state means that the CPU is stopped. */
238#define VMCPUSTATE_IS_STOPPED(enmState) ( (enmState) == VMCPUSTATE_STOPPED )
239/** @} */
240
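/* A minimal usage sketch for the macros above: an EMT driving its own VMCPU
 * state around a guest-execution burst.  The function name and control flow
 * are hypothetical; only the macros and VMCPUSTATE values come from this
 * header. */
#if 0 /* illustration only */
static void emtExampleExecBurst(PVMCPU pVCpu)
{
    /* Coming from the plain started state... */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);

    /* ...mark the VCPU as executing guest code so it can be poked... */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

    /* ...run guest code... */

    /* ...and drop back.  The cmpxchg variant only performs the transition
     * if the state is still what we expect, guarding against races. */
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
}
#endif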
241
242/** The name of the Guest Context VMM Core module. */
243#define VMMGC_MAIN_MODULE_NAME "VMMGC.gc"
244/** The name of the Ring 0 Context VMM Core module. */
245#define VMMR0_MAIN_MODULE_NAME "VMMR0.r0"
246
247/** VM Forced Action Flags.
248 *
249 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
250 * action mask of a VM.
251 *
252 * @{
253 */
254/** The virtual sync clock has been stopped, go to TM until it has been
255 * restarted... */
256#define VM_FF_TM_VIRTUAL_SYNC RT_BIT_32(2)
257/** PDM Queues are pending. */
258#define VM_FF_PDM_QUEUES RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
259/** The bit number for VM_FF_PDM_QUEUES. */
260#define VM_FF_PDM_QUEUES_BIT 3
261/** PDM DMA transfers are pending. */
262#define VM_FF_PDM_DMA RT_BIT_32(VM_FF_PDM_DMA_BIT)
263/** The bit number for VM_FF_PDM_DMA. */
264#define VM_FF_PDM_DMA_BIT 4
265/** This action forces the VM to call DBGF so DBGF can service debugger
266 * requests in the emulation thread.
267 * This action flag stays asserted till DBGF clears it.*/
268#define VM_FF_DBGF RT_BIT_32(VM_FF_DBGF_BIT)
269/** The bit number for VM_FF_DBGF. */
270#define VM_FF_DBGF_BIT 8
271/** This action forces the VM to service pending requests from other
272 * threads or requests which must be executed in another context. */
273#define VM_FF_REQUEST RT_BIT_32(9)
274/** Check for VM state changes and take appropriate action. */
275#define VM_FF_CHECK_VM_STATE RT_BIT_32(VM_FF_CHECK_VM_STATE_BIT)
276/** The bit number for VM_FF_CHECK_VM_STATE. */
277#define VM_FF_CHECK_VM_STATE_BIT 10
278/** Reset the VM. (postponed) */
279#define VM_FF_RESET RT_BIT_32(VM_FF_RESET_BIT)
280/** The bit number for VM_FF_RESET. */
281#define VM_FF_RESET_BIT 11
282/** EMT rendezvous in VMM. */
283#define VM_FF_EMT_RENDEZVOUS RT_BIT_32(VM_FF_EMT_RENDEZVOUS_BIT)
284/** The bit number for VM_FF_EMT_RENDEZVOUS. */
285#define VM_FF_EMT_RENDEZVOUS_BIT 12
286
287/** PGM needs to allocate handy pages. */
288#define VM_FF_PGM_NEED_HANDY_PAGES RT_BIT_32(18)
289/** PGM is out of memory.
290 * Abandon all loops and code paths which can be resumed and get up to the EM
291 * loops. */
292#define VM_FF_PGM_NO_MEMORY RT_BIT_32(19)
293/** PGM is about to perform a lightweight pool flush.
294 * Guest SMP: all EMT threads should return to ring-3.
295 */
296#define VM_FF_PGM_POOL_FLUSH_PENDING RT_BIT_32(20)
297/** REM needs to be informed about handler changes. */
298#define VM_FF_REM_HANDLER_NOTIFY RT_BIT_32(VM_FF_REM_HANDLER_NOTIFY_BIT)
299/** The bit number for VM_FF_REM_HANDLER_NOTIFY. */
300#define VM_FF_REM_HANDLER_NOTIFY_BIT 29
301/** Suspend the VM - debug only. */
302#define VM_FF_DEBUG_SUSPEND RT_BIT_32(31)
303
304
305/** This action forces the VM to check any pending interrupts on the APIC. */
306#define VMCPU_FF_INTERRUPT_APIC RT_BIT_32(0)
307/** This action forces the VM to check any pending interrupts on the PIC. */
308#define VMCPU_FF_INTERRUPT_PIC RT_BIT_32(1)
309/** This action forces the VM to schedule and run pending timers (TM).
310 * @remarks Don't move - PATM compatibility. */
311#define VMCPU_FF_TIMER RT_BIT_32(2)
312/** This action forces the VM to check any pending NMIs. */
313#define VMCPU_FF_INTERRUPT_NMI_BIT 3
314#define VMCPU_FF_INTERRUPT_NMI RT_BIT_32(VMCPU_FF_INTERRUPT_NMI_BIT)
315/** This action forces the VM to check any pending SMIs. */
316#define VMCPU_FF_INTERRUPT_SMI_BIT 4
317#define VMCPU_FF_INTERRUPT_SMI RT_BIT_32(VMCPU_FF_INTERRUPT_SMI_BIT)
318/** PDM critical section unlocking is pending; process promptly upon return to R3. */
319#define VMCPU_FF_PDM_CRITSECT RT_BIT_32(5)
320/** This action forces the VM to service pending requests from other
321 * threads or requests which must be executed in another context. */
322#define VMCPU_FF_REQUEST RT_BIT_32(9)
323/** This action forces the VM to resync the page tables before going
324 * back to execute guest code. (GLOBAL FLUSH) */
325#define VMCPU_FF_PGM_SYNC_CR3 RT_BIT_32(16)
326/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
327 * (NON-GLOBAL FLUSH) */
328#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL RT_BIT_32(17)
329/** Check for pending TLB shootdown actions. */
330#define VMCPU_FF_TLB_SHOOTDOWN RT_BIT_32(18)
331/** Check for pending TLB flush action. */
332#define VMCPU_FF_TLB_FLUSH RT_BIT_32(VMCPU_FF_TLB_FLUSH_BIT)
333/** The bit number for VMCPU_FF_TLB_FLUSH. */
334#define VMCPU_FF_TLB_FLUSH_BIT 19
335/** Check the interrupt and trap gates. */
336#define VMCPU_FF_TRPM_SYNC_IDT RT_BIT_32(20)
337/** Check the guest's TSS ring-0 stack. */
338#define VMCPU_FF_SELM_SYNC_TSS RT_BIT_32(21)
339/** Check the guest's GDT. */
340#define VMCPU_FF_SELM_SYNC_GDT RT_BIT_32(22)
341/** Check the guest's LDT. */
342#define VMCPU_FF_SELM_SYNC_LDT RT_BIT_32(23)
343/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
344#define VMCPU_FF_INHIBIT_INTERRUPTS RT_BIT_32(24)
345/** CSAM needs to scan the page that's being executed */
346#define VMCPU_FF_CSAM_SCAN_PAGE RT_BIT_32(26)
347/** CSAM needs to do some homework. */
348#define VMCPU_FF_CSAM_PENDING_ACTION RT_BIT_32(27)
349/** Force return to Ring-3. */
350#define VMCPU_FF_TO_R3 RT_BIT_32(28)
351
352/** Externally forced VM actions. Used to quit the idle/wait loop. */
353#define VM_FF_EXTERNAL_SUSPENDED_MASK (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_EMT_RENDEZVOUS)
354/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
355#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK (VMCPU_FF_REQUEST)
356
357/** Externally forced VM actions. Used to quit the idle/wait loop. */
358#define VM_FF_EXTERNAL_HALTED_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST \
359 | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS)
360/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
361#define VMCPU_FF_EXTERNAL_HALTED_MASK (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_TIMER)
362
363/** High priority VM pre-execution actions. */
364#define VM_FF_HIGH_PRIORITY_PRE_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC \
365 | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
366/** High priority VMCPU pre-execution actions. */
367#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 \
368 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
369 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)
370
371/** High priority VM pre raw-mode execution mask. */
372#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
373/** High priority VMCPU pre raw-mode execution mask. */
374#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
375 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)
376
377/** High priority post-execution actions. */
378#define VM_FF_HIGH_PRIORITY_POST_MASK (VM_FF_PGM_NO_MEMORY)
379/** High priority post-execution actions. */
380#define VMCPU_FF_HIGH_PRIORITY_POST_MASK (VMCPU_FF_PDM_CRITSECT|VMCPU_FF_CSAM_PENDING_ACTION)
381
382/** Normal priority VM post-execution actions. */
383#define VM_FF_NORMAL_PRIORITY_POST_MASK ( VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET \
384 | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
385/** Normal priority VMCPU post-execution actions. */
386#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK (VMCPU_FF_CSAM_SCAN_PAGE)
387
388/** Normal priority VM actions. */
389#define VM_FF_NORMAL_PRIORITY_MASK (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS)
390/** Normal priority VMCPU actions. */
391#define VMCPU_FF_NORMAL_PRIORITY_MASK (VMCPU_FF_REQUEST)
392
393/** Flags to clear before resuming guest execution. */
394#define VMCPU_FF_RESUME_GUEST_MASK (VMCPU_FF_TO_R3)
395
396/** VM Flags that cause the HWACCM loops to go back to ring-3. */
397#define VM_FF_HWACCM_TO_R3_MASK (VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_QUEUES | VM_FF_EMT_RENDEZVOUS)
398/** VMCPU Flags that cause the HWACCM loops to go back to ring-3. */
399#define VMCPU_FF_HWACCM_TO_R3_MASK (VMCPU_FF_TO_R3 | VMCPU_FF_TIMER)
400
401/** All the forced VM flags. */
402#define VM_FF_ALL_MASK (~0U)
403/** All the forced VMCPU flags. */
404#define VMCPU_FF_ALL_MASK (~0U)
405
406/** All the forced VM flags except the high priority pre raw-mode ones (VM_FF_PGM_NO_MEMORY is kept). */
407#define VM_FF_ALL_BUT_RAW_MASK (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) | VM_FF_PGM_NO_MEMORY)
408/** All the forced VMCPU flags except the high priority pre raw-mode ones, CSAM pending action and PDM critical section. */
409#define VMCPU_FF_ALL_BUT_RAW_MASK (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_PDM_CRITSECT))
410
411/** @} */
412
413/** @def VM_FF_SET
414 * Sets a force action flag.
415 *
416 * @param pVM VM Handle.
417 * @param fFlag The flag to set.
418 */
419#if 1
420# define VM_FF_SET(pVM, fFlag) ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
421#else
422# define VM_FF_SET(pVM, fFlag) \
423 do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
424 RTLogPrintf("VM_FF_SET : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
425 } while (0)
426#endif
427
428/** @def VMCPU_FF_SET
429 * Sets a force action flag for the given VCPU.
430 *
431 * @param pVCpu VMCPU Handle.
432 * @param fFlag The flag to set.
433 */
434#define VMCPU_FF_SET(pVCpu, fFlag) ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))
435
436/** @def VM_FF_CLEAR
437 * Clears a force action flag.
438 *
439 * @param pVM VM Handle.
440 * @param fFlag The flag to clear.
441 */
442#if 1
443# define VM_FF_CLEAR(pVM, fFlag) ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
444#else
445# define VM_FF_CLEAR(pVM, fFlag) \
446 do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
447 RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
448 } while (0)
449#endif
450
451/** @def VMCPU_FF_CLEAR
452 * Clears a force action flag for the given VCPU.
453 *
454 * @param pVCpu VMCPU Handle.
455 * @param fFlag The flag to clear.
456 */
457#define VMCPU_FF_CLEAR(pVCpu, fFlag) ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))
458
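/* A minimal sketch of the set/clear round trip: a producer raises forced
 * actions and the owning EMT clears them once serviced.  The function names
 * are hypothetical; waking or poking the target EMT after setting a flag is
 * handled elsewhere (VMM/GVMM) and is not shown. */
#if 0 /* illustration only */
static void exampleQueueWork(PVM pVM, PVMCPU pVCpu)
{
    VM_FF_SET(pVM, VM_FF_PDM_QUEUES);       /* VM-wide: PDM queues pending.   */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TIMER);    /* Per-VCPU: timers need running. */
}

static void exampleServiceWork(PVM pVM, PVMCPU pVCpu)
{
    /* ...drain the PDM queues and run the pending timers... */
    VM_FF_CLEAR(pVM, VM_FF_PDM_QUEUES);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
}
#endif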
459/** @def VM_FF_ISSET
460 * Checks if a force action flag is set.
461 *
462 * @param pVM VM Handle.
463 * @param fFlag The flag to check.
464 */
465#define VM_FF_ISSET(pVM, fFlag) (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
466
467/** @def VMCPU_FF_ISSET
468 * Checks if a force action flag is set for the given VCPU.
469 *
470 * @param pVCpu VMCPU Handle.
471 * @param fFlag The flag to check.
472 */
473#define VMCPU_FF_ISSET(pVCpu, fFlag) (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
474
475/** @def VM_FF_ISPENDING
476 * Checks if one or more force actions in the specified set are pending.
477 *
478 * @param pVM VM Handle.
479 * @param fFlags The flags to check for.
480 */
481#define VM_FF_ISPENDING(pVM, fFlags) ((pVM)->fGlobalForcedActions & (fFlags))
482
483/** @def VM_FF_TESTANDCLEAR
484 * Checks if one (!) force action in the specified set is pending and clears it atomically
485 *
486 * @returns true if the bit was set.
487 * @returns false if the bit was clear.
488 * @param pVM VM Handle.
489 * @param iBit Bit position to check and clear
490 */
491#define VM_FF_TESTANDCLEAR(pVM, iBit) (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))
492
493/** @def VMCPU_FF_TESTANDCLEAR
494 * Checks if one (!) force action in the specified set is pending and clears it atomically
495 *
496 * @returns true if the bit was set.
497 * @returns false if the bit was clear.
498 * @param pVCpu VMCPU Handle.
499 * @param iBit Bit position to check and clear
500 */
501#define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
502
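/* Note that the iBit argument is token-pasted with _BIT, so callers pass the
 * flag name itself and a matching *_BIT define must exist (e.g. VM_FF_PDM_DMA
 * and VM_FF_PDM_DMA_BIT above).  A hedged sketch with a hypothetical caller: */
#if 0 /* illustration only */
static void examplePollDma(PVM pVM)
{
    /* Expands to ASMAtomicBitTestAndClear(..., VM_FF_PDM_DMA_BIT). */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_PDM_DMA))
    {
        /* ...run the pending PDM DMA transfers... */
    }
}
#endif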
503/** @def VMCPU_FF_ISPENDING
504 * Checks if one or more force actions in the specified set are pending for the given VCPU.
505 *
506 * @param pVCpu VMCPU Handle.
507 * @param fFlags The flags to check for.
508 */
509#define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->fLocalForcedActions & (fFlags))
510
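/* The execution loops typically test whole priority masks at once before
 * resuming guest code.  A hedged sketch; the wrapper function and the
 * decision to leave the loop are illustrative, the masks are defined earlier
 * in this header. */
#if 0 /* illustration only */
static bool exampleMustLeaveGuestLoop(PVM pVM, PVMCPU pVCpu)
{
    return VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
        || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK);
}
#endif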
511/** @def VM_FF_IS_PENDING_EXCEPT
512 * Checks if one or more force actions in the specified set are pending while one
513 * or more other ones are not.
514 *
515 * @param pVM VM Handle.
516 * @param fFlags The flags to check for.
517 * @param fExcpt The flags that should not be set.
518 */
519#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt) ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )
520
521/** @def VMCPU_FF_IS_PENDING_EXCEPT
522 * Checks if one or more force actions in the specified set are pending for the given
523 * VCPU while one or more other ones are not.
524 *
525 * @param pVCpu VMCPU Handle.
526 * @param fFlags The flags to check for.
527 * @param fExcpt The flags that should not be set.
528 */
529#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
530
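/* VM_FF_IS_PENDING_EXCEPT is useful when some flags should veto further
 * work, e.g. not servicing normal-priority actions while PGM is out of
 * memory.  The wrapper function below is hypothetical. */
#if 0 /* illustration only */
static bool exampleHasNormalPriorityWork(PVM pVM)
{
    return VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY);
}
#endif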
531/** @def VM_IS_EMT
532 * Checks if the current thread is the emulation thread (EMT).
533 *
534 * @remark The ring-0 variation will need attention if we expand the ring-0
535 * code to let threads other than EMT mess around with the VM.
536 */
537#ifdef IN_RC
538# define VM_IS_EMT(pVM) true
539#else
540# define VM_IS_EMT(pVM) (VMMGetCpu(pVM) != NULL)
541#endif
542
543/** @def VMCPU_IS_EMT
544 * Checks if the current thread is the emulation thread (EMT) for the specified
545 * virtual CPU.
546 */
547#ifdef IN_RC
548# define VMCPU_IS_EMT(pVCpu) true
549#else
550# define VMCPU_IS_EMT(pVCpu) ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
551#endif
552
553/** @def VM_ASSERT_EMT
554 * Asserts that the current thread IS the emulation thread (EMT).
555 */
556#ifdef IN_RC
557# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
558#elif defined(IN_RING0)
559# define VM_ASSERT_EMT(pVM) Assert(VM_IS_EMT(pVM))
560#else
561# define VM_ASSERT_EMT(pVM) \
562 AssertMsg(VM_IS_EMT(pVM), \
563 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
564#endif
565
566/** @def VMCPU_ASSERT_EMT
567 * Asserts that the current thread IS the emulation thread (EMT) of the
568 * specified virtual CPU.
569 */
570#ifdef IN_RC
571# define VMCPU_ASSERT_EMT(pVCpu) Assert(VMCPU_IS_EMT(pVCpu))
572#elif defined(IN_RING0)
573# define VMCPU_ASSERT_EMT(pVCpu) Assert(VMCPU_IS_EMT(pVCpu))
574#else
575# define VMCPU_ASSERT_EMT(pVCpu) \
576 AssertMsg(VMCPU_IS_EMT(pVCpu), \
577 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
578 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
579#endif
580
581/** @def VM_ASSERT_EMT_RETURN
582 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
583 */
584#ifdef IN_RC
585# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
586#elif defined(IN_RING0)
587# define VM_ASSERT_EMT_RETURN(pVM, rc) AssertReturn(VM_IS_EMT(pVM), (rc))
588#else
589# define VM_ASSERT_EMT_RETURN(pVM, rc) \
590 AssertMsgReturn(VM_IS_EMT(pVM), \
591 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
592 (rc))
593#endif
594
595/** @def VMCPU_ASSERT_EMT_RETURN
596 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
597 */
598#ifdef IN_RC
599# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
600#elif defined(IN_RING0)
601# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
602#else
603# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
604 AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
605 ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
606 RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
607 (rc))
608#endif
609
610/** @def VM_ASSERT_EMT0
611 * Asserts that the current thread IS emulation thread \#0 (EMT0).
612 */
613#define VM_ASSERT_EMT0(pVM) VMCPU_ASSERT_EMT(&(pVM)->aCpus[0])
614
615/** @def VM_ASSERT_EMT0_RETURN
616 * Asserts that the current thread IS emulation thread \#0 (EMT0) and returns if
617 * it isn't.
618 */
619#define VM_ASSERT_EMT0_RETURN(pVM, rc) VMCPU_ASSERT_EMT_RETURN(&(pVM)->aCpus[0], (rc))
620
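/* Ring-3 APIs that may only be called on the emulation thread typically
 * assert this on entry.  A hedged sketch: the function is hypothetical and
 * the status codes are assumed to come from VBox/err.h, which this header
 * does not include. */
#if 0 /* illustration only */
static int exampleEmtOnlyApi(PVM pVM, PVMCPU pVCpu)
{
    VM_ASSERT_EMT(pVM);                                      /* assert only   */
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);  /* assert+return */
    /* ...EMT-only work... */
    return VINF_SUCCESS;
}
#endif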
621
622/**
623 * Asserts that the current thread is NOT the emulation thread.
624 */
625#define VM_ASSERT_OTHER_THREAD(pVM) \
626 AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))
627
628
629/** @def VM_ASSERT_STATE
630 * Asserts a certain VM state.
631 */
632#define VM_ASSERT_STATE(pVM, _enmState) \
633 AssertMsg((pVM)->enmVMState == (_enmState), \
634 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))
635
636/** @def VM_ASSERT_STATE_RETURN
637 * Asserts a certain VM state and returns if it doesn't match.
638 */
639#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
640 AssertMsgReturn((pVM)->enmVMState == (_enmState), \
641 ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
642 (rc))
643
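/* A hedged sketch of guarding an operation on the current VM state.  The
 * function, the chosen state and the status code are illustrative; VMSTATE
 * values come from VBox/vmapi.h (included above). */
#if 0 /* illustration only */
static int exampleNeedsRunningVM(PVM pVM)
{
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_RUNNING, VERR_VM_INVALID_VM_STATE);
    /* ...work that requires a running VM... */
    return VINF_SUCCESS;
}
#endif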
644/** @def VM_ASSERT_VALID_EXT_RETURN
645 * Asserts that the VM handle is valid for external access, i.e. not being
646 * destroyed or terminated.
647 */
648#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
649 AssertMsgReturn( RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
650 && ( (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
651 || ( (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
652 && VM_IS_EMT(pVM))), \
653 ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
654 ? VMGetStateName(pVM->enmVMState) : ""), \
655 (rc))
656
657/** @def VMCPU_ASSERT_VALID_EXT_RETURN
658 * Asserts that the VMCPU handle is valid for external access, i.e. not being
659 * destroyed or terminated.
660 */
661#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
662 AssertMsgReturn( RT_VALID_ALIGNED_PTR(pVCpu, 64) \
663 && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
664 && (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
665 ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
666 RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
667 ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
668 (rc))
669
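/* External entry points (e.g. calls crossing over from the API layer)
 * validate the handles they were given before touching them.  A hedged
 * sketch; the function and status codes are illustrative. */
#if 0 /* illustration only */
static int exampleExternalQuery(PVM pVM, VMCPUID idCpu)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
    VMCPU_ASSERT_VALID_EXT_RETURN(&pVM->aCpus[idCpu], VERR_INVALID_CPU_ID);
    /* ...safe to use the handles now... */
    return VINF_SUCCESS;
}
#endif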
670
671/** This is the VM structure.
672 *
673 * It contains (nearly?) all the VM data which has to be available in all
674 * contexts. Even though it contains all the data, the idea is to use APIs
675 * rather than modifying the members directly from all over the place. We
676 * therefore use unions to hide everything which isn't local to the current
677 * source module. This means we have to pay a little attention when adding
678 * new members to structures in the unions and make sure to keep the padding
679 * sizes up to date.
680 *
681 * Run tstVMStructSize after update!
682 */
683typedef struct VM
684{
685 /** The state of the VM.
686 * This field is read only to everyone except the VM and EM. */
687 VMSTATE volatile enmVMState;
688 /** Forced action flags.
689 * See the VM_FF_* \#defines. Updated atomically.
690 */
691 volatile uint32_t fGlobalForcedActions;
692 /** Pointer to the array of page descriptors for the VM structure allocation. */
693 R3PTRTYPE(PSUPPAGE) paVMPagesR3;
694 /** Session handle. For use when calling SUPR0 APIs. */
695 PSUPDRVSESSION pSession;
696 /** Pointer to the ring-3 VM structure. */
697 PUVM pUVM;
698 /** Ring-3 Host Context VM Pointer. */
699 R3PTRTYPE(struct VM *) pVMR3;
700 /** Ring-0 Host Context VM Pointer. */
701 R0PTRTYPE(struct VM *) pVMR0;
702 /** Raw-mode Context VM Pointer. */
703 RCPTRTYPE(struct VM *) pVMRC;
704
705 /** The GVM VM handle. Only the GVM should modify this field. */
706 uint32_t hSelf;
707 /** Number of virtual CPUs. */
708 uint32_t cCpus;
709 /** CPU priority (1-100) */
710 uint32_t uCpuPriority;
711
712 /** Size of the VM structure including the VMCPU array. */
713 uint32_t cbSelf;
714
715 /** Offset to the VMCPU array starting from beginning of this structure. */
716 uint32_t offVMCPU;
717
718 /** Reserved; alignment. */
719 uint32_t u32Reserved[5];
720
721 /** @name Public VMM Switcher APIs
722 * @{ */
723 /**
724 * Assembly switch entry point for returning to host context.
725 * This function will clean up the stack frame.
726 *
727 * @param eax The return code, register.
728 * @param Ctx The guest core context.
729 * @remark Assume interrupts disabled.
730 */
731 RTRCPTR pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;
732
733 /**
734 * Assembly switch entry point for returning to host context.
735 *
736 * This is an alternative entry point which we'll be using when we have the
737 * hypervisor context and need to save that before going to the host.
738 *
739 * This is typically useful when abandoning the hypervisor because of a trap
740 * and we want the trap state to be saved.
741 *
742 * @param eax The return code, register.
743 * @param ecx Pointer to the hypervisor core context, register.
744 * @remark Assume interrupts disabled.
745 */
746 RTRCPTR pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;
747
748 /**
749 * Assembly switch entry point for returning to host context.
750 *
751 * This is an alternative to the two *Ctx APIs and implies that the context has already
752 * been saved, or that it's just a brief return to HC and that the caller intends to resume
753 * whatever it is doing upon 'return' from this call.
754 *
755 * @param eax The return code, register.
756 * @remark Assume interrupts disabled.
757 */
758 RTRCPTR pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
759 /** @} */
760
761
762 /** @name Various VM data owned by VM.
763 * @{ */
764 RTTHREAD uPadding1;
765 /** The native handle of ThreadEMT. Getting the native handle
766 * is generally faster than getting the IPRT one (except on OS/2 :-). */
767 RTNATIVETHREAD uPadding2;
768 /** @} */
769
770
771 /** @name Various items that are frequently accessed.
772 * @{ */
773 /** Raw ring-3 indicator. */
774 bool fRawR3Enabled;
775 /** Raw ring-0 indicator. */
776 bool fRawR0Enabled;
777 /** PATM enabled flag.
778 * This is placed here for performance reasons. */
779 bool fPATMEnabled;
780 /** CSAM enabled flag.
781 * This is placed here for performance reasons. */
782 bool fCSAMEnabled;
783 /** Hardware VM support is available and enabled.
784 * This is placed here for performance reasons. */
785 bool fHWACCMEnabled;
786 /** Hardware VM support is required and non-optional.
787 * This is initialized together with the rest of the VM structure. */
788 bool fHwVirtExtForced;
789 /** PARAV enabled flag. */
790 bool fPARAVEnabled;
791 /** Large page enabled flag. */
792 bool fUseLargePages;
793 /** @} */
794
795
796 /* Padding to make gcc put StatTotalQemuToGC at the same offset as MSC does. */
797#if HC_ARCH_BITS == 32
798 uint32_t padding0;
799#endif
800
801 /** Profiling the total time from Qemu to GC. */
802 STAMPROFILEADV StatTotalQemuToGC;
803 /** Profiling the total time from GC to Qemu. */
804 STAMPROFILEADV StatTotalGCToQemu;
805 /** Profiling the total time spent in GC. */
806 STAMPROFILEADV StatTotalInGC;
807 /** Profiling the total time spent not in Qemu. */
808 STAMPROFILEADV StatTotalInQemu;
809 /** Profiling the VMMSwitcher code for going to GC. */
810 STAMPROFILEADV StatSwitcherToGC;
811 /** Profiling the VMMSwitcher code for going to HC. */
812 STAMPROFILEADV StatSwitcherToHC;
813 STAMPROFILEADV StatSwitcherSaveRegs;
814 STAMPROFILEADV StatSwitcherSysEnter;
815 STAMPROFILEADV StatSwitcherDebug;
816 STAMPROFILEADV StatSwitcherCR0;
817 STAMPROFILEADV StatSwitcherCR4;
818 STAMPROFILEADV StatSwitcherJmpCR3;
819 STAMPROFILEADV StatSwitcherRstrRegs;
820 STAMPROFILEADV StatSwitcherLgdt;
821 STAMPROFILEADV StatSwitcherLidt;
822 STAMPROFILEADV StatSwitcherLldt;
823 STAMPROFILEADV StatSwitcherTSS;
824
825 /** Padding - the unions must be aligned on a 64-byte boundary and the unions
826 * must start at the same offset on both 64-bit and 32-bit hosts. */
827 uint8_t abAlignment1[HC_ARCH_BITS == 32 ? 48 : 24];
828
829 /** CPUM part. */
830 union
831 {
832#ifdef ___CPUMInternal_h
833 struct CPUM s;
834#endif
835 uint8_t padding[1472]; /* multiple of 64 */
836 } cpum;
837
838 /** VMM part. */
839 union
840 {
841#ifdef ___VMMInternal_h
842 struct VMM s;
843#endif
844 uint8_t padding[1536]; /* multiple of 64 */
845 } vmm;
846
847 /** PGM part. */
848 union
849 {
850#ifdef ___PGMInternal_h
851 struct PGM s;
852#endif
853 uint8_t padding[4096*2+6080]; /* multiple of 64 */
854 } pgm;
855
856 /** HWACCM part. */
857 union
858 {
859#ifdef ___HWACCMInternal_h
860 struct HWACCM s;
861#endif
862 uint8_t padding[5376]; /* multiple of 64 */
863 } hwaccm;
864
865 /** TRPM part. */
866 union
867 {
868#ifdef ___TRPMInternal_h
869 struct TRPM s;
870#endif
871 uint8_t padding[5184]; /* multiple of 64 */
872 } trpm;
873
874 /** SELM part. */
875 union
876 {
877#ifdef ___SELMInternal_h
878 struct SELM s;
879#endif
880 uint8_t padding[576]; /* multiple of 64 */
881 } selm;
882
883 /** MM part. */
884 union
885 {
886#ifdef ___MMInternal_h
887 struct MM s;
888#endif
889 uint8_t padding[192]; /* multiple of 64 */
890 } mm;
891
892 /** PDM part. */
893 union
894 {
895#ifdef ___PDMInternal_h
896 struct PDM s;
897#endif
898 uint8_t padding[1600]; /* multiple of 64 */
899 } pdm;
900
901 /** IOM part. */
902 union
903 {
904#ifdef ___IOMInternal_h
905 struct IOM s;
906#endif
907 uint8_t padding[832]; /* multiple of 64 */
908 } iom;
909
910 /** PATM part. */
911 union
912 {
913#ifdef ___PATMInternal_h
914 struct PATM s;
915#endif
916 uint8_t padding[768]; /* multiple of 64 */
917 } patm;
918
919 /** CSAM part. */
920 union
921 {
922#ifdef ___CSAMInternal_h
923 struct CSAM s;
924#endif
925 uint8_t padding[1088]; /* multiple of 64 */
926 } csam;
927
928 /** EM part. */
929 union
930 {
931#ifdef ___EMInternal_h
932 struct EM s;
933#endif
934 uint8_t padding[256]; /* multiple of 64 */
935 } em;
936
937 /** TM part. */
938 union
939 {
940#ifdef ___TMInternal_h
941 struct TM s;
942#endif
943 uint8_t padding[2176]; /* multiple of 64 */
944 } tm;
945
946 /** DBGF part. */
947 union
948 {
949#ifdef ___DBGFInternal_h
950 struct DBGF s;
951#endif
952 uint8_t padding[2368]; /* multiple of 64 */
953 } dbgf;
954
955 /** SSM part. */
956 union
957 {
958#ifdef ___SSMInternal_h
959 struct SSM s;
960#endif
961 uint8_t padding[128]; /* multiple of 64 */
962 } ssm;
963
964 /** REM part. */
965 union
966 {
967#ifdef ___REMInternal_h
968 struct REM s;
969#endif
970 uint8_t padding[0x11100]; /* multiple of 64 */
971 } rem;
972
973 /* ---- begin small stuff ---- */
974
975 /** VM part. */
976 union
977 {
978#ifdef ___VMInternal_h
979 struct VMINT s;
980#endif
981 uint8_t padding[24]; /* multiple of 8 */
982 } vm;
983
984 /** CFGM part. */
985 union
986 {
987#ifdef ___CFGMInternal_h
988 struct CFGM s;
989#endif
990 uint8_t padding[8]; /* multiple of 8 */
991 } cfgm;
992
993 /** PARAV part. */
994 union
995 {
996#ifdef ___PARAVInternal_h
997 struct PARAV s;
998#endif
999 uint8_t padding[24]; /* multiple of 8 */
1000 } parav;
1001
1002 /** Padding for aligning the CPU array on a page boundary. */
1003 uint8_t abAlignment2[1992];
1004
1005 /* ---- end small stuff ---- */
1006
1007 /** VMCPU array for the configured number of virtual CPUs.
1008 * Must be aligned on a page boundary for TLB hit reasons as well as
1009 * alignment of VMCPU members. */
1010 VMCPU aCpus[1];
1011} VM;
1012
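/* Per-VCPU data hangs off the VM structure in the aCpus array (cCpus entries;
 * offVMCPU gives the byte offset of the array from the start of the
 * structure).  A hypothetical loop over all virtual CPUs: */
#if 0 /* illustration only */
static void exampleForEachVCpu(PVM pVM)
{
    VMCPUID idCpu;
    for (idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        Assert(pVCpu->idCpu == idCpu);
        /* ...per-VCPU work... */
    }
}
#endif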
1013
1014#ifdef IN_RC
1015RT_C_DECLS_BEGIN
1016
1017/** The VM structure.
1018 * This is imported from the VMMGCBuiltin module, i.e. it's one
1019 * of those magic globals which we should avoid using.
1020 */
1021extern DECLIMPORT(VM) g_VM;
1022
1023RT_C_DECLS_END
1024#endif
1025
1026/** @} */
1027
1028#endif