source: vbox/trunk/src/VBox/VMM/include/HMInternal.h @ 66227

Last change on this file was 66227, checked in by vboxsync, 8 years ago

VMM: Nested Hw.virt: Implement SVM VMRUN and #VMEXIT in IEM.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 44.1 KB
/* $Id: HMInternal.h 66227 2017-03-23 14:50:07Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/trpm.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>
#include <iprt/string.h>

#if defined(RT_OS_DARWIN) && HC_ARCH_BITS == 32
# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
#endif

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0 || defined(DOXYGEN_RUNNING)
# define HM_PROFILE_EXIT_DISPATCH
#endif

RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */

/** @def HMCPU_CF_CLEAR
 * Clears a HM-context flag.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param fFlag The flag to clear.
 */
#define HMCPU_CF_CLEAR(pVCpu, fFlag) (ASMAtomicUoAndU32(&(pVCpu)->hm.s.fContextUseFlags, ~(fFlag)))

/** @def HMCPU_CF_SET
 * Sets a HM-context flag.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param fFlag The flag to set.
 */
#define HMCPU_CF_SET(pVCpu, fFlag) (ASMAtomicUoOrU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlag)))

/** @def HMCPU_CF_IS_SET
 * Checks if all the flags in the specified HM-context set are set.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param fFlag The flag to check.
 */
#define HMCPU_CF_IS_SET(pVCpu, fFlag) ((ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlag)) == (fFlag))

/** @def HMCPU_CF_IS_PENDING
 * Checks if one or more of the flags in the specified HM-context set are
 * pending.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_PENDING(pVCpu, fFlags) RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlags))

/** @def HMCPU_CF_IS_PENDING_ONLY
 * Checks if -only- one or more of the specified HM-context flags are pending.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_PENDING_ONLY(pVCpu, fFlags) !RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & ~(fFlags))

/** @def HMCPU_CF_IS_SET_ONLY
 * Checks if -only- all the flags in the specified HM-context set are set.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_SET_ONLY(pVCpu, fFlags) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) == (fFlags))

/** @def HMCPU_CF_RESET_TO
 * Resets the HM-context flags to the specified value.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param fFlags The new value.
 */
#define HMCPU_CF_RESET_TO(pVCpu, fFlags) (ASMAtomicUoWriteU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlags)))

/** @def HMCPU_CF_VALUE
 * Returns the current HM-context flags value.
 *
 * @param pVCpu The cross context virtual CPU structure.
 */
#define HMCPU_CF_VALUE(pVCpu) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags))

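
/* Illustrative semantics (an added documentation sample, not part of the
 * original sources): assume fContextUseFlags currently holds
 * HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_RIP (these flags are defined
 * further down in this header). */
#if 0 /* documentation sample only, never compiled */
    HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_RIP);      /* true:  both queried flags are set. */
    HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_RSP);      /* false: RSP is not set. */
    HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_RSP);  /* true:  at least CR0 is pending. */
    HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_GUEST_CR0);                    /* false: RIP is pending as well. */
    HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_RIP); /* true:  exact match. */
#endif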

/** Resets/initializes the VM-exit/\#VMEXIT history array. */
#define HMCPU_EXIT_HISTORY_RESET(pVCpu) (memset(&(pVCpu)->hm.s.auExitHistory, 0xff, sizeof((pVCpu)->hm.s.auExitHistory)))

/** Updates the VM-exit/\#VMEXIT history array. */
#define HMCPU_EXIT_HISTORY_ADD(pVCpu, a_ExitReason) \
    do { \
        AssertMsg((pVCpu)->hm.s.idxExitHistoryFree < RT_ELEMENTS((pVCpu)->hm.s.auExitHistory), ("%u\n", (pVCpu)->hm.s.idxExitHistoryFree)); \
        (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree++] = (uint16_t)(a_ExitReason); \
        if ((pVCpu)->hm.s.idxExitHistoryFree == RT_ELEMENTS((pVCpu)->hm.s.auExitHistory)) \
            (pVCpu)->hm.s.idxExitHistoryFree = 0; \
        (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree] = UINT16_MAX; \
    } while (0)
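
/* Walk-through (an added documentation sample, not part of the original
 * sources): RESET fills the array with 0xff so every slot reads UINT16_MAX;
 * each ADD stores the exit reason, advances the free index (wrapping at 31
 * entries) and re-marks the new free slot, so a reader can scan backwards
 * from the UINT16_MAX sentinel to enumerate the most recent exits.
 * Assuming idxExitHistoryFree starts at 0: */
#if 0 /* documentation sample only, never compiled */
    HMCPU_EXIT_HISTORY_RESET(pVCpu);      /* auExitHistory[0..30] == UINT16_MAX */
    HMCPU_EXIT_HISTORY_ADD(pVCpu, 0x1e);  /* [0] = 0x1e, [1] = UINT16_MAX, idxExitHistoryFree == 1 */
    HMCPU_EXIT_HISTORY_ADD(pVCpu, 0x30);  /* [1] = 0x30, [2] = UINT16_MAX, idxExitHistoryFree == 2 */
#endif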

/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT                      0x100
#define MASK_EXITREASON_STAT                     0xff
#define MASK_INJECT_IRQ_STAT                     0xff

/** @name HM changed flags.
 * These flags are used to keep track of which important registers have been
 * changed since they were last reset.
 *
 * Flags marked "shared" are used for registers that are common to both the host
 * and guest (i.e. without dedicated VMCS/VMCB fields for guest bits).
 *
 * @{
 */
#define HM_CHANGED_GUEST_CR0                     RT_BIT(0)  /* Shared */
#define HM_CHANGED_GUEST_CR3                     RT_BIT(1)
#define HM_CHANGED_GUEST_CR4                     RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR                    RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR                    RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR                    RT_BIT(5)
#define HM_CHANGED_GUEST_TR                      RT_BIT(6)
#define HM_CHANGED_GUEST_SEGMENT_REGS            RT_BIT(7)
#define HM_CHANGED_GUEST_DEBUG                   RT_BIT(8)  /* Shared */
#define HM_CHANGED_GUEST_RIP                     RT_BIT(9)
#define HM_CHANGED_GUEST_RSP                     RT_BIT(10)
#define HM_CHANGED_GUEST_RFLAGS                  RT_BIT(11)
#define HM_CHANGED_GUEST_CR2                     RT_BIT(12)
#define HM_CHANGED_GUEST_SYSENTER_CS_MSR         RT_BIT(13)
#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR        RT_BIT(14)
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
#define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17) /* Shared */ /** @todo Move this to VT-x specific? */
#define HM_CHANGED_GUEST_XCPT_INTERCEPTS         RT_BIT(18)
/* VT-x specific state. */
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(19)
#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(20)
#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(21)
#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(22)
#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(23)
/* AMD-V specific state. */
#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(19)
#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(20)
#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)

#define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0 \
                                                  | HM_CHANGED_GUEST_CR3 \
                                                  | HM_CHANGED_GUEST_CR4 \
                                                  | HM_CHANGED_GUEST_GDTR \
                                                  | HM_CHANGED_GUEST_IDTR \
                                                  | HM_CHANGED_GUEST_LDTR \
                                                  | HM_CHANGED_GUEST_TR \
                                                  | HM_CHANGED_GUEST_SEGMENT_REGS \
                                                  | HM_CHANGED_GUEST_DEBUG \
                                                  | HM_CHANGED_GUEST_RIP \
                                                  | HM_CHANGED_GUEST_RSP \
                                                  | HM_CHANGED_GUEST_RFLAGS \
                                                  | HM_CHANGED_GUEST_CR2 \
                                                  | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                                                  | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                                  | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
                                                  | HM_CHANGED_GUEST_EFER_MSR \
                                                  | HM_CHANGED_GUEST_LAZY_MSRS \
                                                  | HM_CHANGED_GUEST_XCPT_INTERCEPTS \
                                                  | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
                                                  | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                                  | HM_CHANGED_VMX_GUEST_APIC_STATE \
                                                  | HM_CHANGED_VMX_ENTRY_CTLS \
                                                  | HM_CHANGED_VMX_EXIT_CTLS)

#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(24)

/* Bits shared between host and guest. */
#define HM_CHANGED_HOST_GUEST_SHARED_STATE       (  HM_CHANGED_GUEST_CR0 \
                                                  | HM_CHANGED_GUEST_DEBUG \
                                                  | HM_CHANGED_GUEST_LAZY_MSRS)
/** @} */
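
/* A hedged usage sketch (an added documentation sample, not part of the
 * original sources): how a ring-0 state loader might consume these flags.
 * hmR0ExampleLoadGuestState and hmR0ExampleLoadGuestCr0 are hypothetical
 * helpers, not functions declared anywhere in VirtualBox. */
#if 0 /* documentation sample only, never compiled */
static void hmR0ExampleLoadGuestState(PVMCPU pVCpu)
{
    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
    {
        hmR0ExampleLoadGuestCr0(pVCpu);               /* Sync CR0 into the VMCS/VMCB. */
        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);  /* Mark it clean again. */
    }
    /* Everything guest-related must be clean before entering the guest. */
    Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST));
}
#endif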

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE            PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE                          (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM                 (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)


/** @name Macros for enabling and disabling preemption.
 * These are really just for hiding the RTTHREADPREEMPTSTATE and asserting that
 * preemption has already been disabled when there is no context hook.
 * @{ */
#ifdef VBOX_STRICT
# define HM_DISABLE_PREEMPT() \
    RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD) || VMMR0ThreadCtxHookIsEnabled(pVCpu)); \
    RTThreadPreemptDisable(&PreemptStateInternal)
#else
# define HM_DISABLE_PREEMPT() \
    RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
    RTThreadPreemptDisable(&PreemptStateInternal)
#endif /* VBOX_STRICT */
#define HM_RESTORE_PREEMPT()    do { RTThreadPreemptRestore(&PreemptStateInternal); } while (0)
/** @} */
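
/* Usage sketch (an added documentation sample, not part of the original
 * sources): the macros declare and consume a local PreemptStateInternal,
 * so they must be paired within the same scope. */
#if 0 /* documentation sample only, never compiled */
    HM_DISABLE_PREEMPT();
    /* ... access per-CPU state that must not migrate between host CPUs ... */
    HM_RESTORE_PREEMPT();
#endif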


/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** @name HM saved state versions
 * @{
 */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SAVED_STATE_VERSION                  5
# define HM_SAVED_STATE_VERSION_NO_PATCHING      4
#else
# define HM_SAVED_STATE_VERSION                  4
# define HM_SAVED_STATE_VERSION_NO_PATCHING      4
#endif
#define HM_SAVED_STATE_VERSION_2_0_X             3
/** @} */

/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBALCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
    RTR0MEMOBJ          hMemObj;
    /** The physical address of the first page in hMemObj (it's a
     * physically contiguous allocation if it spans multiple pages). */
    RTHCPHYS            HCPhysMemObj;
    /** The address of the memory (for pfnEnable). */
    void               *pvMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentAsid;
    /** TLB flush count. */
    uint32_t            cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool       fInUse;
} HMGLOBALCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    /* not implemented: HMPENDINGIO_STRING_READ, */
    /* not implemented: HMPENDINGIO_STRING_WRITE, */
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction. (32-bit GC pointer) */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;


/**
 * Makes a HMEXITSTAT::uKey value from a program counter and an exit code.
 *
 * @returns 64-bit key
 * @param   a_uPC       The RIP + CS.BASE value of the exit.
 * @param   a_uExit     The exit code.
 * @todo    Add CPL?
 */
#define HMEXITSTAT_MAKE_KEY(a_uPC, a_uExit) (((a_uPC) & UINT64_C(0x0000ffffffffffff)) | (uint64_t)(a_uExit) << 48)
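
/* Worked example (an added documentation sample, not part of the original
 * sources): the key packs the low 48 bits of the PC with the exit code in
 * the top 16 bits.  For a VT-x CPUID exit (exit reason 10) at PC
 * 0xffffffff81000123:
 *      HMEXITSTAT_MAKE_KEY(0xffffffff81000123, 10)
 *          == UINT64_C(0x0000ffff81000123) | (UINT64_C(10) << 48)
 *          == UINT64_C(0x000affff81000123)
 * The exit code is recovered with (uKey >> 48) and the PC bits with
 * (uKey & UINT64_C(0x0000ffffffffffff)). */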

typedef struct HMEXITINFO
{
    /** See HMEXITSTAT_MAKE_KEY(). */
    uint64_t                uKey;
    /** Number of recent hits (decays with time). */
    uint32_t volatile       cHits;
    /** The age + lock. */
    uint16_t volatile       uAge;
    /** Action or action table index. */
    uint16_t                iAction;
} HMEXITINFO;
AssertCompileSize(HMEXITINFO, 16); /* Lots of these guys, so don't add any unnecessary stuff! */

typedef struct HMEXITHISTORY
{
    /** The exit timestamp. */
    uint64_t                uTscExit;
    /** The index of the corresponding HMEXITINFO entry.
     * UINT32_MAX if none (too many collisions, race, whatever). */
    uint32_t                iExitInfo;
    /** Figure out later, needed for padding now. */
    uint32_t                uSomeClueOrSomething;
} HMEXITHISTORY;

/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param   pVM             The cross context VM structure.
 * @param   offCpumVCpu     Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;
    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;
    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;
    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;
    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;
    /** Set when TPR patching is allowed. */
    bool                        fTprPatchingAllowed;
    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;
    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    /** Set when the debug facility has breakpoints/events enabled that requires
     * us to use the debug execution loop in ring-0. */
    bool                        fUseDebugLoop;
    /** Set if hardware APIC virtualization is enabled. */
    bool                        fVirtApicRegs;
    /** Set if posted interrupt processing is enabled. */
    bool                        fPostedIntrs;
    /** Alignment. */
    bool                        fAlignment0;

    /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
    uint32_t                    fHostKernelFeatures;

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;
    /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    uint32_t                    u32Alignment0;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    RTR0PTR                     pvR0Alignment0;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         * CPU. */
        bool                        fSupported;
        /** Set when we've enabled VMX. */
        bool                        fEnabled;
        /** Set if VPID is supported. */
        bool                        fVpid;
        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;
        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;
        /** Set if unrestricted guest execution is allowed to be used. */
        bool                        fAllowUnrestricted;
        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;
        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTHCPHYS                    HCPhysScratch;
        RTR0MEMOBJ                  hMemObjScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif

        /** Internal Id of which flush-handler to use for tagged-TLB entries. */
        uint32_t                    uFlushTaggedTlb;

        /** Pause-loop exiting (PLE) gap in ticks. */
        uint32_t                    cPleGapTicks;
        /** Pause-loop exiting (PLE) window in ticks. */
        uint32_t                    cPleWindowTicks;
        uint32_t                    u32Alignment0;

        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t                    u64HostCr4;
        /** Host SMM monitor control (set by ring-0 VMX init) */
        uint64_t                    u64HostSmmMonitorCtl;
        /** Host EFER value (set by ring-0 VMX init) */
        uint64_t                    u64HostEfer;
        /** Whether the CPU supports VMCS fields for swapping EFER. */
        bool                        fSupportsVmcsEfer;
        uint8_t                     u8Alignment2[7];

        /** VMX MSR values. */
        VMXMSRS                     Msrs;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMXFLUSHEPT                 enmFlushEpt;
        VMXFLUSHVPID                enmFlushVpid;

        /** Host-physical address for a failing VMXON instruction. */
        RTHCPHYS                    HCPhysVmxEnableError;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         * CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;
        uint8_t                     u8Alignment0[4];

        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /* HWCR MSR (for diagnostics) */
        uint64_t                    u64MsrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;
        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t                    u32Features;

        /** Pause filter counter. */
        uint16_t                    cPauseFilter;
        /** Pause filter threshold in ticks. */
        uint16_t                    cPauseFilterThresholdTicks;
        uint32_t                    u32Alignment0;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction
     * address.
     */
    AVLOU32TREE                 PatchTree;
    uint32_t                    cPatches;
    HMTPRPATCH                  aPatches[64];

    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection */
    int32_t                     lLastError;

    /** HMR0Init was run */
    bool                        fHMR0Init;
    bool                        u8Alignment1[3];

    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccessCr8;
    STAMCOUNTER                 StatTprReplaceSuccessVmc;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);

/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY                      128

/**
 * Structure for storing read and write VMCS actions.
 */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t             aMagic[16];
    uint64_t            uMagic;
    uint64_t            u64TimeEntry;
    uint64_t            u64TimeSwitch;
    uint64_t            cResume;
    uint64_t            interPD;
    uint64_t            pSwitcher;
    uint32_t            uPos;
    uint32_t            idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t            cr2;
    struct
    {
        uint32_t            cValidEntries;
        uint32_t            uAlignment;
        uint32_t            aField[VMCSCACHE_MAX_ENTRY];
        uint64_t            aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t            cValidEntries;
        uint32_t            uAlignment;
        uint32_t            aField[VMCSCACHE_MAX_ENTRY];
        uint64_t            aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS            HCPhysCpuPage;
        RTHCPHYS            HCPhysVmcs;
        RTGCPTR             pCache;
        RTGCPTR             pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS            HCPhysVmcs;
        RTGCPTR             pCache;
        RTGCPTR             pCtx;
        uint64_t            eflags;
        uint64_t            cr8;
    } TestOut;
    struct
    {
        uint64_t            param1;
        uint64_t            param2;
        uint64_t            param3;
        uint64_t            param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
AssertCompileSizeAlignment(VMCSCACHE, 8);
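
/* Hedged sketch (an added documentation sample, not part of the original
 * sources): queueing a cached VMCS write by hand; idxField and u64Val are
 * hypothetical locals, and the real accessors live in the ring-0 VMX code. */
#if 0 /* documentation sample only, never compiled */
    Assert(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY);
    pCache->Write.aField[pCache->Write.cValidEntries]    = idxField; /* a VMCS field index */
    pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;   /* the value to write  */
    pCache->Write.cValidEntries++;
#endif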

/**
 * VMX StartVM function.
 *
 * @returns VBox status code (no informational stuff).
 * @param   fResume     Whether to use VMRESUME (true) or VMLAUNCH (false).
 * @param   pCtx        The CPU register context.
 * @param   pCache      The VMCS cache.
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pVCpu       Pointer to the cross context per-CPU structure.
 */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
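
/* Hedged sketch (an added documentation sample, not part of the original
 * sources): these pointers are stored per-VCPU (see HMCPU below) and are
 * invoked from the ring-0 run loops roughly like this; fResume, pCtx, pVM
 * and pVCpu are assumed locals, and error handling is elided. */
#if 0 /* documentation sample only, never compiled */
    int rc = pVCpu->hm.s.vmx.pfnStartVM(fResume, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
#endif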

/**
 * HM VMCPU Instance data.
 *
 * Note! If you change members of this struct, make sure to check if the
 * assembly counterpart in HMInternal.mac needs to be updated as well.
 */
typedef struct HMCPU
{
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Whether we've completed the inner HM leave function. */
    bool                        fLeaveDone;
    /** Whether we're using the hyper DR7 or guest DR7. */
    bool                        fUsingHyperDR7;
    /** Whether to preload the guest-FPU state to avoid \#NM VM-exit overhead. */
    bool                        fPreloadGuestFpu;
    /** Set if XCR0 needs to be loaded and saved when entering and exiting guest
     * code execution. */
    bool                        fLoadSaveGuestXcr0;

    /** Whether we should use the debug loop because of single stepping or special
     * debug breakpoints / events are armed. */
    bool                        fUseDebugLoop;
    /** Whether we are currently executing in the debug loop.
     * Mainly for assertions. */
    bool                        fUsingDebugLoop;
    /** Set if we're using the debug loop and wish to intercept RDTSC. */
    bool                        fDebugWantRdTscExit;
    /** Whether we're executing a single instruction. */
    bool                        fSingleInstruction;
    /** Set if we need to clear the trap flag because of single stepping. */
    bool                        fClearTrapFlag;

    /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
    bool                        fGIMTrapXcptUD;
    /** Whether paravirt. hypercalls are enabled. */
    bool                        fHypercallsEnabled;
    uint8_t                     u8Alignment0[2];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    volatile uint32_t           fContextUseFlags;
    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
     * time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    /** An additional error code used for some guru meditations. */
    uint32_t                    u32HMError;
    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    struct
    {
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment0;
#endif
        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t                    u32EntryCtls;

        /** Current CR0 mask. */
        uint32_t                    u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t                    u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        volatile uint32_t           fUpdatedGuestState;
        uint32_t                    u32Alignment1;

        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** Padding. */
        R0PTRTYPE(void *)           pvAlignment0;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         * (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Current EPTP. */
        RTHCPHYS                    HCPhysEPTP;

        /** Number of guest/host MSR pairs in the auto-load/store area. */
        uint32_t                    cMsrs;
        /** Whether the host MSR values are up-to-date in the auto-load/store area. */
        bool                        fUpdatedHostMsrs;
        uint8_t                     u8Alignment0[3];

        /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostLStarMsr;
        /** Host STAR MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostStarMsr;
        /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostSFMaskMsr;
        /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostKernelGSBaseMsr;
        /** A mask of which MSRs have been swapped and need restoration. */
        uint32_t                    fLazyMsrs;
        uint32_t                    u32Alignment2;

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last used TSC offset value (cached). */
        uint64_t                    u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR                 AttrCS;
            X86DESCATTR                 AttrDS;
            X86DESCATTR                 AttrES;
            X86DESCATTR                 AttrFS;
            X86DESCATTR                 AttrGS;
            X86DESCATTR                 AttrSS;
            X86EFLAGS                   Eflags;
            uint32_t                    fRealOnV86Active;
        } RealMode;

        /** VT-x error-reporting (mainly for ring-3 propagation). */
        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                u32VMCSRevision;
            uint32_t                u32InstrError;
            uint32_t                u32ExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                u32Alignment0;
        } LastError;

        /** Current state of the VMCS. */
        uint32_t                    uVmcsState;
        /** Which host-state bits to restore before being preempted. */
        uint32_t                    fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST              RestoreHost;

        /** Set if guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;
        /** Set if guest switched to 64-bit mode on a 32-bit host. */
        bool                        fSwitchedTo64on32;

        uint8_t                     u8Alignment1[6];
    } vmx;

    struct
    {
        /** Ring 0 handlers for AMD-V. */
        PFNHMSVMVMRUN               pfnVMRun;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment0;
#endif

        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS                    HCPhysVmcbHost;
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ                  hMemObjVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *)           pvVmcbHost;

        /** Physical address of the guest VMCB. */
        RTHCPHYS                    HCPhysVmcb;
        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ                  hMemObjVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *)           pvVmcb;

        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         * we should check if the VTPR changed on every VM-exit. */
        bool                        fSyncVTpr;
        uint8_t                     u8Alignment0[7];
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    u32ErrCode;
        uint32_t                    cbInstr;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
        uint64_t                    u64IntInfo;
        RTGCUINTPTR                 GCPtrFaultAddress;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the io code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    /** Pending I/O operation state. */
    struct
    {
        /** Pending IO operation type. */
        HMPENDINGIO                 enmType;
        uint32_t                    u32Alignment0;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t                uPort;
                uint32_t                uAndVal;
                uint32_t                cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** VT-x/AMD-V VM-exit/\#VMEXIT history, circular array. */
    uint16_t                    auExitHistory[31];
    /** The index of the next free slot in the history array. */
    uint16_t                    idxExitHistoryFree;

    /** For saving stack space, the disassembler state is allocated here instead of
     * on the stack. */
    DISCPUSTATE                 DisState;

    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
    STAMPROFILEADV              StatExitIO;
    STAMPROFILEADV              StatExitMovCRx;
    STAMPROFILEADV              StatExitXcptNmi;
    STAMPROFILEADV              StatLoadGuestState;
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatInjectInterrupt;
    STAMCOUNTER                 StatInjectXcpt;
    STAMCOUNTER                 StatInjectPendingReflect;
    STAMCOUNTER                 StatInjectPendingInterpret;

    STAMCOUNTER                 StatExitAll;
    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;       /**< Misleading, currently used for MMIO \#PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestTS;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitVmcall;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitExtInt;
    STAMCOUNTER                 StatExitHostNmiInGC;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushEntire;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchTprMaskedIrq;
    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;
    STAMCOUNTER                 StatSwitchMaxResumeLoops;
    STAMCOUNTER                 StatSwitchHltToR3;
    STAMCOUNTER                 StatSwitchApicAccessToR3;
    STAMCOUNTER                 StatSwitchPreempt;
    STAMCOUNTER                 StatSwitchPreemptSaveHostState;

    STAMCOUNTER                 StatTscParavirt;
    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

    STAMCOUNTER                 StatVmxCheckBadRmSelBase;
    STAMCOUNTER                 StatVmxCheckBadRmSelLimit;
    STAMCOUNTER                 StatVmxCheckRmOk;

    STAMCOUNTER                 StatVmxCheckBadSel;
    STAMCOUNTER                 StatVmxCheckBadRpl;
    STAMCOUNTER                 StatVmxCheckBadLdt;
    STAMCOUNTER                 StatVmxCheckBadTr;
    STAMCOUNTER                 StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV              StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;
AssertCompileMemberAlignment(HMCPU, vmx, 8);
AssertCompileMemberAlignment(HMCPU, svm, 8);
AssertCompileMemberAlignment(HMCPU, Event, 8);

VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent);

#ifdef IN_RING0
VMMR0DECL(PHMGLOBALCPUINFO) hmR0GetCurrentCpu(void);

# ifdef VBOX_STRICT
VMMR0DECL(void) hmDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
# else
#  define hmDumpRegs(a, b, c)           do { } while (0)
#  define hmR0DumpDescriptor(a, b, c)   do { } while (0)
# endif /* VBOX_STRICT */

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif
