VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@ 60816

Last change on this file since 60816 was 60732, checked in by vboxsync, 9 years ago

VMM/HM: Add stat. counter for intr masked-by-TPR during event injection.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 42.8 KB
/* $Id: HMInternal.h 60732 2016-04-28 10:29:17Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>
#include <iprt/string.h>

#if defined(RT_OS_DARWIN) && HC_ARCH_BITS == 32
# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
#endif

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0 || defined(DOXYGEN_RUNNING)
# define HM_PROFILE_EXIT_DISPATCH
#endif

RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */

/** @def HMCPU_CF_CLEAR
 * Clears a HM-context flag.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlag   The flag to clear.
 */
#define HMCPU_CF_CLEAR(pVCpu, fFlag)            (ASMAtomicUoAndU32(&(pVCpu)->hm.s.fContextUseFlags, ~(fFlag)))

/** @def HMCPU_CF_SET
 * Sets a HM-context flag.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlag   The flag to set.
 */
#define HMCPU_CF_SET(pVCpu, fFlag)              (ASMAtomicUoOrU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlag)))

/** @def HMCPU_CF_IS_SET
 * Checks if all the flags in the specified HM-context set are set.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlag   The flag to check.
 */
#define HMCPU_CF_IS_SET(pVCpu, fFlag)           ((ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlag)) == (fFlag))

/** @def HMCPU_CF_IS_PENDING
 * Checks if one or more of the flags in the specified HM-context set are
 * pending.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  The flags to check for.
 */
#define HMCPU_CF_IS_PENDING(pVCpu, fFlags)      RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlags))

/** @def HMCPU_CF_IS_PENDING_ONLY
 * Checks if -only- one or more of the specified HM-context flags are pending.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  The flags to check for.
 */
#define HMCPU_CF_IS_PENDING_ONLY(pVCpu, fFlags) !RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & ~(fFlags))

/** @def HMCPU_CF_IS_SET_ONLY
 * Checks if -only- all the flags in the specified HM-context set are set.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  The flags to check for.
 */
#define HMCPU_CF_IS_SET_ONLY(pVCpu, fFlags)     (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) == (fFlags))

/** @def HMCPU_CF_RESET_TO
 * Resets the HM-context flags to the specified value.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  The new value.
 */
#define HMCPU_CF_RESET_TO(pVCpu, fFlags)        (ASMAtomicUoWriteU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlags)))

/** @def HMCPU_CF_VALUE
 * Returns the current HM-context flags value.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
#define HMCPU_CF_VALUE(pVCpu)                   (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags))
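
/** @remarks Illustrative usage sketch for the HMCPU_CF_* accessors above; the
 *           call site below is hypothetical and not taken from the VMM sources:
 * @code
 *      // Mark the guest RIP as dirty so it gets reloaded on the next VM-entry.
 *      HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
 *
 *      // Later, while loading guest state: test and clear the flag.
 *      if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
 *      {
 *          // ... write RIP into the VMCS/VMCB ...
 *          HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
 *      }
 * @endcode
 */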


/** Resets/initializes the VM-exit/\#VMEXIT history array. */
#define HMCPU_EXIT_HISTORY_RESET(pVCpu)         (memset(&(pVCpu)->hm.s.auExitHistory, 0xff, sizeof((pVCpu)->hm.s.auExitHistory)))

/** Updates the VM-exit/\#VMEXIT history array. */
#define HMCPU_EXIT_HISTORY_ADD(pVCpu, a_ExitReason) \
    do { \
        AssertMsg((pVCpu)->hm.s.idxExitHistoryFree < RT_ELEMENTS((pVCpu)->hm.s.auExitHistory), ("%u\n", (pVCpu)->hm.s.idxExitHistoryFree)); \
        (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree++] = (uint16_t)(a_ExitReason); \
        if ((pVCpu)->hm.s.idxExitHistoryFree == RT_ELEMENTS((pVCpu)->hm.s.auExitHistory)) \
            (pVCpu)->hm.s.idxExitHistoryFree = 0; \
        (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree] = UINT16_MAX; \
    } while (0)
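
/** @remarks How the circular history is consumed (illustrative sketch; the
 *           loop below is hypothetical, not from the VMM sources).  After each
 *           HMCPU_EXIT_HISTORY_ADD() the slot at idxExitHistoryFree holds
 *           UINT16_MAX, so a reader can tell where the ring currently wraps:
 * @code
 *      HMCPU_EXIT_HISTORY_ADD(pVCpu, uExitReason);
 *      for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->hm.s.auExitHistory); i++)
 *          if (pVCpu->hm.s.auExitHistory[i] != UINT16_MAX)
 *              LogRel(("slot %u: exit reason %u\n", i, pVCpu->hm.s.auExitHistory[i]));
 * @endcode
 */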

/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT 0x100
/** Mask for wrapping an exit reason into the statistics counter range. */
#define MASK_EXITREASON_STAT 0xff
/** Mask for wrapping an injected interrupt vector into the statistics counter range. */
#define MASK_INJECT_IRQ_STAT 0xff

/** @name HM changed flags.
 * These flags are used to keep track of which important registers have
 * been changed since they were last reset.
 * @{
 */
#define HM_CHANGED_GUEST_CR0                     RT_BIT(0)  /* Shared */
#define HM_CHANGED_GUEST_CR3                     RT_BIT(1)
#define HM_CHANGED_GUEST_CR4                     RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR                    RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR                    RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR                    RT_BIT(5)
#define HM_CHANGED_GUEST_TR                      RT_BIT(6)
#define HM_CHANGED_GUEST_SEGMENT_REGS            RT_BIT(7)
#define HM_CHANGED_GUEST_DEBUG                   RT_BIT(8)  /* Shared */
#define HM_CHANGED_GUEST_RIP                     RT_BIT(9)
#define HM_CHANGED_GUEST_RSP                     RT_BIT(10)
#define HM_CHANGED_GUEST_RFLAGS                  RT_BIT(11)
#define HM_CHANGED_GUEST_CR2                     RT_BIT(12)
#define HM_CHANGED_GUEST_SYSENTER_CS_MSR         RT_BIT(13)
#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR        RT_BIT(14)
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
#define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17) /* Shared */
#define HM_CHANGED_GUEST_XCPT_INTERCEPTS         RT_BIT(18)
/* VT-x specific state. */
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(19)
#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(20)
#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(21)
#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(22)
#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(23)
/* AMD-V specific state. */
#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(19)
#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(20)
#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)

#define HM_CHANGED_ALL_GUEST                  (  HM_CHANGED_GUEST_CR0 \
                                               | HM_CHANGED_GUEST_CR3 \
                                               | HM_CHANGED_GUEST_CR4 \
                                               | HM_CHANGED_GUEST_GDTR \
                                               | HM_CHANGED_GUEST_IDTR \
                                               | HM_CHANGED_GUEST_LDTR \
                                               | HM_CHANGED_GUEST_TR \
                                               | HM_CHANGED_GUEST_SEGMENT_REGS \
                                               | HM_CHANGED_GUEST_DEBUG \
                                               | HM_CHANGED_GUEST_RIP \
                                               | HM_CHANGED_GUEST_RSP \
                                               | HM_CHANGED_GUEST_RFLAGS \
                                               | HM_CHANGED_GUEST_CR2 \
                                               | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                                               | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                               | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
                                               | HM_CHANGED_GUEST_EFER_MSR \
                                               | HM_CHANGED_GUEST_LAZY_MSRS \
                                               | HM_CHANGED_GUEST_XCPT_INTERCEPTS \
                                               | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
                                               | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                               | HM_CHANGED_VMX_GUEST_APIC_STATE \
                                               | HM_CHANGED_VMX_ENTRY_CTLS \
                                               | HM_CHANGED_VMX_EXIT_CTLS)

#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(24)

/* Bits shared between host and guest. */
#define HM_CHANGED_HOST_GUEST_SHARED_STATE    (  HM_CHANGED_GUEST_CR0 \
                                               | HM_CHANGED_GUEST_DEBUG \
                                               | HM_CHANGED_GUEST_LAZY_MSRS)
/** @} */
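
/** @remarks Sketch of how the aggregate masks above are typically consumed;
 *           both call sites are hypothetical, not lifted from the VMM sources:
 * @code
 *      // After returning from ring-3, everything must be re-synced:
 *      HMCPU_CF_RESET_TO(pVCpu, HM_CHANGED_ALL_GUEST);
 *
 *      // When leaving HM context, only shared host/guest state needs saving:
 *      if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
 *      {
 *          // ... save the shared CR0/debug/lazy-MSR state ...
 *      }
 * @endcode
 */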

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire 4 GB address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM        (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)


/** @name Macros for enabling and disabling preemption.
 * These are really just for hiding the RTTHREADPREEMPTSTATE and asserting that
 * preemption has already been disabled when there is no context hook.
 * @{ */
#ifdef VBOX_STRICT
# define HM_DISABLE_PREEMPT() \
    RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD) || VMMR0ThreadCtxHookIsEnabled(pVCpu)); \
    RTThreadPreemptDisable(&PreemptStateInternal)
#else
# define HM_DISABLE_PREEMPT() \
    RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
    RTThreadPreemptDisable(&PreemptStateInternal)
#endif /* VBOX_STRICT */
#define HM_RESTORE_PREEMPT()    do { RTThreadPreemptRestore(&PreemptStateInternal); } while (0)
/** @} */
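
/** @remarks Pairing sketch for the preemption macros (hypothetical worker
 *           function; note that HM_DISABLE_PREEMPT() declares a local
 *           variable, so both macros must be used within the same scope):
 * @code
 *      static void hmR0HypotheticalWorker(PVMCPU pVCpu)
 *      {
 *          HM_DISABLE_PREEMPT();               // declares PreemptStateInternal
 *          // ... per-CPU work that must not be rescheduled ...
 *          HM_RESTORE_PREEMPT();               // same scope as the disable
 *      }
 * @endcode
 */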


/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** @name HM saved state versions
 * @{
 */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SAVED_STATE_VERSION                 5
# define HM_SAVED_STATE_VERSION_NO_PATCHING     4
#else
# define HM_SAVED_STATE_VERSION                 4
# define HM_SAVED_STATE_VERSION_NO_PATCHING     4
#endif
#define HM_SAVED_STATE_VERSION_2_0_X            3
/** @} */

/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBALCPUINFO
{
    /** The CPU ID. */
    RTCPUID idCpu;
    /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
    RTR0MEMOBJ hMemObj;
    /** The physical address of the first page in hMemObj (it's a
     * physically contiguous allocation if it spans multiple pages). */
    RTHCPHYS HCPhysMemObj;
    /** The address of the memory (for pfnEnable). */
    void *pvMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t uCurrentAsid;
    /** TLB flush count. */
    uint32_t cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool fInUse;
} HMGLOBALCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction (32-bit GC pointer). */
    AVLOU32NODECORE Core;
    /** Original opcode. */
    uint8_t aOpcode[16];
    /** Instruction size. */
    uint32_t cbOp;
    /** Replacement opcode. */
    uint8_t aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t cbNewOp;
    /** Instruction type. */
    HMTPRINSTR enmType;
    /** Source operand. */
    uint32_t uSrcOperand;
    /** Destination operand. */
    uint32_t uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32 pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
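
/** @remarks Patch records are keyed by guest instruction address in
 *           HM::PatchTree below; a lookup sketch using the IPRT AVL API
 *           (illustrative only, not a call site from the VMM sources):
 * @code
 *      PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)GCPtrRip);
 *      if (pPatch && pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
 *      {
 *          // ... dispatch to the jump-replacement handler ...
 *      }
 * @endcode
 */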

/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool fInitialized;
    /** Set if nested paging is enabled. */
    bool fNestedPaging;
    /** Set if nested paging is allowed. */
    bool fAllowNestedPaging;
    /** Set if large pages are enabled (requires nested paging). */
    bool fLargePages;
    /** Set if we can support 64-bit guests. */
    bool fAllow64BitGuests;
    /** Set if an IO-APIC is configured for this VM. */
    bool fHasIoApic;
    /** Set when TPR patching is allowed. */
    bool fTprPatchingAllowed;
    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool fGlobalInit;
    /** Set when TPR patching is active. */
    bool fTPRPatchingActive;
    /** Set when the debug facility has breakpoints/events enabled that requires
     * us to use the debug execution loop in ring-0. */
    bool fUseDebugLoop;
    /** Set if hardware APIC virtualization is enabled. */
    bool fVirtApicRegs;
    /** Set if posted interrupt processing is enabled. */
    bool fPostedIntrs;

    /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
    uint32_t fHostKernelFeatures;

    /** Maximum ASID allowed. */
    uint32_t uMaxAsid;
    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t cbGuestPatchMem;
    uint32_t u32Alignment0;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    /** 32-bit to 64-bit switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0;
    RTR0PTR pvR0Alignment0;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         * CPU. */
        bool fSupported;
        /** Set when we've enabled VMX. */
        bool fEnabled;
        /** Set if VPID is supported. */
        bool fVpid;
        /** Set if VT-x VPID is allowed. */
        bool fAllowVpid;
        /** Set if unrestricted guest execution is in use (real and protected mode
         * without paging). */
        bool fUnrestrictedGuest;
        /** Set if unrestricted guest execution is allowed to be used. */
        bool fAllowUnrestricted;
        /** Whether we're using the preemption timer or not. */
        bool fUsePreemptTimer;
        /** The shift mask employed by the VMX-preemption timer. */
        uint8_t cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS) pRealModeTSS;
        /** Virtual address of the identity page table used for real mode and
         * protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;

        /** Physical address of the APIC-access page. */
        RTHCPHYS HCPhysApicAccess;
        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ hMemObjApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *) pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTHCPHYS HCPhysScratch;
        RTR0MEMOBJ hMemObjScratch;
        R0PTRTYPE(uint8_t *) pbScratch;
#endif

        /** Internal Id of which flush-handler to use for tagged-TLB entries. */
        uint32_t uFlushTaggedTlb;

        /** Pause-loop exiting (PLE) gap in ticks. */
        uint32_t cPleGapTicks;
        /** Pause-loop exiting (PLE) window in ticks. */
        uint32_t cPleWindowTicks;
        uint32_t u32Alignment0;

        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t u64HostCr4;
        /** Host SMM monitor control (set by ring-0 VMX init). */
        uint64_t u64HostSmmMonitorCtl;
        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t u64HostEfer;
        /** Whether the CPU supports VMCS fields for swapping EFER. */
        bool fSupportsVmcsEfer;
        uint8_t u8Alignment2[7];

        /** VMX MSR values. */
        VMXMSRS Msrs;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMXFLUSHEPT enmFlushEpt;
        VMXFLUSHVPID enmFlushVpid;

        /** Host-physical address for a failing VMXON instruction. */
        RTHCPHYS HCPhysVmxEnableError;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         * CPU. */
        bool fSupported;
        /** Set when we've enabled SVM. */
        bool fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool fIgnoreInUseError;
        uint8_t u8Alignment0[4];

        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS HCPhysIOBitmap;
        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ hMemObjIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *) pvIOBitmap;

        /** HWCR MSR (for diagnostics). */
        uint64_t u64MsrHwcr;

        /** SVM revision. */
        uint32_t u32Rev;
        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t u32Features;

        /** Pause filter counter. */
        uint16_t cPauseFilter;
        /** Pause filter threshold in ticks. */
        uint16_t cPauseFilterThresholdTicks;
        uint32_t u32Alignment0;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction
     * address.
     */
    AVLOU32TREE PatchTree;
    uint32_t cPatches;
    HMTPRPATCH aPatches[64];

    struct
    {
        uint32_t u32AMDFeatureECX;
        uint32_t u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t lLastError;

    /** Set when HMR0Init has been run. */
    bool fHMR0Init;
    bool u8Alignment1[3];

    STAMCOUNTER StatTprPatchSuccess;
    STAMCOUNTER StatTprPatchFailure;
    STAMCOUNTER StatTprReplaceSuccessCr8;
    STAMCOUNTER StatTprReplaceSuccessVmc;
    STAMCOUNTER StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);

/** Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY 128

/**
 * Structure for storing read and write VMCS actions.
 */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /** Magic marker for searching in crash dumps. */
    uint8_t aMagic[16];
    uint64_t uMagic;
    uint64_t u64TimeEntry;
    uint64_t u64TimeSwitch;
    uint64_t cResume;
    uint64_t interPD;
    uint64_t pSwitcher;
    uint32_t uPos;
    uint32_t idCpu;
#endif
    /** CR2 is saved here for EPT syncing. */
    uint64_t cr2;
    struct
    {
        uint32_t cValidEntries;
        uint32_t uAlignment;
        uint32_t aField[VMCSCACHE_MAX_ENTRY];
        uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t cValidEntries;
        uint32_t uAlignment;
        uint32_t aField[VMCSCACHE_MAX_ENTRY];
        uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS HCPhysCpuPage;
        RTHCPHYS HCPhysVmcs;
        RTGCPTR pCache;
        RTGCPTR pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS HCPhysVmcs;
        RTGCPTR pCache;
        RTGCPTR pCtx;
        uint64_t eflags;
        uint64_t cr8;
    } TestOut;
    struct
    {
        uint64_t param1;
        uint64_t param2;
        uint64_t param3;
        uint64_t param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
AssertCompileSizeAlignment(VMCSCACHE, 8);

/**
 * VMX StartVM function.
 *
 * @returns VBox status code (no informational stuff).
 * @param   fResume     Whether to use VMRESUME (true) or VMLAUNCH (false).
 * @param   pCtx        The CPU register context.
 * @param   pCache      The VMCS cache.
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pVCpu       Pointer to the cross context per-CPU structure.
 */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
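
/** @remarks These pointers are how ring-0 enters guest context; a hedged
 *           sketch of a VT-x call site (the fields referenced are declared in
 *           HMCPU below; the surrounding variables are assumed to be in scope,
 *           and the real dispatch may go through wrappers instead):
 * @code
 *      int rc = pVCpu->hm.s.vmx.pfnStartVM(fResume, pCtx, &pVCpu->hm.s.vmx.VMCSCache,
 *                                          pVM, pVCpu);
 * @endcode
 */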

/**
 * HM VMCPU Instance data.
 *
 * Note! If you change members of this struct, make sure to check if the
 * assembly counterpart in HMInternal.mac needs to be updated as well.
 */
typedef struct HMCPU
{
    /** Set if we need to flush the TLB during the world switch. */
    bool fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool fCheckedTLBFlush;
    /** Whether we've completed the inner HM leave function. */
    bool fLeaveDone;
    /** Whether we're using the hyper DR7 or guest DR7. */
    bool fUsingHyperDR7;
    /** Whether to preload the guest-FPU state to avoid \#NM VM-exit overhead. */
    bool fPreloadGuestFpu;
    /** Set if XCR0 needs to be loaded and saved when entering and exiting guest
     * code execution. */
    bool fLoadSaveGuestXcr0;

    /** Whether we should use the debug loop because single stepping or special
     * debug breakpoints / events are armed. */
    bool fUseDebugLoop;
    /** Whether we are currently executing in the debug loop.
     * Mainly for assertions. */
    bool fUsingDebugLoop;
    /** Set if we are using the debug loop and wish to intercept RDTSC. */
    bool fDebugWantRdTscExit;
    /** Whether we're executing a single instruction. */
    bool fSingleInstruction;
    /** Set if we need to clear the trap flag because of single stepping. */
    bool fClearTrapFlag;

    /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
    bool fGIMTrapXcptUD;
    /** Whether paravirt. hypercalls are enabled. */
    bool fHypercallsEnabled;
    uint8_t u8Alignment0[2];

    /** World switch exit counter. */
    volatile uint32_t cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    volatile uint32_t fContextUseFlags;
    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
     * time). */
    RTCPUID idLastCpu;
    /** TLB flush count. */
    uint32_t cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t uCurrentAsid;
    /** An additional error code used for some gurus. */
    uint32_t u32HMError;
    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t u64HostTscAux;

    struct
    {
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t u32Alignment0;
#endif
        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t u32EntryCtls;

        /** Current CR0 mask. */
        uint32_t u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t u32XcptBitmap;
        /** The updated-guest-state mask. */
        volatile uint32_t fUpdatedGuestState;
        uint32_t u32Alignment1;

        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *) pvVmcs;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *) pbVirtApic;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *) pvMsrBitmap;

        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        RTHCPHYS HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         * (used for guest MSRs). */
        RTR0MEMOBJ hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        R0PTRTYPE(void *) pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *) pvHostMsr;

        /** Current EPTP. */
        RTHCPHYS HCPhysEPTP;

        /** Number of guest/host MSR pairs in the auto-load/store area. */
        uint32_t cMsrs;
        /** Whether the host MSR values are up-to-date in the auto-load/store area. */
        bool fUpdatedHostMsrs;
        uint8_t u8Alignment0[3];

        /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostLStarMsr;
        /** Host STAR MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostStarMsr;
        /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostSFMaskMsr;
        /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostKernelGSBaseMsr;
        /** A mask of which MSRs have been swapped and need restoration. */
        uint32_t fLazyMsrs;
        uint32_t u32Alignment2;

        /** The cached APIC-base MSR used for identifying when to map the HC physical
         * APIC-access page. */
        uint64_t u64MsrApicBase;
        /** Last used TSC offset value (cached). */
        uint64_t u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR AttrCS;
            X86DESCATTR AttrDS;
            X86DESCATTR AttrES;
            X86DESCATTR AttrFS;
            X86DESCATTR AttrGS;
            X86DESCATTR AttrSS;
            X86EFLAGS Eflags;
            uint32_t fRealOnV86Active;
        } RealMode;

        /** VT-x error-reporting (mainly for ring-3 propagation). */
        struct
        {
            uint64_t u64VMCSPhys;
            uint32_t u32VMCSRevision;
            uint32_t u32InstrError;
            uint32_t u32ExitReason;
            RTCPUID idEnteredCpu;
            RTCPUID idCurrentCpu;
            uint32_t u32Alignment0;
        } LastError;

        /** Current state of the VMCS. */
        uint32_t uVmcsState;
        /** Which host-state bits to restore before being preempted. */
        uint32_t fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST RestoreHost;

        /** Set if guest was executing in real mode (extra checks). */
        bool fWasInRealMode;
        uint8_t u8Alignment1[7];
    } vmx;

    struct
    {
        /** Ring 0 handlers for AMD-V. */
        PFNHMSVMVMRUN pfnVMRun;
#if HC_ARCH_BITS == 32
        uint32_t u32Alignment0;
#endif

        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS HCPhysVmcbHost;
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ hMemObjVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *) pvVmcbHost;

        /** Physical address of the guest VMCB. */
        RTHCPHYS HCPhysVmcb;
        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ hMemObjVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *) pvVmcb;

        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *) pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         * we should check if the VTPR changed on every VM-exit. */
        bool fSyncVTpr;
        uint8_t u8Alignment0[7];
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t fPending;
        uint32_t u32ErrCode;
        uint32_t cbInstr;
        uint32_t u32Padding; /**< Explicit alignment padding. */
        uint64_t u64IntInfo;
        RTGCUINTPTR GCPtrFaultAddress;
    } Event;

    /** I/O block emulation state. */
    struct
    {
        bool fEnabled;
        uint8_t u8Align[7];

        /** RIP at the start of the I/O code we wish to emulate in the recompiler. */
        RTGCPTR GCPtrFunctionEip;

        uint64_t cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending I/O operation type. */
        HMPENDINGIO enmType;
        uint32_t u32Alignment0;
        RTGCPTR GCPtrRip;
        RTGCPTR GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t uPort;
                uint32_t uAndVal;
                uint32_t cbSize;
            } Port;
            uint64_t aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HMR0Enter and cleared in HMR0Leave. */
    RTCPUID idEnteredCpu;

    /** VT-x/AMD-V VM-exit/\#VMEXIT history, circular array. */
    uint16_t auExitHistory[31];
    /** The index of the next free slot in the history array. */
    uint16_t idxExitHistoryFree;

    /** For saving stack space, the disassembler state is allocated here instead of
     * on the stack. */
    DISCPUSTATE DisState;

    STAMPROFILEADV StatEntry;
    STAMPROFILEADV StatExit1;
    STAMPROFILEADV StatExit2;
    STAMPROFILEADV StatExitIO;
    STAMPROFILEADV StatExitMovCRx;
    STAMPROFILEADV StatExitXcptNmi;
    STAMPROFILEADV StatLoadGuestState;
    STAMPROFILEADV StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    STAMPROFILEADV StatWorldSwitch3264;
#endif
    STAMPROFILEADV StatPoke;
    STAMPROFILEADV StatSpinPoke;
    STAMPROFILEADV StatSpinPokeFailed;

    STAMCOUNTER StatInjectInterrupt;
    STAMCOUNTER StatInjectXcpt;
    STAMCOUNTER StatInjectPendingReflect;

    STAMCOUNTER StatExitAll;
    STAMCOUNTER StatExitShadowNM;
    STAMCOUNTER StatExitGuestNM;
    STAMCOUNTER StatExitShadowPF;       /**< Misleading, currently used for MMIO \#PFs as well. */
    STAMCOUNTER StatExitShadowPFEM;
    STAMCOUNTER StatExitGuestPF;
    STAMCOUNTER StatExitGuestUD;
    STAMCOUNTER StatExitGuestSS;
    STAMCOUNTER StatExitGuestNP;
    STAMCOUNTER StatExitGuestTS;
    STAMCOUNTER StatExitGuestGP;
    STAMCOUNTER StatExitGuestDE;
    STAMCOUNTER StatExitGuestDB;
    STAMCOUNTER StatExitGuestMF;
    STAMCOUNTER StatExitGuestBP;
    STAMCOUNTER StatExitGuestXF;
    STAMCOUNTER StatExitGuestXcpUnk;
    STAMCOUNTER StatExitInvlpg;
    STAMCOUNTER StatExitInvd;
    STAMCOUNTER StatExitWbinvd;
    STAMCOUNTER StatExitPause;
    STAMCOUNTER StatExitCpuid;
    STAMCOUNTER StatExitRdtsc;
    STAMCOUNTER StatExitRdtscp;
    STAMCOUNTER StatExitRdpmc;
    STAMCOUNTER StatExitVmcall;
    STAMCOUNTER StatExitRdrand;
    STAMCOUNTER StatExitCli;
    STAMCOUNTER StatExitSti;
    STAMCOUNTER StatExitPushf;
    STAMCOUNTER StatExitPopf;
    STAMCOUNTER StatExitIret;
    STAMCOUNTER StatExitInt;
    STAMCOUNTER StatExitCRxWrite[16];
    STAMCOUNTER StatExitCRxRead[16];
    STAMCOUNTER StatExitDRxWrite;
    STAMCOUNTER StatExitDRxRead;
    STAMCOUNTER StatExitRdmsr;
    STAMCOUNTER StatExitWrmsr;
    STAMCOUNTER StatExitClts;
    STAMCOUNTER StatExitXdtrAccess;
    STAMCOUNTER StatExitHlt;
    STAMCOUNTER StatExitMwait;
    STAMCOUNTER StatExitMonitor;
    STAMCOUNTER StatExitLmsw;
    STAMCOUNTER StatExitIOWrite;
    STAMCOUNTER StatExitIORead;
    STAMCOUNTER StatExitIOStringWrite;
    STAMCOUNTER StatExitIOStringRead;
    STAMCOUNTER StatExitIntWindow;
    STAMCOUNTER StatExitExtInt;
    STAMCOUNTER StatExitHostNmiInGC;
    STAMCOUNTER StatExitPreemptTimer;
    STAMCOUNTER StatExitTprBelowThreshold;
    STAMCOUNTER StatExitTaskSwitch;
    STAMCOUNTER StatExitMtf;
    STAMCOUNTER StatExitApicAccess;
    STAMCOUNTER StatPendingHostIrq;

    STAMCOUNTER StatFlushPage;
    STAMCOUNTER StatFlushPageManual;
    STAMCOUNTER StatFlushPhysPageManual;
    STAMCOUNTER StatFlushTlb;
    STAMCOUNTER StatFlushTlbManual;
    STAMCOUNTER StatFlushTlbWorldSwitch;
    STAMCOUNTER StatNoFlushTlbWorldSwitch;
    STAMCOUNTER StatFlushEntire;
    STAMCOUNTER StatFlushAsid;
    STAMCOUNTER StatFlushNestedPaging;
    STAMCOUNTER StatFlushTlbInvlpgVirt;
    STAMCOUNTER StatFlushTlbInvlpgPhys;
    STAMCOUNTER StatTlbShootdown;
    STAMCOUNTER StatTlbShootdownFlush;

    STAMCOUNTER StatSwitchTprMaskedIrq;
    STAMCOUNTER StatSwitchGuestIrq;
    STAMCOUNTER StatSwitchHmToR3FF;
    STAMCOUNTER StatSwitchExitToR3;
    STAMCOUNTER StatSwitchLongJmpToR3;
    STAMCOUNTER StatSwitchMaxResumeLoops;
    STAMCOUNTER StatSwitchHltToR3;
    STAMCOUNTER StatSwitchApicAccessToR3;
    STAMCOUNTER StatSwitchPreempt;
    STAMCOUNTER StatSwitchPreemptSaveHostState;

    STAMCOUNTER StatTscParavirt;
    STAMCOUNTER StatTscOffset;
    STAMCOUNTER StatTscIntercept;

    STAMCOUNTER StatExitReasonNpf;
    STAMCOUNTER StatDRxArmed;
    STAMCOUNTER StatDRxContextSwitch;
    STAMCOUNTER StatDRxIoCheck;

    STAMCOUNTER StatLoadMinimal;
    STAMCOUNTER StatLoadFull;

    STAMCOUNTER StatVmxCheckBadRmSelBase;
    STAMCOUNTER StatVmxCheckBadRmSelLimit;
    STAMCOUNTER StatVmxCheckRmOk;

    STAMCOUNTER StatVmxCheckBadSel;
    STAMCOUNTER StatVmxCheckBadRpl;
    STAMCOUNTER StatVmxCheckBadLdt;
    STAMCOUNTER StatVmxCheckBadTr;
    STAMCOUNTER StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    STAMCOUNTER StatFpu64SwitchBack;
    STAMCOUNTER StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER) paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;
AssertCompileMemberAlignment(HMCPU, vmx, 8);
AssertCompileMemberAlignment(HMCPU, svm, 8);
AssertCompileMemberAlignment(HMCPU, Event, 8);


#ifdef IN_RING0
/** @todo r=bird: s/[[:space:]]HM/ hm/ - internal functions starts with a
 *        lower cased prefix.  HMInternal.h is an internal header, so
 *        everything here must be internal. */
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


# ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
# else
#  define HMDumpRegs(a, b, c)           do { } while (0)
#  define HMR0DumpDescriptor(a, b, c)   do { } while (0)
# endif /* VBOX_STRICT */

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif