VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@58944

Last change on this file since 58944 was 58938, checked in by vboxsync, 9 years ago

HM,DBGF: Made DBGF notify HM about changes to VMM event and interrupt breakpoints. Made HM cache the basic info wrt ring-0 loop selection, opting to use a debug loop when debugging takes place to avoid cluttering and slowing down the normal execution loop. The plan is to extend the single stepping loop and to put complicated dtrace probes into the same loop. Modified the VMX loop selection already.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 42.2 KB
/* $Id: HMInternal.h 58938 2015-12-01 14:17:45Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>
#include <iprt/string.h>

#if defined(RT_OS_DARWIN) && HC_ARCH_BITS == 32
# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
#endif

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0 || defined(DOXYGEN_RUNNING)
# define HM_PROFILE_EXIT_DISPATCH
#endif

RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */

/** @def HMCPU_CF_CLEAR
 * Clears a HM-context flag.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlag   The flag to clear.
 */
#define HMCPU_CF_CLEAR(pVCpu, fFlag)        (ASMAtomicUoAndU32(&(pVCpu)->hm.s.fContextUseFlags, ~(fFlag)))

/** @def HMCPU_CF_SET
 * Sets a HM-context flag.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlag   The flag to set.
 */
#define HMCPU_CF_SET(pVCpu, fFlag)          (ASMAtomicUoOrU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlag)))

/** @def HMCPU_CF_IS_SET
 * Checks if all the flags in the specified HM-context set are pending.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlag   The flag to check.
 */
#define HMCPU_CF_IS_SET(pVCpu, fFlag)       ((ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlag)) == (fFlag))

/** @def HMCPU_CF_IS_PENDING
 * Checks if one or more of the flags in the specified HM-context set is
 * pending.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  The flags to check for.
 */
#define HMCPU_CF_IS_PENDING(pVCpu, fFlags)  RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlags))

/** @def HMCPU_CF_IS_PENDING_ONLY
 * Checks if -only- one or more of the specified HM-context flags is pending.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  The flags to check for.
 */
#define HMCPU_CF_IS_PENDING_ONLY(pVCpu, fFlags)  !RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & ~(fFlags))

/** @def HMCPU_CF_IS_SET_ONLY
 * Checks if -only- all the flags in the specified HM-context set are pending.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  The flags to check for.
 */
#define HMCPU_CF_IS_SET_ONLY(pVCpu, fFlags) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) == (fFlags))

/** @def HMCPU_CF_RESET_TO
 * Resets the HM-context flags to the specified value.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  The new value.
 */
#define HMCPU_CF_RESET_TO(pVCpu, fFlags)    (ASMAtomicUoWriteU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlags)))

/** @def HMCPU_CF_VALUE
 * Returns the current HM-context flags value.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
#define HMCPU_CF_VALUE(pVCpu)               (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags))

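/*
 * Illustrative usage sketch (hypothetical, not code from this file): the
 * accessors above combine with the HM_CHANGED_* bits defined further down,
 * e.g. after modifying guest state and before the next VM-entry:
 *
 *     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
 *     ...
 *     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST))
 *     {
 *         ... reload the flagged state into the VMCS/VMCB ...
 *         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
 *     }
 */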

/** Resets/initializes the VM-exit/\#VMEXIT history array. */
#define HMCPU_EXIT_HISTORY_RESET(pVCpu)     (memset(&(pVCpu)->hm.s.auExitHistory, 0xff, sizeof((pVCpu)->hm.s.auExitHistory)))

/** Updates the VM-exit/\#VMEXIT history array. */
#define HMCPU_EXIT_HISTORY_ADD(pVCpu, a_ExitReason) \
    do { \
        AssertMsg((pVCpu)->hm.s.idxExitHistoryFree < RT_ELEMENTS((pVCpu)->hm.s.auExitHistory), ("%u\n", (pVCpu)->hm.s.idxExitHistoryFree)); \
        (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree++] = (uint16_t)(a_ExitReason); \
        if ((pVCpu)->hm.s.idxExitHistoryFree == RT_ELEMENTS((pVCpu)->hm.s.auExitHistory)) \
            (pVCpu)->hm.s.idxExitHistoryFree = 0; \
        (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree] = UINT16_MAX; \
    } while (0)
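/*
 * Illustrative only (hypothetical, not code from this file): auExitHistory is
 * a circular buffer; idxExitHistoryFree points at the next slot to write, and
 * HMCPU_EXIT_HISTORY_ADD() re-stamps that slot with UINT16_MAX as an end
 * sentinel (the RESET macro fills the array with 0xff bytes for the same
 * reason).  A sketch of walking the history backwards from the newest exit:
 *
 *     uint16_t idx = pVCpu->hm.s.idxExitHistoryFree;
 *     for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->hm.s.auExitHistory); i++)
 *     {
 *         idx = idx ? idx - 1 : RT_ELEMENTS(pVCpu->hm.s.auExitHistory) - 1;
 *         uint16_t const uExitReason = pVCpu->hm.s.auExitHistory[idx];
 *         if (uExitReason == UINT16_MAX)   // unused/sentinel slot
 *             break;
 *         Log(("exit[-%u]=%#x\n", i + 1, uExitReason));
 *     }
 */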

/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT     0x100
#define MASK_EXITREASON_STAT    0xff
#define MASK_INJECT_IRQ_STAT    0xff

/** @name HM changed flags.
 * These flags are used to keep track of which important registers have been
 * changed since they were last reset.
 * @{
 */
#define HM_CHANGED_GUEST_CR0                     RT_BIT(0)      /* Shared */
#define HM_CHANGED_GUEST_CR3                     RT_BIT(1)
#define HM_CHANGED_GUEST_CR4                     RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR                    RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR                    RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR                    RT_BIT(5)
#define HM_CHANGED_GUEST_TR                      RT_BIT(6)
#define HM_CHANGED_GUEST_SEGMENT_REGS            RT_BIT(7)
#define HM_CHANGED_GUEST_DEBUG                   RT_BIT(8)      /* Shared */
#define HM_CHANGED_GUEST_RIP                     RT_BIT(9)
#define HM_CHANGED_GUEST_RSP                     RT_BIT(10)
#define HM_CHANGED_GUEST_RFLAGS                  RT_BIT(11)
#define HM_CHANGED_GUEST_CR2                     RT_BIT(12)
#define HM_CHANGED_GUEST_SYSENTER_CS_MSR         RT_BIT(13)
#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR        RT_BIT(14)
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
#define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17)     /* Shared */
#define HM_CHANGED_GUEST_XCPT_INTERCEPTS         RT_BIT(18)
/* VT-x specific state. */
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(19)
#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(20)
#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(21)
#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(22)
#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(23)
/* AMD-V specific state. */
#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(19)
#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(20)
#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)

#define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0 \
                                                  | HM_CHANGED_GUEST_CR3 \
                                                  | HM_CHANGED_GUEST_CR4 \
                                                  | HM_CHANGED_GUEST_GDTR \
                                                  | HM_CHANGED_GUEST_IDTR \
                                                  | HM_CHANGED_GUEST_LDTR \
                                                  | HM_CHANGED_GUEST_TR \
                                                  | HM_CHANGED_GUEST_SEGMENT_REGS \
                                                  | HM_CHANGED_GUEST_DEBUG \
                                                  | HM_CHANGED_GUEST_RIP \
                                                  | HM_CHANGED_GUEST_RSP \
                                                  | HM_CHANGED_GUEST_RFLAGS \
                                                  | HM_CHANGED_GUEST_CR2 \
                                                  | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                                                  | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                                  | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
                                                  | HM_CHANGED_GUEST_EFER_MSR \
                                                  | HM_CHANGED_GUEST_LAZY_MSRS \
                                                  | HM_CHANGED_GUEST_XCPT_INTERCEPTS \
                                                  | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
                                                  | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                                  | HM_CHANGED_VMX_GUEST_APIC_STATE \
                                                  | HM_CHANGED_VMX_ENTRY_CTLS \
                                                  | HM_CHANGED_VMX_EXIT_CTLS)

#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(24)

/* Bits shared between host and guest. */
#define HM_CHANGED_HOST_GUEST_SHARED_STATE       (  HM_CHANGED_GUEST_CR0 \
                                                  | HM_CHANGED_GUEST_DEBUG \
                                                  | HM_CHANGED_GUEST_LAZY_MSRS)
/** @} */

/** Size of the EPT identity page table (a single page whose 1024 entries each
 *  map a 4 MB page, covering the entire 32-bit address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM        (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)


/** @name Macros for enabling and disabling preemption.
 * These are really just for hiding the RTTHREADPREEMPTSTATE and asserting that
 * preemption has already been disabled when there is no context hook.
 * @{ */
#ifdef VBOX_STRICT
# define HM_DISABLE_PREEMPT() \
    RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD) || VMMR0ThreadCtxHookIsEnabled(pVCpu)); \
    RTThreadPreemptDisable(&PreemptStateInternal)
#else
# define HM_DISABLE_PREEMPT() \
    RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \
    RTThreadPreemptDisable(&PreemptStateInternal)
#endif /* VBOX_STRICT */
#define HM_RESTORE_PREEMPT()    do { RTThreadPreemptRestore(&PreemptStateInternal); } while (0)
/** @} */
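/*
 * Illustrative only (hypothetical, not code from this file): the pair above
 * brackets code that must stay on one host CPU.  HM_DISABLE_PREEMPT() declares
 * PreemptStateInternal in the current scope (and the VBOX_STRICT variant also
 * expects a pVCpu in scope), so both macros must be used in the same scope:
 *
 *     HM_DISABLE_PREEMPT();
 *     PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();  // declared near the end of this file
 *     // ... per-CPU work that must not be rescheduled ...
 *     HM_RESTORE_PREEMPT();
 */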


/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** @name HM saved state versions
 * @{
 */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SAVED_STATE_VERSION             5
# define HM_SAVED_STATE_VERSION_NO_PATCHING 4
#else
# define HM_SAVED_STATE_VERSION             4
# define HM_SAVED_STATE_VERSION_NO_PATCHING 4
#endif
#define HM_SAVED_STATE_VERSION_2_0_X        3
/** @} */

/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBALCPUINFO
{
    /** The CPU ID. */
    RTCPUID         idCpu;
    /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
    RTR0MEMOBJ      hMemObj;
    /** The physical address of the first page in hMemObj (it's a
     *  physically contiguous allocation if it spans multiple pages). */
    RTHCPHYS        HCPhysMemObj;
    /** The address of the memory (for pfnEnable). */
    void           *pvMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t        uCurrentAsid;
    /** TLB flush count. */
    uint32_t        cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool            fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool            fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool            fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool   fInUse;
} HMGLOBALCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction. (32-bit GC ptr) */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
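/*
 * Illustrative only (hypothetical, not code from this file): patch records are
 * kept in the HM::PatchTree AVL tree declared further down, keyed by the guest
 * address of the patched instruction, so a lookup looks roughly like:
 *
 *     PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)GCPtrInstr);
 *     if (pPatch && pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
 *         // ... handle a fault in the jump-replacement patch ...
 */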

/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param   pVM             The cross context VM structure.
 * @param   offCpumVCpu     Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;
    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;
    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;
    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;
    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;
    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;
    /** Set when TPR patching is allowed. */
    bool                        fTprPatchingAllowed;
    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;
    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    /** Set when the debug facility has breakpoints/events enabled that require
     *  us to use the debug execution loop in ring-0. */
    bool                        fUseDebugLoop;
    bool                        u8Alignment[2];

    /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
    uint32_t                    fHostKernelFeatures;

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;
    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     *  This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    uint32_t                    u32Alignment0;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    RTR0PTR                     pvR0Alignment0;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled VMX. */
        bool                        fEnabled;
        /** Set if VPID is supported. */
        bool                        fVpid;
        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;
        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;
        /** Set if unrestricted guest execution is allowed to be used. */
        bool                        fAllowUnrestricted;
        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;
        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTHCPHYS                    HCPhysScratch;
        RTR0MEMOBJ                  hMemObjScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif

        /** Internal Id of which flush-handler to use for tagged-TLB entries. */
        uint32_t                    uFlushTaggedTlb;

        /** Pause-loop exiting (PLE) gap in ticks. */
        uint32_t                    cPleGapTicks;
        /** Pause-loop exiting (PLE) window in ticks. */
        uint32_t                    cPleWindowTicks;
        uint32_t                    u32Alignment0;

        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t                    u64HostCr4;

        /** Host EFER value (set by ring-0 VMX init) */
        uint64_t                    u64HostEfer;
        /** Whether the CPU supports VMCS fields for swapping EFER. */
        bool                        fSupportsVmcsEfer;
        uint8_t                     u8Alignment2[7];

        /** VMX MSR values. */
        VMXMSRS                     Msrs;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMXFLUSHEPT                 enmFlushEpt;
        VMXFLUSHVPID                enmFlushVpid;

        /** Host-physical address for a failing VMXON instruction. */
        RTHCPHYS                    HCPhysVmxEnableError;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;
        uint8_t                     u8Alignment0[4];

        /** Physical address of the IO bitmap (12 KB). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** R0 memory object for the IO bitmap (12 KB). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /* HWCR MSR (for diagnostics) */
        uint64_t                    u64MsrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;
        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t                    u32Features;

        /** Pause filter counter. */
        uint16_t                    cPauseFilter;
        /** Pause filter threshold in ticks. */
        uint16_t                    cPauseFilterThresholdTicks;
        uint32_t                    u32Alignment0;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction
     * address.
     */
    AVLOU32TREE                 PatchTree;
    uint32_t                    cPatches;
    HMTPRPATCH                  aPatches[64];

    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection */
    int32_t                     lLastError;

    /** HMR0Init was run */
    bool                        fHMR0Init;
    bool                        u8Alignment1[3];

    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccessCr8;
    STAMCOUNTER                 StatTprReplaceSuccessVmc;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);

/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY     128

/**
 * Structure for storing read and write VMCS actions.
 */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t     aMagic[16];
    uint64_t    uMagic;
    uint64_t    u64TimeEntry;
    uint64_t    u64TimeSwitch;
    uint64_t    cResume;
    uint64_t    interPD;
    uint64_t    pSwitcher;
    uint32_t    uPos;
    uint32_t    idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t    cr2;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS    HCPhysCpuPage;
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
AssertCompileSizeAlignment(VMCSCACHE, 8);
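/*
 * Illustrative only (hypothetical helper, not part of this header; the real
 * cache handling lives in the ring-0 VMX code): a VMCS field write could be
 * queued in the cache instead of being issued immediately, e.g.:
 *
 *     static int hmVmxCacheQueueWrite(PVMCSCACHE pCache, uint32_t idxField, uint64_t u64Val)
 *     {
 *         AssertReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY, VERR_BUFFER_OVERFLOW);
 *         uint32_t const i = pCache->Write.cValidEntries++;
 *         pCache->Write.aField[i]    = idxField;
 *         pCache->Write.aFieldVal[i] = u64Val;
 *         return VINF_SUCCESS;
 *     }
 */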

/**
 * VMX StartVM function.
 *
 * @returns VBox status code (no informational stuff).
 * @param   fResume     Whether to use VMRESUME (true) or VMLAUNCH (false).
 * @param   pCtx        The CPU register context.
 * @param   pCache      The VMCS cache.
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pVCpu       Pointer to the cross context per-CPU structure.
 */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;

/**
 * HM VMCPU Instance data.
 *
 * Note! If you change members of this struct, make sure to check if the
 *       assembly counterpart in HMInternal.mac needs to be updated as well.
 */
typedef struct HMCPU
{
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Whether we're executing a single instruction. */
    bool                        fSingleInstruction;
    /** Whether we should use the debug loop because single stepping or special
     *  debug breakpoints / events are armed. */
    bool                        fUseDebugLoop;
    /** Set if we need to clear the trap flag because of single stepping. */
    bool                        fClearTrapFlag;
    /** Whether we've completed the inner HM leave function. */
    bool                        fLeaveDone;
    /** Whether we're using the hyper DR7 or guest DR7. */
    bool                        fUsingHyperDR7;
    /** Whether to preload the guest-FPU state to avoid \#NM VM-exit overhead. */
    bool                        fPreloadGuestFpu;
    /** Set if XCR0 needs to be loaded and saved when entering and exiting guest
     *  code execution. */
    bool                        fLoadSaveGuestXcr0;

    /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
    bool                        fGIMTrapXcptUD;
    /** Whether paravirt. hypercalls are enabled. */
    bool                        fHypercallsEnabled;
    uint8_t                     u8Alignment0[4];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    volatile uint32_t           fContextUseFlags;
    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
     *  time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    /** An additional error code used for some gurus. */
    uint32_t                    u32HMError;
    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    struct
    {
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment0;
#endif
        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t                    u32EntryCtls;

        /** Current CR0 mask. */
        uint32_t                    u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t                    u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        volatile uint32_t           fUpdatedGuestState;
        uint32_t                    u32Alignment1;

        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Current EPTP. */
        RTHCPHYS                    HCPhysEPTP;

        /** Number of guest/host MSR pairs in the auto-load/store area. */
        uint32_t                    cMsrs;
        /** Whether the host MSR values are up-to-date in the auto-load/store area. */
        bool                        fUpdatedHostMsrs;
        uint8_t                     u8Alignment0[3];

        /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostLStarMsr;
        /** Host STAR MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostStarMsr;
        /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostSFMaskMsr;
        /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostKernelGSBaseMsr;
        /** A mask of which MSRs have been swapped and need restoration. */
        uint32_t                    fLazyMsrs;
        uint32_t                    u32Alignment2;

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last use TSC offset value. (cached) */
        uint64_t                    u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR                 AttrCS;
            X86DESCATTR                 AttrDS;
            X86DESCATTR                 AttrES;
            X86DESCATTR                 AttrFS;
            X86DESCATTR                 AttrGS;
            X86DESCATTR                 AttrSS;
            X86EFLAGS                   Eflags;
            uint32_t                    fRealOnV86Active;
        } RealMode;

        /** VT-x error-reporting (mainly for ring-3 propagation). */
        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                u32VMCSRevision;
            uint32_t                u32InstrError;
            uint32_t                u32ExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                u32Alignment0;
        } LastError;

        /** Current state of the VMCS. */
        uint32_t                    uVmcsState;
        /** Which host-state bits to restore before being preempted. */
        uint32_t                    fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST              RestoreHost;

        /** Set if guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;
        uint8_t                     u8Alignment1[7];
    } vmx;

    struct
    {
        /** Ring 0 handlers for AMD-V. */
        PFNHMSVMVMRUN               pfnVMRun;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment0;
#endif

        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS                    HCPhysVmcbHost;
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ                  hMemObjVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *)           pvVmcbHost;

        /** Physical address of the guest VMCB. */
        RTHCPHYS                    HCPhysVmcb;
        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ                  hMemObjVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *)           pvVmcb;

        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         *  we should check if the VTPR changed on every VM-exit. */
        bool                        fSyncVTpr;
        uint8_t                     u8Alignment0[7];
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    u32ErrCode;
        uint32_t                    cbInstr;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
        uint64_t                    u64IntInfo;
        RTGCUINTPTR                 GCPtrFaultAddress;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the io code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending IO operation type. */
        HMPENDINGIO                 enmType;
        uint32_t                    u32Alignment0;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t                uPort;
                uint32_t                uAndVal;
                uint32_t                cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     *  HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** VT-x/AMD-V VM-exit/\#VMEXIT history, circular array. */
    uint16_t                    auExitHistory[31];
    /** The index of the next free slot in the history array. */
    uint16_t                    idxExitHistoryFree;

    /** For saving stack space, the disassembler state is allocated here instead of
     *  on the stack. */
    DISCPUSTATE                 DisState;

    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
    STAMPROFILEADV              StatExitIO;
    STAMPROFILEADV              StatExitMovCRx;
    STAMPROFILEADV              StatExitXcptNmi;
    STAMPROFILEADV              StatLoadGuestState;
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatInjectInterrupt;
    STAMCOUNTER                 StatInjectXcpt;
    STAMCOUNTER                 StatInjectPendingReflect;

    STAMCOUNTER                 StatExitAll;
    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;   /**< Misleading, currently used for MMIO \#PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestTS;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitVmcall;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitExtInt;
    STAMCOUNTER                 StatExitHostNmiInGC;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushEntire;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;
    STAMCOUNTER                 StatSwitchMaxResumeLoops;
    STAMCOUNTER                 StatSwitchHltToR3;
    STAMCOUNTER                 StatSwitchApicAccessToR3;
    STAMCOUNTER                 StatSwitchPreempt;
    STAMCOUNTER                 StatSwitchPreemptSaveHostState;

    STAMCOUNTER                 StatTscParavirt;
    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

    STAMCOUNTER                 StatVmxCheckBadRmSelBase;
    STAMCOUNTER                 StatVmxCheckBadRmSelLimit;
    STAMCOUNTER                 StatVmxCheckRmOk;

    STAMCOUNTER                 StatVmxCheckBadSel;
    STAMCOUNTER                 StatVmxCheckBadRpl;
    STAMCOUNTER                 StatVmxCheckBadLdt;
    STAMCOUNTER                 StatVmxCheckBadTr;
    STAMCOUNTER                 StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV              StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;
AssertCompileMemberAlignment(HMCPU, vmx, 8);
AssertCompileMemberAlignment(HMCPU, svm, 8);
AssertCompileMemberAlignment(HMCPU, Event, 8);


#ifdef IN_RING0
/** @todo r=bird: s/[[:space:]]HM/ hm/ - internal functions starts with a
 *        lower cased prefix.  HMInternal.h is an internal header, so
 *        everything here must be internal. */
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


# ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
# else
#  define HMDumpRegs(a, b, c)           do { } while (0)
#  define HMR0DumpDescriptor(a, b, c)   do { } while (0)
# endif /* VBOX_STRICT */

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif
