VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h @ 53349

Last change on this file since 53349 was 52766, checked in by vboxsync, 10 years ago

VMM/HM: Fixing source of ambiguity.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 39.5 KB
/* $Id: HMInternal.h 52766 2014-09-16 16:21:44Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2014 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>
#include <iprt/string.h>

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0
# define HM_PROFILE_EXIT_DISPATCH
#endif

RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */

/** @def HMCPU_CF_CLEAR
 * Clears a HM-context flag.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag to clear.
 */
#define HMCPU_CF_CLEAR(pVCpu, fFlag) (ASMAtomicUoAndU32(&(pVCpu)->hm.s.fContextUseFlags, ~(fFlag)))

/** @def HMCPU_CF_SET
 * Sets a HM-context flag.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag to set.
 */
#define HMCPU_CF_SET(pVCpu, fFlag) (ASMAtomicUoOrU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlag)))

/** @def HMCPU_CF_IS_SET
 * Checks if all the flags in the specified HM-context set are pending.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag to check.
 */
#define HMCPU_CF_IS_SET(pVCpu, fFlag) ((ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlag)) == (fFlag))

/** @def HMCPU_CF_IS_PENDING
 * Checks if one or more of the flags in the specified HM-context set is
 * pending.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_PENDING(pVCpu, fFlags) RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlags))

/** @def HMCPU_CF_IS_PENDING_ONLY
 * Checks if -only- one or more of the specified HM-context flags is pending.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_PENDING_ONLY(pVCpu, fFlags) !RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & ~(fFlags))

/** @def HMCPU_CF_IS_SET_ONLY
 * Checks if -only- all the flags in the specified HM-context set are pending.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_SET_ONLY(pVCpu, fFlags) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) == (fFlags))

/** @def HMCPU_CF_RESET_TO
 * Resets the HM-context flags to the specified value.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The new value.
 */
#define HMCPU_CF_RESET_TO(pVCpu, fFlags) (ASMAtomicUoWriteU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlags)))

/** @def HMCPU_CF_VALUE
 * Returns the current HM-context flags value.
 *
 * @param pVCpu Pointer to the VMCPU.
 */
#define HMCPU_CF_VALUE(pVCpu) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags))


/** Resets/initializes the VM-exit/#VMEXIT history array. */
#define HMCPU_EXIT_HISTORY_RESET(pVCpu) (memset(&(pVCpu)->hm.s.auExitHistory, 0xff, sizeof((pVCpu)->hm.s.auExitHistory)))

/** Updates the VM-exit/#VMEXIT history array. */
#define HMCPU_EXIT_HISTORY_ADD(pVCpu, a_ExitReason) \
    do { \
        AssertMsg((pVCpu)->hm.s.idxExitHistoryFree < RT_ELEMENTS((pVCpu)->hm.s.auExitHistory), ("%u\n", (pVCpu)->hm.s.idxExitHistoryFree)); \
        (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree++] = (uint16_t)(a_ExitReason); \
        if ((pVCpu)->hm.s.idxExitHistoryFree == RT_ELEMENTS((pVCpu)->hm.s.auExitHistory)) \
            (pVCpu)->hm.s.idxExitHistoryFree = 0; \
        (pVCpu)->hm.s.auExitHistory[(pVCpu)->hm.s.idxExitHistoryFree] = UINT16_MAX; \
    } while (0)

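/*
 * Illustrative sketch (added commentary, not part of the original header): how
 * an exit handler would typically record an exit in the circular history.  The
 * function name below is hypothetical; only the macro itself comes from this file.
 */
#if 0
static void hmSketchRecordExit(PVMCPU pVCpu, uint32_t uExitReason)
{
    /* Stores the reason in the current free slot, advances the index with
       wrap-around at 31 entries, and marks the new free slot with UINT16_MAX. */
    HMCPU_EXIT_HISTORY_ADD(pVCpu, uExitReason);
}
#endif
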
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT 0x100
#define MASK_EXITREASON_STAT 0xff
#define MASK_INJECT_IRQ_STAT 0xff

/** @name HM changed flags.
 * These flags are used to keep track of which important registers have been
 * changed since they were last reset.
 * @{
 */
#define HM_CHANGED_GUEST_CR0 RT_BIT(0) /* Shared */
#define HM_CHANGED_GUEST_CR3 RT_BIT(1)
#define HM_CHANGED_GUEST_CR4 RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR RT_BIT(5)
#define HM_CHANGED_GUEST_TR RT_BIT(6)
#define HM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(7)
#define HM_CHANGED_GUEST_DEBUG RT_BIT(8) /* Shared */
#define HM_CHANGED_GUEST_RIP RT_BIT(9)
#define HM_CHANGED_GUEST_RSP RT_BIT(10)
#define HM_CHANGED_GUEST_RFLAGS RT_BIT(11)
#define HM_CHANGED_GUEST_CR2 RT_BIT(12)
#define HM_CHANGED_GUEST_SYSENTER_CS_MSR RT_BIT(13)
#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR RT_BIT(14)
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR RT_BIT(15)
#define HM_CHANGED_GUEST_EFER_MSR RT_BIT(16)
#define HM_CHANGED_GUEST_LAZY_MSRS RT_BIT(17) /* Shared */
/* VT-x specific state. */
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS RT_BIT(18)
#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE RT_BIT(19)
#define HM_CHANGED_VMX_GUEST_APIC_STATE RT_BIT(20)
#define HM_CHANGED_VMX_ENTRY_CTLS RT_BIT(21)
#define HM_CHANGED_VMX_EXIT_CTLS RT_BIT(22)
/* AMD-V specific state. */
#define HM_CHANGED_SVM_GUEST_APIC_STATE RT_BIT(18)
#define HM_CHANGED_SVM_RESERVED1 RT_BIT(19)
#define HM_CHANGED_SVM_RESERVED2 RT_BIT(20)
#define HM_CHANGED_SVM_RESERVED3 RT_BIT(21)
#define HM_CHANGED_SVM_RESERVED4 RT_BIT(22)

#define HM_CHANGED_ALL_GUEST (  HM_CHANGED_GUEST_CR0 \
                              | HM_CHANGED_GUEST_CR3 \
                              | HM_CHANGED_GUEST_CR4 \
                              | HM_CHANGED_GUEST_GDTR \
                              | HM_CHANGED_GUEST_IDTR \
                              | HM_CHANGED_GUEST_LDTR \
                              | HM_CHANGED_GUEST_TR \
                              | HM_CHANGED_GUEST_SEGMENT_REGS \
                              | HM_CHANGED_GUEST_DEBUG \
                              | HM_CHANGED_GUEST_RIP \
                              | HM_CHANGED_GUEST_RSP \
                              | HM_CHANGED_GUEST_RFLAGS \
                              | HM_CHANGED_GUEST_CR2 \
                              | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                              | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                              | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
                              | HM_CHANGED_GUEST_EFER_MSR \
                              | HM_CHANGED_GUEST_LAZY_MSRS \
                              | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
                              | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                              | HM_CHANGED_VMX_GUEST_APIC_STATE \
                              | HM_CHANGED_VMX_ENTRY_CTLS \
                              | HM_CHANGED_VMX_EXIT_CTLS)

#define HM_CHANGED_HOST_CONTEXT RT_BIT(23)

/* Bits shared between host and guest. */
#define HM_CHANGED_HOST_GUEST_SHARED_STATE (  HM_CHANGED_GUEST_CR0 \
                                            | HM_CHANGED_GUEST_DEBUG \
                                            | HM_CHANGED_GUEST_LAZY_MSRS)
/** @} */

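/*
 * Illustrative sketch (added commentary, not part of the original header):
 * typical interplay between the HM_CHANGED_* bits and the HMCPU_CF_* macros
 * defined earlier.  The function name is hypothetical; the macros and flag
 * names are from this file.
 */
#if 0
static void hmSketchOnGuestCr0Write(PVMCPU pVCpu)
{
    /* Mark CR0 as dirty so the next world switch writes it into the VMCS/VMCB. */
    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    Assert(HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST));

    /* After the guest state has been exported, a loader would typically keep
       only the host/guest shared bits pending for the next iteration. */
    HMCPU_CF_RESET_TO(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE);
}
#endif
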
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES 8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM SSM version
 */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SAVED_STATE_VERSION 5
# define HM_SAVED_STATE_VERSION_NO_PATCHING 4
#else
# define HM_SAVED_STATE_VERSION 4
# define HM_SAVED_STATE_VERSION_NO_PATCHING 4
#endif
#define HM_SAVED_STATE_VERSION_2_0_X 3

/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBALCPUINFO
{
    /** The CPU ID. */
    RTCPUID idCpu;
    /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
    RTR0MEMOBJ hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t uCurrentAsid;
    /** TLB flush count. */
    uint32_t cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool fInUse;
} HMGLOBALCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;

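/*
 * Illustrative sketch (added commentary, not part of the original header): the
 * usual ASID/VPID rollover handling built on the fields above.  The function
 * name and exact policy are hypothetical simplifications of the ring-0 TLB code.
 */
#if 0
static void hmSketchNewAsid(PHMGLOBALCPUINFO pCpu, uint32_t uMaxAsid)
{
    /* Advance to the next ASID; on exhaustion wrap to 1 (0 is reserved for the
       host) and bump the flush count so stale guest mappings get invalidated. */
    if (++pCpu->uCurrentAsid >= uMaxAsid)
    {
        pCpu->uCurrentAsid = 1;
        pCpu->cTlbFlushes++;
        pCpu->fFlushAsidBeforeUse = true;
    }
}
#endif
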
typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction. (32 bits GC ptr) */
    AVLOU32NODECORE Core;
    /** Original opcode. */
    uint8_t aOpcode[16];
    /** Instruction size. */
    uint32_t cbOp;
    /** Replacement opcode. */
    uint8_t aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t cbNewOp;
    /** Instruction type. */
    HMTPRINSTR enmType;
    /** Source operand. */
    uint32_t uSrcOperand;
    /** Destination operand. */
    uint32_t uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32 pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;

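/*
 * Illustrative sketch (added commentary, not part of the original header): the
 * patch records above are keyed by guest instruction address in an offset-based
 * AVL tree (see PatchTree in the HM structure below).  The helper name is
 * hypothetical; RTAvloU32Get() is the IPRT lookup routine for this tree type.
 */
#if 0
static PHMTPRPATCH hmSketchFindPatch(PVM pVM, RTGCPTR32 GCPtrInstr)
{
    /* Core.Key holds the guest address of the patched instruction. */
    return (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)GCPtrInstr);
}
#endif
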
/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param pVM Pointer to the VM.
 * @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool fInitialized;
    /** Set if nested paging is enabled. */
    bool fNestedPaging;
    /** Set if nested paging is allowed. */
    bool fAllowNestedPaging;
    /** Set if large pages are enabled (requires nested paging). */
    bool fLargePages;
    /** Set if we can support 64-bit guests or not. */
    bool fAllow64BitGuests;
    /** Set if an IO-APIC is configured for this VM. */
    bool fHasIoApic;
    /** Set when TPR patching is allowed. */
    bool fTprPatchingAllowed;
    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool fGlobalInit;
    /** Set when TPR patching is active. */
    bool fTPRPatchingActive;
    bool u8Alignment[3];

    /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
    uint32_t uHostKernelFeatures;

    /** Maximum ASID allowed. */
    uint32_t uMaxAsid;
    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t cbGuestPatchMem;
    uint32_t u32Alignment0;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0;
    RTR0PTR pvR0Alignment0;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         * CPU. */
        bool fSupported;
        /** Set when we've enabled VMX. */
        bool fEnabled;
        /** Set if VPID is supported. */
        bool fVpid;
        /** Set if VT-x VPID is allowed. */
        bool fAllowVpid;
        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool fUnrestrictedGuest;
        /** Set if unrestricted guest execution is allowed to be used. */
        bool fAllowUnrestricted;
        /** Whether we're using the preemption timer or not. */
        bool fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS) pRealModeTSS;
        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;

        /** Physical address of the APIC-access page. */
        RTHCPHYS HCPhysApicAccess;
        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ hMemObjApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *) pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTHCPHYS HCPhysScratch;
        RTR0MEMOBJ hMemObjScratch;
        R0PTRTYPE(uint8_t *) pbScratch;
#endif

        /** Internal Id of which flush-handler to use for tagged-TLB entries. */
        uint32_t uFlushTaggedTlb;
        uint32_t u32Alignment0;
        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t u64HostCr4;

        /** Host EFER value (set by ring-0 VMX init) */
        uint64_t u64HostEfer;
        /** Whether the CPU supports VMCS fields for swapping EFER. */
        bool fSupportsVmcsEfer;
        uint8_t u8Alignment2[7];

        /** VMX MSR values */
        VMXMSRS Msrs;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMXFLUSHEPT enmFlushEpt;
        VMXFLUSHVPID enmFlushVpid;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         * CPU. */
        bool fSupported;
        /** Set when we've enabled SVM. */
        bool fEnabled;
        /** Set if erratum 170 affects the AMD CPU. */
        bool fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool fIgnoreInUseError;
        uint8_t u8Alignment0[4];

        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS HCPhysIOBitmap;
        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ hMemObjIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *) pvIOBitmap;

        /* HWCR MSR (for diagnostics) */
        uint64_t u64MsrHwcr;

        /** SVM revision. */
        uint32_t u32Rev;
        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction
     * address.
     */
    AVLOU32TREE PatchTree;
    uint32_t cPatches;
    HMTPRPATCH aPatches[64];

    struct
    {
        uint32_t u32AMDFeatureECX;
        uint32_t u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection */
    int32_t lLastError;

    /** HMR0Init was run */
    bool fHMR0Init;
    bool u8Alignment1[7];

    STAMCOUNTER StatTprPatchSuccess;
    STAMCOUNTER StatTprPatchFailure;
    STAMCOUNTER StatTprReplaceSuccess;
    STAMCOUNTER StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY 128

/**
 * Structure for storing read and write VMCS actions.
 */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t aMagic[16];
    uint64_t uMagic;
    uint64_t u64TimeEntry;
    uint64_t u64TimeSwitch;
    uint64_t cResume;
    uint64_t interPD;
    uint64_t pSwitcher;
    uint32_t uPos;
    uint32_t idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t cr2;
    struct
    {
        uint32_t cValidEntries;
        uint32_t uAlignment;
        uint32_t aField[VMCSCACHE_MAX_ENTRY];
        uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t cValidEntries;
        uint32_t uAlignment;
        uint32_t aField[VMCSCACHE_MAX_ENTRY];
        uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS HCPhysCpuPage;
        RTHCPHYS HCPhysVmcs;
        RTGCPTR pCache;
        RTGCPTR pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS HCPhysVmcs;
        RTGCPTR pCache;
        RTGCPTR pCtx;
        uint64_t eflags;
        uint64_t cr8;
    } TestOut;
    struct
    {
        uint64_t param1;
        uint64_t param2;
        uint64_t param3;
        uint64_t param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
AssertCompileSizeAlignment(VMCSCACHE, 8);

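/*
 * Illustrative sketch (added commentary, not part of the original header): how
 * a VMCS write can be queued into the Write sub-structure above when cached
 * accesses are in use (VMX_USE_CACHED_VMCS_ACCESSES).  The helper name is
 * hypothetical; the real ring-0 code uses dedicated macros for this.
 */
#if 0
static int hmSketchQueueCachedVmcsWrite(PVMCSCACHE pCache, uint32_t idxField, uint64_t u64Val)
{
    uint32_t i = pCache->Write.cValidEntries;
    AssertReturn(i < VMCSCACHE_MAX_ENTRY, VERR_BUFFER_OVERFLOW);
    pCache->Write.aField[i]    = idxField;   /* VMCS field encoding. */
    pCache->Write.aFieldVal[i] = u64Val;     /* Value flushed on the next VM-entry. */
    pCache->Write.cValidEntries = i + 1;
    return VINF_SUCCESS;
}
#endif
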
/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;

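/*
 * Illustrative sketch (added commentary, not part of the original header): the
 * call shape when invoking the per-VCPU VT-x entry point stored below in
 * HMCPU::vmx.pfnStartVM.  The wrapper name is hypothetical; with
 * VBOX_WITH_KERNEL_USING_XMM the real code goes through HMR0VMXStartVMWrapXMM
 * (declared near the end of this header) instead of calling it directly.
 */
#if 0
static int hmSketchRunGuestVmx(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fResume)
{
    return pVCpu->hm.s.vmx.pfnStartVM(fResume, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
}
#endif
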
/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Set if we need to flush the TLB during the world switch. */
    bool fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool fCheckedTLBFlush;
    /** Whether we're executing a single instruction. */
    bool fSingleInstruction;
    /** Set if we need to clear the trap flag because of single stepping. */
    bool fClearTrapFlag;
    /** Whether we've completed the inner HM leave function. */
    bool fLeaveDone;
    /** Whether we're using the hyper DR7 or guest DR7. */
    bool fUsingHyperDR7;
    /** Whether to preload the guest-FPU state to avoid #NM VM-exit overhead. */
    bool fUseGuestFpu;

    /** World switch exit counter. */
    volatile uint32_t cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    volatile uint32_t fContextUseFlags;
    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
     * time). */
    RTCPUID idLastCpu;
    /** TLB flush count. */
    uint32_t cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t uCurrentAsid;
    /** An additional error code used for some gurus. */
    uint32_t u32HMError;
    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t u64HostTscAux;

    struct
    {
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t u32Alignment0;
#endif
        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t u32EntryCtls;

        /** Current CR0 mask. */
        uint32_t u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t u32XcptBitmap;
        /** The updated-guest-state mask. */
        volatile uint32_t fUpdatedGuestState;
        uint32_t u32Alignment1;

        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *) pvVmcs;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *) pbVirtApic;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *) pvMsrBitmap;

        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        RTHCPHYS HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         * (used for guest MSRs). */
        RTR0MEMOBJ hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        R0PTRTYPE(void *) pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *) pvHostMsr;

        /** Current EPTP. */
        RTHCPHYS HCPhysEPTP;

        /** Number of guest/host MSR pairs in the auto-load/store area. */
        uint32_t cMsrs;
        /** Whether the host MSR values are up-to-date in the auto-load/store area. */
        bool fUpdatedHostMsrs;
        uint8_t u8Alignment0[3];

        /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostLStarMsr;
        /** Host STAR MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostStarMsr;
        /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostSFMaskMsr;
        /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostKernelGSBaseMsr;
        /** A mask of which MSRs have been swapped and need restoration. */
        uint32_t fLazyMsrs;
        uint32_t u32Alignment2;

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t u64MsrApicBase;
        /** Last used TSC offset value (cached). */
        uint64_t u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR AttrCS;
            X86DESCATTR AttrDS;
            X86DESCATTR AttrES;
            X86DESCATTR AttrFS;
            X86DESCATTR AttrGS;
            X86DESCATTR AttrSS;
            X86EFLAGS Eflags;
            uint32_t fRealOnV86Active;
        } RealMode;

        /** VT-x error-reporting (mainly for ring-3 propagation). */
        struct
        {
            uint64_t u64VMCSPhys;
            uint32_t u32VMCSRevision;
            uint32_t u32InstrError;
            uint32_t u32ExitReason;
            RTCPUID idEnteredCpu;
            RTCPUID idCurrentCpu;
            uint32_t u32Alignment0;
        } LastError;

        /** Current state of the VMCS. */
        uint32_t uVmcsState;
        /** Which host-state bits to restore before being preempted. */
        uint32_t fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST RestoreHost;

        /** Set if guest was executing in real mode (extra checks). */
        bool fWasInRealMode;
        uint8_t u8Alignment1[7];
    } vmx;

    struct
    {
        /** Ring 0 handlers for AMD-V. */
        PFNHMSVMVMRUN pfnVMRun;
#if HC_ARCH_BITS == 32
        uint32_t u32Alignment0;
#endif

        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS HCPhysVmcbHost;
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ hMemObjVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *) pvVmcbHost;

        /** Physical address of the guest VMCB. */
        RTHCPHYS HCPhysVmcb;
        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ hMemObjVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *) pvVmcb;

        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *) pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         * we should check if the VTPR changed on every VM-exit. */
        bool fSyncVTpr;
        uint8_t u8Alignment0[7];
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t fPending;
        uint32_t u32ErrCode;
        uint32_t cbInstr;
        uint32_t u32Padding; /**< Explicit alignment padding. */
        uint64_t u64IntInfo;
        RTGCUINTPTR GCPtrFaultAddress;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool fEnabled;
        uint8_t u8Align[7];

        /** RIP at the start of the io code we wish to emulate in the recompiler. */
        RTGCPTR GCPtrFunctionEip;

        uint64_t cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending IO operation type. */
        HMPENDINGIO enmType;
        uint32_t u32Alignment0;
        RTGCPTR GCPtrRip;
        RTGCPTR GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t uPort;
                uint32_t uAndVal;
                uint32_t cbSize;
            } Port;
            uint64_t aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HMR0Enter and cleared in HMR0Leave. */
    RTCPUID idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        uint32_t cPages;
        uint32_t u32Alignment0; /**< Explicit alignment padding. */
    } TlbShootdown;

    /** VT-x/AMD-V VM-exit/#VMEXIT history, circular array. */
    uint16_t auExitHistory[31];
    /** The index of the next free slot in the history array. */
    uint16_t idxExitHistoryFree;

    /** For saving stack space, the disassembler state is allocated here instead of
     * on the stack. */
    DISCPUSTATE DisState;

    STAMPROFILEADV StatEntry;
    STAMPROFILEADV StatExit1;
    STAMPROFILEADV StatExit2;
    STAMPROFILEADV StatExitIO;
    STAMPROFILEADV StatExitMovCRx;
    STAMPROFILEADV StatExitXcptNmi;
    STAMPROFILEADV StatLoadGuestState;
    STAMPROFILEADV StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV StatWorldSwitch3264;
#endif
    STAMPROFILEADV StatPoke;
    STAMPROFILEADV StatSpinPoke;
    STAMPROFILEADV StatSpinPokeFailed;

    STAMCOUNTER StatInjectInterrupt;
    STAMCOUNTER StatInjectXcpt;
    STAMCOUNTER StatInjectPendingReflect;

    STAMCOUNTER StatExitAll;
    STAMCOUNTER StatExitShadowNM;
    STAMCOUNTER StatExitGuestNM;
    STAMCOUNTER StatExitShadowPF; /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER StatExitShadowPFEM;
    STAMCOUNTER StatExitGuestPF;
    STAMCOUNTER StatExitGuestUD;
    STAMCOUNTER StatExitGuestSS;
    STAMCOUNTER StatExitGuestNP;
    STAMCOUNTER StatExitGuestTS;
    STAMCOUNTER StatExitGuestGP;
    STAMCOUNTER StatExitGuestDE;
    STAMCOUNTER StatExitGuestDB;
    STAMCOUNTER StatExitGuestMF;
    STAMCOUNTER StatExitGuestBP;
    STAMCOUNTER StatExitGuestXF;
    STAMCOUNTER StatExitGuestXcpUnk;
    STAMCOUNTER StatExitInvlpg;
    STAMCOUNTER StatExitInvd;
    STAMCOUNTER StatExitWbinvd;
    STAMCOUNTER StatExitPause;
    STAMCOUNTER StatExitCpuid;
    STAMCOUNTER StatExitRdtsc;
    STAMCOUNTER StatExitRdtscp;
    STAMCOUNTER StatExitRdpmc;
    STAMCOUNTER StatExitVmcall;
    STAMCOUNTER StatExitRdrand;
    STAMCOUNTER StatExitCli;
    STAMCOUNTER StatExitSti;
    STAMCOUNTER StatExitPushf;
    STAMCOUNTER StatExitPopf;
    STAMCOUNTER StatExitIret;
    STAMCOUNTER StatExitInt;
    STAMCOUNTER StatExitCRxWrite[16];
    STAMCOUNTER StatExitCRxRead[16];
    STAMCOUNTER StatExitDRxWrite;
    STAMCOUNTER StatExitDRxRead;
    STAMCOUNTER StatExitRdmsr;
    STAMCOUNTER StatExitWrmsr;
    STAMCOUNTER StatExitClts;
    STAMCOUNTER StatExitXdtrAccess;
    STAMCOUNTER StatExitHlt;
    STAMCOUNTER StatExitMwait;
    STAMCOUNTER StatExitMonitor;
    STAMCOUNTER StatExitLmsw;
    STAMCOUNTER StatExitIOWrite;
    STAMCOUNTER StatExitIORead;
    STAMCOUNTER StatExitIOStringWrite;
    STAMCOUNTER StatExitIOStringRead;
    STAMCOUNTER StatExitIntWindow;
    STAMCOUNTER StatExitMaxResume;
    STAMCOUNTER StatExitExtInt;
    STAMCOUNTER StatExitHostNmiInGC;
    STAMCOUNTER StatExitPreemptTimer;
    STAMCOUNTER StatExitTprBelowThreshold;
    STAMCOUNTER StatExitTaskSwitch;
    STAMCOUNTER StatExitMtf;
    STAMCOUNTER StatExitApicAccess;
    STAMCOUNTER StatExitApicAccessToR3;
    STAMCOUNTER StatPendingHostIrq;

    STAMCOUNTER StatPreemptPreempting;
    STAMCOUNTER StatPreemptSaveHostState;

    STAMCOUNTER StatFlushPage;
    STAMCOUNTER StatFlushPageManual;
    STAMCOUNTER StatFlushPhysPageManual;
    STAMCOUNTER StatFlushTlb;
    STAMCOUNTER StatFlushTlbManual;
    STAMCOUNTER StatFlushTlbWorldSwitch;
    STAMCOUNTER StatNoFlushTlbWorldSwitch;
    STAMCOUNTER StatFlushEntire;
    STAMCOUNTER StatFlushAsid;
    STAMCOUNTER StatFlushNestedPaging;
    STAMCOUNTER StatFlushTlbInvlpgVirt;
    STAMCOUNTER StatFlushTlbInvlpgPhys;
    STAMCOUNTER StatTlbShootdown;
    STAMCOUNTER StatTlbShootdownFlush;

    STAMCOUNTER StatSwitchGuestIrq;
    STAMCOUNTER StatSwitchHmToR3FF;
    STAMCOUNTER StatSwitchExitToR3;
    STAMCOUNTER StatSwitchLongJmpToR3;

    STAMCOUNTER StatTscOffsetAdjusted;
    STAMCOUNTER StatTscParavirt;
    STAMCOUNTER StatTscOffset;
    STAMCOUNTER StatTscIntercept;
    STAMCOUNTER StatTscInterceptOverFlow;

    STAMCOUNTER StatExitReasonNpf;
    STAMCOUNTER StatDRxArmed;
    STAMCOUNTER StatDRxContextSwitch;
    STAMCOUNTER StatDRxIoCheck;

    STAMCOUNTER StatLoadMinimal;
    STAMCOUNTER StatLoadFull;

    STAMCOUNTER StatVmxCheckBadRmSelBase;
    STAMCOUNTER StatVmxCheckBadRmSelLimit;
    STAMCOUNTER StatVmxCheckRmOk;

    STAMCOUNTER StatVmxCheckBadSel;
    STAMCOUNTER StatVmxCheckBadRpl;
    STAMCOUNTER StatVmxCheckBadLdt;
    STAMCOUNTER StatVmxCheckBadTr;
    STAMCOUNTER StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER StatFpu64SwitchBack;
    STAMCOUNTER StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER) paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;
AssertCompileMemberAlignment(HMCPU, vmx, 8);
AssertCompileMemberAlignment(HMCPU, svm, 8);
AssertCompileMemberAlignment(HMCPU, Event, 8);


#ifdef IN_RING0
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


# ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
# else
#  define HMDumpRegs(a, b, c) do { } while (0)
#  define HMR0DumpDescriptor(a, b, c) do { } while (0)
# endif /* VBOX_STRICT */

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param pGdtr Where to store the 64-bit GDTR.
 * @param pIdtr Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif /* VBOX_WITH_HYBRID_32BIT_KERNEL */

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif