VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@51326

Last change on this file since 51326 was 51220, checked in by vboxsync, 11 years ago

VMM/HMVMXR0: Implemented EFER swapping using VMCS controls.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 37.9 KB
/* $Id: HMInternal.h 51220 2014-05-09 01:51:16Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif
43
44/** @def HM_PROFILE_EXIT_DISPATCH
45 * Enables profiling of the VM exit handler dispatching. */
46#if 0
47# define HM_PROFILE_EXIT_DISPATCH
48#endif
49
50RT_C_DECLS_BEGIN
51
52
53/** @defgroup grp_hm_int Internal
54 * @ingroup grp_hm
55 * @internal
56 * @{
57 */
58
59/** @def HMCPU_CF_CLEAR
60 * Clears a HM-context flag.
61 *
62 * @param pVCpu Pointer to the VMCPU.
63 * @param fFlag The flag to clear.
64 */
65#define HMCPU_CF_CLEAR(pVCpu, fFlag) (ASMAtomicUoAndU32(&(pVCpu)->hm.s.fContextUseFlags, ~(fFlag)))
66
67/** @def HMCPU_CF_SET
68 * Sets a HM-context flag.
69 *
70 * @param pVCpu Pointer to the VMCPU.
71 * @param fFlag The flag to set.
72 */
73#define HMCPU_CF_SET(pVCpu, fFlag) (ASMAtomicUoOrU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlag)))
74
/** @def HMCPU_CF_IS_SET
 * Checks if all the flags in the specified HM-context set are set.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag to check.
 */
#define HMCPU_CF_IS_SET(pVCpu, fFlag) ((ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlag)) == (fFlag))

/** @def HMCPU_CF_IS_PENDING
 * Checks if one or more of the flags in the specified HM-context set are
 * pending.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_PENDING(pVCpu, fFlags) RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlags))

/** @def HMCPU_CF_IS_PENDING_ONLY
 * Checks if one or more of the specified HM-context flags are pending, and no
 * others.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_PENDING_ONLY(pVCpu, fFlags) !RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & ~(fFlags))

/** @def HMCPU_CF_IS_SET_ONLY
 * Checks if exactly the specified HM-context flags are set, and no others.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_SET_ONLY(pVCpu, fFlags) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) == (fFlags))

/** @def HMCPU_CF_RESET_TO
 * Resets the HM-context flags to the specified value.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The new value.
 */
#define HMCPU_CF_RESET_TO(pVCpu, fFlags) (ASMAtomicUoWriteU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlags)))

/** @def HMCPU_CF_VALUE
 * Returns the current HM-context flags value.
 *
 * @param pVCpu Pointer to the VMCPU.
 */
#define HMCPU_CF_VALUE(pVCpu) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags))

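/**
 * @par Example
 * An illustrative sketch (not part of the original header) of how these
 * atomic flag macros are typically combined, assuming a valid pVCpu and the
 * HM_CHANGED_* flags defined below:
 * @code
 *    // Mark guest RIP and RFLAGS as needing to be re-exported to hardware.
 *    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
 *
 *    // Only re-export state that is actually dirty, then mark it clean.
 *    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
 *    {
 *        // ... write pCtx->rip into the VMCS/VMCB ...
 *        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
 *    }
 * @endcode
 */
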
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT 0x100
#define MASK_EXITREASON_STAT 0xff
#define MASK_INJECT_IRQ_STAT 0xff

/** @name HM changed flags.
 * These flags are used to keep track of which important registers have
 * changed since they were last reset.
 * @{
 */
#define HM_CHANGED_GUEST_CR0                  RT_BIT(0)   /* Shared */
#define HM_CHANGED_GUEST_CR3                  RT_BIT(1)
#define HM_CHANGED_GUEST_CR4                  RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR                 RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR                 RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR                 RT_BIT(5)
#define HM_CHANGED_GUEST_TR                   RT_BIT(6)
#define HM_CHANGED_GUEST_SEGMENT_REGS         RT_BIT(7)
#define HM_CHANGED_GUEST_DEBUG                RT_BIT(8)   /* Shared */
#define HM_CHANGED_GUEST_RIP                  RT_BIT(9)
#define HM_CHANGED_GUEST_RSP                  RT_BIT(10)
#define HM_CHANGED_GUEST_RFLAGS               RT_BIT(11)
#define HM_CHANGED_GUEST_CR2                  RT_BIT(12)
#define HM_CHANGED_GUEST_SYSENTER_CS_MSR      RT_BIT(13)
#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR     RT_BIT(14)
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR     RT_BIT(15)
#define HM_CHANGED_GUEST_EFER_MSR             RT_BIT(16)
#define HM_CHANGED_GUEST_LAZY_MSRS            RT_BIT(17)  /* Shared */
/* VT-x specific state. */
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS        RT_BIT(18)
#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE   RT_BIT(19)
#define HM_CHANGED_VMX_GUEST_APIC_STATE       RT_BIT(20)
#define HM_CHANGED_VMX_ENTRY_CTLS             RT_BIT(21)
#define HM_CHANGED_VMX_EXIT_CTLS              RT_BIT(22)
/* AMD-V specific state. */
#define HM_CHANGED_SVM_GUEST_APIC_STATE       RT_BIT(18)
#define HM_CHANGED_SVM_RESERVED1              RT_BIT(19)
#define HM_CHANGED_SVM_RESERVED2              RT_BIT(20)
#define HM_CHANGED_SVM_RESERVED3              RT_BIT(21)
#define HM_CHANGED_SVM_RESERVED4              RT_BIT(22)

#define HM_CHANGED_ALL_GUEST                  (  HM_CHANGED_GUEST_CR0 \
                                               | HM_CHANGED_GUEST_CR3 \
                                               | HM_CHANGED_GUEST_CR4 \
                                               | HM_CHANGED_GUEST_GDTR \
                                               | HM_CHANGED_GUEST_IDTR \
                                               | HM_CHANGED_GUEST_LDTR \
                                               | HM_CHANGED_GUEST_TR \
                                               | HM_CHANGED_GUEST_SEGMENT_REGS \
                                               | HM_CHANGED_GUEST_DEBUG \
                                               | HM_CHANGED_GUEST_RIP \
                                               | HM_CHANGED_GUEST_RSP \
                                               | HM_CHANGED_GUEST_RFLAGS \
                                               | HM_CHANGED_GUEST_CR2 \
                                               | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                                               | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                               | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
                                               | HM_CHANGED_GUEST_EFER_MSR \
                                               | HM_CHANGED_GUEST_LAZY_MSRS \
                                               | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
                                               | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                               | HM_CHANGED_VMX_GUEST_APIC_STATE \
                                               | HM_CHANGED_VMX_ENTRY_CTLS \
                                               | HM_CHANGED_VMX_EXIT_CTLS)

#define HM_CHANGED_HOST_CONTEXT               RT_BIT(23)

/* Bits shared between host and guest. */
#define HM_CHANGED_HOST_GUEST_SHARED_STATE    (  HM_CHANGED_GUEST_CR0 \
                                               | HM_CHANGED_GUEST_DEBUG \
                                               | HM_CHANGED_GUEST_LAZY_MSRS)
/** @} */

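/**
 * @par Example
 * Illustrative only (not in the original source): a longjmp back to ring-3
 * typically marks the entire guest state as dirty, while code that has just
 * exported everything can shrink the set down to the shared bits that ring-3
 * may still touch:
 * @code
 *    // Force a full state re-load on the next VM-entry.
 *    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 *
 *    // After exporting all guest state, keep only the host/guest shared bits pending.
 *    HMCPU_CF_RESET_TO(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE);
 * @endcode
 */
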
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES 8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM SSM version. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION 5
# define HM_SSM_VERSION_NO_PATCHING 4
#else
# define HM_SSM_VERSION 4
# define HM_SSM_VERSION_NO_PATCHING 4
#endif
#define HM_SSM_VERSION_2_0_X 3

/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBALCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The VM_HSAVE_AREA (AMD-V) / VMXON region (Intel) memory backing. */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentAsid;
    /** TLB flush count. */
    uint32_t            cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool       fInUse;
} HMGLOBALCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;
typedef struct
{
    /** The key is the address of the patched instruction (32-bit GC pointer). */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;

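/**
 * @par Example
 * A minimal sketch (not part of the original header) of looking up a TPR
 * patch record by the guest address of the patched instruction, using the
 * IPRT AVL API the tree is built on; GCPtrInstr is a hypothetical input:
 * @code
 *    PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree,
 *                                                   (AVLOU32KEY)GCPtrInstr);
 *    if (pPatch)
 *    {
 *        // Found: pPatch->enmType identifies the replacement that was applied.
 *    }
 * @endcode
 */
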
/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param pVM Pointer to the VM.
 * @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTprPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    bool                        u8Alignment[7];

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;

    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    RTR0PTR                     uPadding2;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         *  CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVpid;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;

        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Set if unrestricted guest execution is allowed to be used. */
        bool                        fAllowUnrestricted;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTR0MEMOBJ                  hMemObjScratch;
        RTHCPHYS                    HCPhysScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif

        /** Internal Id of which flush-handler to use for tagged-TLB entries. */
        unsigned                    uFlushTaggedTlb;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t                    u64HostCr4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t                    u64HostEfer;
        /** Whether the CPU supports VMCS fields for swapping EFER. */
        bool                        fSupportsVmcsEfer;
        bool                        afAlignment1[7];

        /** VMX MSR values. */
        VMXMSRS                     Msrs;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEpt;
        VMX_FLUSH_VPID              enmFlushVpid;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD CPU. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /* HWCR MSR (for diagnostics). */
        uint64_t                    u64MsrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address.
     */
    AVLOU32TREE                 PatchTree;
    uint32_t                    cPatches;
    HMTPRPATCH                  aPatches[64];

    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t                     lLastError;

    /** HMR0Init was run. */
    bool                        fHMR0Init;
    bool                        u8Alignment1[7];

    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccess;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY 128

/* Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t         aMagic[16];
    uint64_t        uMagic;
    uint64_t        u64TimeEntry;
    uint64_t        u64TimeSwitch;
    uint64_t        cResume;
    uint64_t        interPD;
    uint64_t        pSwitcher;
    uint32_t        uPos;
    uint32_t        idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t        cr2;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS    HCPhysCpuPage;
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;

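/**
 * @par Example
 * Illustrative sketch (not from the original source) of how a deferred VMCS
 * write could be queued in the cache; idxField and u64Val are hypothetical:
 * @code
 *    uint32_t iEntry = pCache->Write.cValidEntries;
 *    Assert(iEntry < VMCSCACHE_MAX_ENTRY);
 *    pCache->Write.aField[iEntry]    = idxField;  // VMCS field encoding
 *    pCache->Write.aFieldVal[iEntry] = u64Val;    // value to write at world switch
 *    pCache->Write.cValidEntries     = iEntry + 1;
 * @endcode
 */
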
/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;

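/**
 * @par Example
 * Illustrative only: ring-0 code dispatches the world switch through these
 * function pointers rather than calling the assembly stubs directly, e.g.
 * (assuming fResume and pCtx have been set up by the caller):
 * @code
 *    int rc = pVCpu->hm.s.vmx.pfnStartVM(fResume, pCtx, &pVCpu->hm.s.vmx.VMCSCache,
 *                                        pVM, pVCpu);
 * @endcode
 */
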
/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Whether we're executing a single instruction. */
    bool                        fSingleInstruction;
    /** Set if we need to clear the trap flag because of single stepping. */
    bool                        fClearTrapFlag;
    /** Whether we've completed the inner HM leave function. */
    bool                        fLeaveDone;
    /** Whether we're using the hyper DR7 or guest DR7. */
    bool                        fUsingHyperDR7;
    /** Whether to preload the guest-FPU state to avoid #NM VM-exit overhead. */
    bool                        fUseGuestFpu;

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    volatile uint32_t           fContextUseFlags;
    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
     *  time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    /** An additional error code used for some guru meditations. */
    uint32_t                    u32HMError;
    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment1;
#endif

        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t                    u32EntryCtls;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment2;
#endif

        /** Current CR0 mask. */
        uint32_t                    u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t                    u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        volatile uint32_t           fUpdatedGuestState;
        /** Current EPTP. */
        RTHCPHYS                    HCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Number of guest/host MSR pairs in the auto-load/store area. */
        uint32_t                    cMsrs;
        /** Whether the host MSR values are up-to-date in the auto-load/store area. */
        bool                        fUpdatedHostMsrs;
        uint8_t                     u8Align[7];

        /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostLStarMsr;
        /** Host STAR MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostStarMsr;
        /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostSFMaskMsr;
        /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
        uint64_t                    u64HostKernelGSBaseMsr;
        /** A mask of which MSRs have been swapped and need restoration. */
        uint32_t                    fRestoreHostMsrs;
        uint32_t                    u32Alignment3;

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last used TSC offset value (cached). */
        uint64_t                    u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR                 AttrCS;
            X86DESCATTR                 AttrDS;
            X86DESCATTR                 AttrES;
            X86DESCATTR                 AttrFS;
            X86DESCATTR                 AttrGS;
            X86DESCATTR                 AttrSS;
            X86EFLAGS                   Eflags;
            uint32_t                    fRealOnV86Active;
        } RealMode;

        struct
        {
            uint64_t                    u64VMCSPhys;
            uint32_t                    u32VMCSRevision;
            uint32_t                    u32InstrError;
            uint32_t                    u32ExitReason;
            RTCPUID                     idEnteredCpu;
            RTCPUID                     idCurrentCpu;
            uint32_t                    u32Padding;
        } LastError;

        /** State of the VMCS. */
        uint32_t                    uVmcsState;
        /** Which host-state bits to restore before being preempted. */
        uint32_t                    fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST              RestoreHost;
        /** Set if guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;
        uint8_t                     u8Align2[7];

        /** Alignment padding. */
        uint32_t                    u32Padding;
    } vmx;

    struct
    {
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ                  hMemObjVmcbHost;
        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS                    HCPhysVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *)           pvVmcbHost;

        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ                  hMemObjVmcb;
        /** Physical address of the guest VMCB. */
        RTHCPHYS                    HCPhysVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *)           pvVmcb;

        /** Ring 0 handlers for AMD-V. */
        PFNHMSVMVMRUN               pfnVMRun;

        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         *  we should check if the VTPR changed on every VM-exit. */
        bool                        fSyncVTpr;
        uint8_t                     u8Align[7];

        /** Alignment padding. */
        uint32_t                    u32Padding;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    u32ErrCode;
        uint32_t                    cbInstr;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
        uint64_t                    u64IntInfo;
        RTGCUINTPTR                 GCPtrFaultAddress;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the I/O code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending IO operation type. */
        HMPENDINGIO                 enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t                uPort;
                uint32_t                uAndVal;
                uint32_t                cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

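    /**
     * @par Example
     * A hedged sketch (not in the original header) of recording a pending port
     * read so it can be completed after returning to ring-3; uIOPort, uAndVal,
     * cbValue and cbInstr are hypothetical locals:
     * @code
     *    pVCpu->hm.s.PendingIO.enmType        = HMPENDINGIO_PORT_READ;
     *    pVCpu->hm.s.PendingIO.GCPtrRip       = pCtx->rip;
     *    pVCpu->hm.s.PendingIO.GCPtrRipNext   = pCtx->rip + cbInstr;
     *    pVCpu->hm.s.PendingIO.s.Port.uPort   = uIOPort;
     *    pVCpu->hm.s.PendingIO.s.Port.uAndVal = uAndVal;
     *    pVCpu->hm.s.PendingIO.s.Port.cbSize  = cbValue;
     * @endcode
     */
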
    /** The PAE PDPEs used with Nested Paging (only valid when
     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     *  HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR                     aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        uint32_t                    cPages;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
    } TlbShootdown;

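    /**
     * @par Example
     * Illustrative sketch (not from the original source): queueing a page for
     * a TLB shootdown and falling back to a full flush once the small buffer
     * is exhausted, matching the HM_MAX_TLB_SHOOTDOWN_PAGES comment above:
     * @code
     *    if (pVCpu->hm.s.TlbShootdown.cPages < HM_MAX_TLB_SHOOTDOWN_PAGES)
     *        pVCpu->hm.s.TlbShootdown.aPages[pVCpu->hm.s.TlbShootdown.cPages++] = GCPtrPage;
     *    else
     *        pVCpu->hm.s.fForceTLBFlush = true; // too many pages: flush everything
     * @endcode
     */
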
    /** For saving stack space, the disassembler state is allocated here instead of
     *  on the stack. */
    DISCPUSTATE                 DisState;

    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
    STAMPROFILEADV              StatExitIO;
    STAMPROFILEADV              StatExitMovCRx;
    STAMPROFILEADV              StatExitXcptNmi;
    STAMPROFILEADV              StatLoadGuestState;
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatInjectInterrupt;
    STAMCOUNTER                 StatInjectXcpt;
    STAMCOUNTER                 StatInjectPendingReflect;

    STAMCOUNTER                 StatExitAll;
    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;   /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestTS;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitExtInt;
    STAMCOUNTER                 StatExitHostNmiInGC;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatPreemptPreempting;
    STAMCOUNTER                 StatPreemptSaveHostState;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushEntire;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;

    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;
    STAMCOUNTER                 StatTscInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

    STAMCOUNTER                 StatVmxCheckBadRmSelBase;
    STAMCOUNTER                 StatVmxCheckBadRmSelLimit;
    STAMCOUNTER                 StatVmxCheckRmOk;

    STAMCOUNTER                 StatVmxCheckBadSel;
    STAMCOUNTER                 StatVmxCheckBadRpl;
    STAMCOUNTER                 StatVmxCheckBadLdt;
    STAMCOUNTER                 StatVmxCheckBadTr;
    STAMCOUNTER                 StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV              StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;


#ifdef IN_RING0

VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


# ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
# else
#  define HMDumpRegs(a, b, c) do { } while (0)
#  define HMR0DumpDescriptor(a, b, c) do { } while (0)
# endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param pGdtr Where to store the 64-bit GDTR.
 * @param pIdtr Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif
