VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@49770

Last change on this file since 49770 was 49770, checked in by vboxsync, 11 years ago

VMM/HM: Fix typo in TRPPatchingAllowed member.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 37.5 KB
/* $Id: HMInternal.h 49770 2013-12-04 10:51:06Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0
# define HM_PROFILE_EXIT_DISPATCH
#endif

RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */

/** @def HMCPU_CF_CLEAR
 * Clears a HM-context flag.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag to clear.
 */
#define HMCPU_CF_CLEAR(pVCpu, fFlag) (ASMAtomicUoAndU32(&(pVCpu)->hm.s.fContextUseFlags, ~(fFlag)))

/** @def HMCPU_CF_SET
 * Sets a HM-context flag.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag to set.
 */
#define HMCPU_CF_SET(pVCpu, fFlag) (ASMAtomicUoOrU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlag)))

/** @def HMCPU_CF_IS_SET
 * Checks if all the flags in the specified HM-context set are set.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlag The flag to check.
 */
#define HMCPU_CF_IS_SET(pVCpu, fFlag) ((ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlag)) == (fFlag))

/** @def HMCPU_CF_IS_PENDING
 * Checks if one or more of the flags in the specified HM-context set are
 * pending.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_PENDING(pVCpu, fFlags) RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & (fFlags))

/** @def HMCPU_CF_IS_PENDING_ONLY
 * Checks if -only- one or more of the specified HM-context flags are pending.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_PENDING_ONLY(pVCpu, fFlags) !RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) & ~(fFlags))

/** @def HMCPU_CF_IS_SET_ONLY
 * Checks if -only- all the flags in the specified HM-context set are set.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The flags to check for.
 */
#define HMCPU_CF_IS_SET_ONLY(pVCpu, fFlags) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags) == (fFlags))

/** @def HMCPU_CF_RESET_TO
 * Resets the HM-context flags to the specified value.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param fFlags The new value.
 */
#define HMCPU_CF_RESET_TO(pVCpu, fFlags) (ASMAtomicUoWriteU32(&(pVCpu)->hm.s.fContextUseFlags, (fFlags)))

/** @def HMCPU_CF_VALUE
 * Returns the current HM-context flags value.
 *
 * @param pVCpu Pointer to the VMCPU.
 */
#define HMCPU_CF_VALUE(pVCpu) (ASMAtomicUoReadU32(&(pVCpu)->hm.s.fContextUseFlags))

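/* A minimal usage sketch of the context-flag macros above (hypothetical call
 * site; pVCpu is assumed to be a valid VMCPU pointer and the HM_CHANGED_XXX
 * flags are defined below):
 *
 *      // Mark the guest RIP as dirty after emulating an instruction.
 *      HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
 *
 *      // Before the next VM-entry, reload only what is actually dirty.
 *      if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST))
 *          hmR0LoadGuestState(pVM, pVCpu);     // hypothetical loader
 *
 *      // Everything loaded; clear all the flags in one store.
 *      HMCPU_CF_RESET_TO(pVCpu, 0);
 */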

/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT  0x100
#define MASK_EXITREASON_STAT 0xff
#define MASK_INJECT_IRQ_STAT 0xff

/** @name HM changed flags.
 * These flags are used to keep track of which important registers have been
 * changed since they were last reset.
 * @{
 */
#define HM_CHANGED_GUEST_CR0                  RT_BIT(0)   /* Shared */
#define HM_CHANGED_GUEST_CR3                  RT_BIT(1)
#define HM_CHANGED_GUEST_CR4                  RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR                 RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR                 RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR                 RT_BIT(5)
#define HM_CHANGED_GUEST_TR                   RT_BIT(6)
#define HM_CHANGED_GUEST_SEGMENT_REGS         RT_BIT(7)
#define HM_CHANGED_GUEST_DEBUG                RT_BIT(8)   /* Shared */
#define HM_CHANGED_GUEST_RIP                  RT_BIT(9)
#define HM_CHANGED_GUEST_RSP                  RT_BIT(10)
#define HM_CHANGED_GUEST_RFLAGS               RT_BIT(11)
#define HM_CHANGED_GUEST_CR2                  RT_BIT(12)
#define HM_CHANGED_GUEST_SYSENTER_CS_MSR      RT_BIT(13)
#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR     RT_BIT(14)
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR     RT_BIT(15)
#define HM_CHANGED_GUEST_LAZY_MSRS            RT_BIT(16)  /* Shared */
/* VT-x specific state. */
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS        RT_BIT(17)
#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE   RT_BIT(18)
#define HM_CHANGED_VMX_GUEST_APIC_STATE       RT_BIT(19)
#define HM_CHANGED_VMX_ENTRY_CTLS             RT_BIT(20)
#define HM_CHANGED_VMX_EXIT_CTLS              RT_BIT(21)
/* AMD-V specific state. */
#define HM_CHANGED_SVM_GUEST_EFER_MSR         RT_BIT(17)
#define HM_CHANGED_SVM_GUEST_APIC_STATE       RT_BIT(18)
#define HM_CHANGED_SVM_RESERVED1              RT_BIT(19)
#define HM_CHANGED_SVM_RESERVED2              RT_BIT(20)
#define HM_CHANGED_SVM_RESERVED3              RT_BIT(21)

#define HM_CHANGED_ALL_GUEST                  (  HM_CHANGED_GUEST_CR0 \
                                               | HM_CHANGED_GUEST_CR3 \
                                               | HM_CHANGED_GUEST_CR4 \
                                               | HM_CHANGED_GUEST_GDTR \
                                               | HM_CHANGED_GUEST_IDTR \
                                               | HM_CHANGED_GUEST_LDTR \
                                               | HM_CHANGED_GUEST_TR \
                                               | HM_CHANGED_GUEST_SEGMENT_REGS \
                                               | HM_CHANGED_GUEST_DEBUG \
                                               | HM_CHANGED_GUEST_RIP \
                                               | HM_CHANGED_GUEST_RSP \
                                               | HM_CHANGED_GUEST_RFLAGS \
                                               | HM_CHANGED_GUEST_CR2 \
                                               | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                                               | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                               | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
                                               | HM_CHANGED_GUEST_LAZY_MSRS \
                                               | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
                                               | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                               | HM_CHANGED_VMX_GUEST_APIC_STATE \
                                               | HM_CHANGED_VMX_ENTRY_CTLS \
                                               | HM_CHANGED_VMX_EXIT_CTLS)

#define HM_CHANGED_HOST_CONTEXT               RT_BIT(22)

/* Bits shared between host and guest. */
#define HM_CHANGED_HOST_GUEST_SHARED_STATE    (  HM_CHANGED_GUEST_CR0 \
                                               | HM_CHANGED_GUEST_DEBUG \
                                               | HM_CHANGED_GUEST_LAZY_MSRS)
/** @} */

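/* Example (hypothetical call site): after loading a saved state, everything
 * must be treated as dirty, so the whole mask is written back in one go:
 *
 *      HMCPU_CF_RESET_TO(pVCpu, HM_CHANGED_ALL_GUEST | HM_CHANGED_HOST_CONTEXT);
 */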
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES 8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM SSM version. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION             5
# define HM_SSM_VERSION_NO_PATCHING 4
#else
# define HM_SSM_VERSION             4
# define HM_SSM_VERSION_NO_PATCHING 4
#endif
#define HM_SSM_VERSION_2_0_X        3

/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBALCPUINFO
{
    /** The CPU ID. */
    RTCPUID idCpu;
    /** The memory object. */
    RTR0MEMOBJ hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t uCurrentAsid;
    /** TLB flush count. */
    uint32_t cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool fInUse;
} HMGLOBALCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction (32-bit GC pointer). */
    AVLOU32NODECORE Core;
    /** Original opcode. */
    uint8_t aOpcode[16];
    /** Instruction size. */
    uint32_t cbOp;
    /** Replacement opcode. */
    uint8_t aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t cbNewOp;
    /** Instruction type. */
    HMTPRINSTR enmType;
    /** Source operand. */
    uint32_t uSrcOperand;
    /** Destination operand. */
    uint32_t uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32 pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
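
/* A sketch of how a patch record might be entered into the patch tree (see
 * HM::PatchTree below); hypothetical call site, error handling omitted:
 *
 *      PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[pVM->hm.s.cPatches];
 *      pPatch->Core.Key   = (uint32_t)GCPtrInstr;  // address of the patched instruction
 *      pPatch->enmType    = HMTPRINSTR_READ;
 *      bool fInserted = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
 *      Assert(fInserted); NOREF(fInserted);
 *      pVM->hm.s.cPatches++;
 */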

/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param pVM Pointer to the VM.
 * @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
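
/* Only used in the 32-bit host / 64-bit guest configuration. A hedged sketch
 * of a ring-0 call through this pointer, deriving the offset exactly as
 * documented above (hypothetical call site):
 *
 *      uint32_t offCpumVCpu = RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum)
 *                           - RT_OFFSETOF(VM, cpum);
 *      int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, offCpumVCpu);
 */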

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool fInitialized;

    /** Set if nested paging is enabled. */
    bool fNestedPaging;

    /** Set if nested paging is allowed. */
    bool fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool fLargePages;

    /** Set if we can support 64-bit guests or not. */
    bool fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool fTprPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool fGlobalInit;

    /** Set when TPR patching is active. */
    bool fTPRPatchingActive;
    bool u8Alignment[7];

    /** Maximum ASID allowed. */
    uint32_t uMaxAsid;

    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t cbGuestPatchMem;
    uint32_t uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0;
    RTR0PTR uPadding2;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         *  CPU. */
        bool fSupported;

        /** Set when we've enabled VMX. */
        bool fEnabled;

        /** Set if VPID is supported. */
        bool fVpid;

        /** Set if VT-x VPID is allowed. */
        bool fAllowVpid;

        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool fUnrestrictedGuest;

        /** Set if unrestricted guest execution is allowed to be used. */
        bool fAllowUnrestricted;

        /** Whether we're using the preemption timer or not. */
        bool fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS) pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *) pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTR0MEMOBJ hMemObjScratch;
        RTHCPHYS HCPhysScratch;
        R0PTRTYPE(uint8_t *) pbScratch;
#endif

        /** Internal Id of which flush-handler to use for tagged-TLB entries. */
        unsigned uFlushTaggedTlb;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        uint32_t u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t u64HostCr4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t u64HostEfer;

        /** VMX MSR values. */
        VMXMSRS Msrs;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT enmFlushEpt;
        VMX_FLUSH_VPID enmFlushVpid;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         *  CPU. */
        bool fSupported;
        /** Set when we've enabled SVM. */
        bool fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ hMemObjIOBitmap;
        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS HCPhysIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *) pvIOBitmap;

        /** HWCR MSR (for diagnostics). */
        uint64_t u64MsrHwcr;

        /** SVM revision. */
        uint32_t u32Rev;

        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address.
     */
    AVLOU32TREE PatchTree;
    uint32_t cPatches;
    HMTPRPATCH aPatches[64];

    struct
    {
        uint32_t u32AMDFeatureECX;
        uint32_t u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t lLastError;

    /** Set when HMR0Init has been run. */
    bool fHMR0Init;
    bool u8Alignment1[7];

    STAMCOUNTER StatTprPatchSuccess;
    STAMCOUNTER StatTprPatchFailure;
    STAMCOUNTER StatTprReplaceSuccess;
    STAMCOUNTER StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

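/* A sketch of branching on the detection results stored above (hypothetical
 * call site and helpers):
 *
 *      if (pVM->hm.s.vmx.fSupported)
 *          rc = hmR3InitVmx(pVM);          // hypothetical VT-x path
 *      else if (pVM->hm.s.svm.fSupported)
 *          rc = hmR3InitSvm(pVM);          // hypothetical AMD-V path
 *      else
 *          rc = pVM->hm.s.lLastError;      // saved error from detection
 */
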
/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY 128

/* Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t aMagic[16];
    uint64_t uMagic;
    uint64_t u64TimeEntry;
    uint64_t u64TimeSwitch;
    uint64_t cResume;
    uint64_t interPD;
    uint64_t pSwitcher;
    uint32_t uPos;
    uint32_t idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t cr2;
    struct
    {
        uint32_t cValidEntries;
        uint32_t uAlignment;
        uint32_t aField[VMCSCACHE_MAX_ENTRY];
        uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t cValidEntries;
        uint32_t uAlignment;
        uint32_t aField[VMCSCACHE_MAX_ENTRY];
        uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS HCPhysCpuPage;
        RTHCPHYS HCPhysVmcs;
        RTGCPTR pCache;
        RTGCPTR pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS HCPhysVmcs;
        RTGCPTR pCache;
        RTGCPTR pCtx;
        uint64_t eflags;
        uint64_t cr8;
    } TestOut;
    struct
    {
        uint64_t param1;
        uint64_t param2;
        uint64_t param3;
        uint64_t param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
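
/* A sketch of queuing a deferred VMCS write into the cache (hypothetical
 * helper; idxField and u64Val are assumed locals):
 *
 *      Assert(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY);
 *      uint32_t i = pCache->Write.cValidEntries;
 *      pCache->Write.aField[i]     = idxField;     // VMCS field encoding
 *      pCache->Write.aFieldVal[i]  = u64Val;
 *      pCache->Write.cValidEntries = i + 1;
 */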

/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
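
/* Hedged sketches of invoking these worker pointers from ring-0 (hypothetical
 * call sites; the handles and addresses live in HMCPU below):
 *
 *      // VT-x: fResume is assumed to select VMLAUNCH (false) vs. VMRESUME (true).
 *      rc = pVCpu->hm.s.vmx.pfnStartVM(fResume, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
 *
 *      // AMD-V: host and guest VMCB physical addresses come from HMCPU::svm.
 *      rc = pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
 */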

/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Set if we need to flush the TLB during the world switch. */
    bool fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at the moment. */
    bool fActive;
    /** Set once the TLB has been checked; stays set until we return from the world switch. */
    volatile bool fCheckedTLBFlush;
    /** Whether we're executing a single instruction. */
    bool fSingleInstruction;
    /** Set if we need to clear the trap flag because of single stepping. */
    bool fClearTrapFlag;
    /** Whether we've completed the inner HM leave function. */
    bool fLeaveDone;
    /** Whether we're using the hyper DR7 or guest DR7. */
    bool fUsingHyperDR7;
    /** Whether to preload the guest-FPU state to avoid #NM VM-exit overhead. */
    bool fUseGuestFpu;

    /** World switch exit counter. */
    volatile uint32_t cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    volatile uint32_t fContextUseFlags;
    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
     *  time). */
    RTCPUID idLastCpu;
    /** TLB flush count. */
    uint32_t cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t uCurrentAsid;
    /** An additional error code used for some guru meditations. */
    uint32_t u32HMError;
    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t u64HostTscAux;

    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *) pvVmcs;
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t u32Alignment1;
#endif

        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t u32EntryCtls;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *) pbVirtApic;
#if HC_ARCH_BITS == 32
        uint32_t u32Alignment2;
#endif

        /** Current CR0 mask. */
        uint32_t u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t u32XcptBitmap;
        /** The updated-guest-state mask. */
        volatile uint32_t fUpdatedGuestState;
        /** Current EPTP. */
        RTHCPHYS HCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *) pvMsrBitmap;

        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        RTHCPHYS HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTR0MEMOBJ hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        R0PTRTYPE(void *) pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *) pvHostMsr;

        /** Number of guest/host MSR pairs in the auto-load/store area. */
        uint32_t cMsrs;
        /** Whether the host MSR values are up-to-date in the auto-load/store area. */
        bool fUpdatedHostMsrs;
        uint8_t u8Align[7];

        /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostLStarMsr;
        /** Host STAR MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostStarMsr;
        /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostSFMaskMsr;
        /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
        uint64_t u64HostKernelGSBaseMsr;
        /** A mask of which MSRs have been swapped and need restoration. */
        uint32_t fRestoreHostMsrs;
        uint32_t u32Alignment3;

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t u64MsrApicBase;
        /** Last used TSC offset value (cached). */
        uint64_t u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR AttrCS;
            X86DESCATTR AttrDS;
            X86DESCATTR AttrES;
            X86DESCATTR AttrFS;
            X86DESCATTR AttrGS;
            X86DESCATTR AttrSS;
            X86EFLAGS Eflags;
            uint32_t fRealOnV86Active;
        } RealMode;

        struct
        {
            uint64_t u64VMCSPhys;
            uint32_t u32VMCSRevision;
            uint32_t u32InstrError;
            uint32_t u32ExitReason;
            RTCPUID idEnteredCpu;
            RTCPUID idCurrentCpu;
            uint32_t u32Padding;
        } LastError;

        /** State of the VMCS. */
        uint32_t uVmcsState;
        /** Which host-state bits to restore before being preempted. */
        uint32_t fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST RestoreHost;
        /** Set if guest was executing in real mode (extra checks). */
        bool fWasInRealMode;
        uint8_t u8Align2[7];

        /** Alignment padding. */
        uint32_t u32Padding;
    } vmx;

    struct
    {
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ hMemObjVmcbHost;
        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS HCPhysVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *) pvVmcbHost;

        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ hMemObjVmcb;
        /** Physical address of the guest VMCB. */
        RTHCPHYS HCPhysVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *) pvVmcb;

        /** Ring 0 handlers for AMD-V. */
        PFNHMSVMVMRUN pfnVMRun;

        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *) pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         *  we should check if the VTPR changed on every VM-exit. */
        bool fSyncVTpr;
        uint8_t u8Align[7];

        /** Alignment padding. */
        uint32_t u32Padding;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t fPending;
        uint32_t u32ErrCode;
        uint32_t cbInstr;
        uint32_t u32Padding; /**< Explicit alignment padding. */
        uint64_t u64IntInfo;
        RTGCUINTPTR GCPtrFaultAddress;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool fEnabled;
        uint8_t u8Align[7];

        /** RIP at the start of the IO code we wish to emulate in the recompiler. */
        RTGCPTR GCPtrFunctionEip;

        uint64_t cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending IO operation type. */
        HMPENDINGIO enmType;
        uint32_t uPadding;
        RTGCPTR GCPtrRip;
        RTGCPTR GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t uPort;
                uint32_t uAndVal;
                uint32_t cbSize;
            } Port;
            uint64_t aRaw[2];
        } s;
    } PendingIO;
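
    /* A sketch of recording a pending port read for later completion (field
     * names from the struct above; uIOPort, uAndVal, cbValue and cbInstr are
     * hypothetical locals):
     *
     *      pVCpu->hm.s.PendingIO.enmType        = HMPENDINGIO_PORT_READ;
     *      pVCpu->hm.s.PendingIO.GCPtrRip       = pCtx->rip;
     *      pVCpu->hm.s.PendingIO.GCPtrRipNext   = pCtx->rip + cbInstr;
     *      pVCpu->hm.s.PendingIO.s.Port.uPort   = uIOPort;
     *      pVCpu->hm.s.PendingIO.s.Port.uAndVal = uAndVal;
     *      pVCpu->hm.s.PendingIO.s.Port.cbSize  = cbValue;
     */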

    /** The PAE PDPEs used with Nested Paging (only valid when
     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     *  HMR0Enter and cleared in HMR0Leave. */
    RTCPUID idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        uint32_t cPages;
        uint32_t u32Padding; /**< Explicit alignment padding. */
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     *  on the stack. */
    DISCPUSTATE DisState;

    STAMPROFILEADV StatEntry;
    STAMPROFILEADV StatExit1;
    STAMPROFILEADV StatExit2;
    STAMPROFILEADV StatExitIO;
    STAMPROFILEADV StatExitMovCRx;
    STAMPROFILEADV StatExitXcptNmi;
    STAMPROFILEADV StatLoadGuestState;
    STAMPROFILEADV StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV StatWorldSwitch3264;
#endif
    STAMPROFILEADV StatPoke;
    STAMPROFILEADV StatSpinPoke;
    STAMPROFILEADV StatSpinPokeFailed;

    STAMCOUNTER StatInjectInterrupt;
    STAMCOUNTER StatInjectXcpt;
    STAMCOUNTER StatInjectPendingReflect;

    STAMCOUNTER StatExitAll;
    STAMCOUNTER StatExitShadowNM;
    STAMCOUNTER StatExitGuestNM;
    STAMCOUNTER StatExitShadowPF; /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER StatExitShadowPFEM;
    STAMCOUNTER StatExitGuestPF;
    STAMCOUNTER StatExitGuestUD;
    STAMCOUNTER StatExitGuestSS;
    STAMCOUNTER StatExitGuestNP;
    STAMCOUNTER StatExitGuestGP;
    STAMCOUNTER StatExitGuestDE;
    STAMCOUNTER StatExitGuestDB;
    STAMCOUNTER StatExitGuestMF;
    STAMCOUNTER StatExitGuestBP;
    STAMCOUNTER StatExitGuestXF;
    STAMCOUNTER StatExitGuestXcpUnk;
    STAMCOUNTER StatExitInvlpg;
    STAMCOUNTER StatExitInvd;
    STAMCOUNTER StatExitWbinvd;
    STAMCOUNTER StatExitPause;
    STAMCOUNTER StatExitCpuid;
    STAMCOUNTER StatExitRdtsc;
    STAMCOUNTER StatExitRdtscp;
    STAMCOUNTER StatExitRdpmc;
    STAMCOUNTER StatExitRdrand;
    STAMCOUNTER StatExitCli;
    STAMCOUNTER StatExitSti;
    STAMCOUNTER StatExitPushf;
    STAMCOUNTER StatExitPopf;
    STAMCOUNTER StatExitIret;
    STAMCOUNTER StatExitInt;
    STAMCOUNTER StatExitCRxWrite[16];
    STAMCOUNTER StatExitCRxRead[16];
    STAMCOUNTER StatExitDRxWrite;
    STAMCOUNTER StatExitDRxRead;
    STAMCOUNTER StatExitRdmsr;
    STAMCOUNTER StatExitWrmsr;
    STAMCOUNTER StatExitClts;
    STAMCOUNTER StatExitXdtrAccess;
    STAMCOUNTER StatExitHlt;
    STAMCOUNTER StatExitMwait;
    STAMCOUNTER StatExitMonitor;
    STAMCOUNTER StatExitLmsw;
    STAMCOUNTER StatExitIOWrite;
    STAMCOUNTER StatExitIORead;
    STAMCOUNTER StatExitIOStringWrite;
    STAMCOUNTER StatExitIOStringRead;
    STAMCOUNTER StatExitIntWindow;
    STAMCOUNTER StatExitMaxResume;
    STAMCOUNTER StatExitExtInt;
    STAMCOUNTER StatExitHostNmiInGC;
    STAMCOUNTER StatExitPreemptTimer;
    STAMCOUNTER StatExitTprBelowThreshold;
    STAMCOUNTER StatExitTaskSwitch;
    STAMCOUNTER StatExitMtf;
    STAMCOUNTER StatExitApicAccess;
    STAMCOUNTER StatPendingHostIrq;

    STAMCOUNTER StatPreemptPreempting;
    STAMCOUNTER StatPreemptSaveHostState;

    STAMCOUNTER StatFlushPage;
    STAMCOUNTER StatFlushPageManual;
    STAMCOUNTER StatFlushPhysPageManual;
    STAMCOUNTER StatFlushTlb;
    STAMCOUNTER StatFlushTlbManual;
    STAMCOUNTER StatFlushTlbWorldSwitch;
    STAMCOUNTER StatNoFlushTlbWorldSwitch;
    STAMCOUNTER StatFlushEntire;
    STAMCOUNTER StatFlushAsid;
    STAMCOUNTER StatFlushNestedPaging;
    STAMCOUNTER StatFlushTlbInvlpgVirt;
    STAMCOUNTER StatFlushTlbInvlpgPhys;
    STAMCOUNTER StatTlbShootdown;
    STAMCOUNTER StatTlbShootdownFlush;

    STAMCOUNTER StatSwitchGuestIrq;
    STAMCOUNTER StatSwitchHmToR3FF;
    STAMCOUNTER StatSwitchExitToR3;
    STAMCOUNTER StatSwitchLongJmpToR3;

    STAMCOUNTER StatTscOffset;
    STAMCOUNTER StatTscIntercept;
    STAMCOUNTER StatTscInterceptOverFlow;

    STAMCOUNTER StatExitReasonNpf;
    STAMCOUNTER StatDRxArmed;
    STAMCOUNTER StatDRxContextSwitch;
    STAMCOUNTER StatDRxIoCheck;

    STAMCOUNTER StatLoadMinimal;
    STAMCOUNTER StatLoadFull;

    STAMCOUNTER StatVmxCheckBadRmSelBase;
    STAMCOUNTER StatVmxCheckBadRmSelLimit;
    STAMCOUNTER StatVmxCheckRmOk;

    STAMCOUNTER StatVmxCheckBadSel;
    STAMCOUNTER StatVmxCheckBadRpl;
    STAMCOUNTER StatVmxCheckBadLdt;
    STAMCOUNTER StatVmxCheckBadTr;
    STAMCOUNTER StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER StatFpu64SwitchBack;
    STAMCOUNTER StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER) paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;


#ifdef IN_RING0

VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


#ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
#else
# define HMDumpRegs(a, b, c)         do { } while (0)
# define HMR0DumpDescriptor(a, b, c) do { } while (0)
#endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param pGdtr Where to store the 64-bit GDTR.
 * @param pIdtr Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif
