VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@ 48210

Last change on this file since 48210 was 48210, checked in by vboxsync, 11 years ago

VMM: More naming fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 35.2 KB
/* $Id: HMInternal.h 48210 2013-08-30 22:22:15Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0
# define HM_PROFILE_EXIT_DISPATCH
#endif

/* The MSR auto load/store did not use to work for the KERNEL_GS_BASE MSR, so
 * we used to handle that MSR manually. See @bugref{6208}. This was clearly
 * visible while booting Solaris 11 (11.1 b19) VMs with 2 CPUs. That is no
 * longer the case and we always auto load/store the KERNEL_GS_BASE MSR.
 *
 * Note: don't forget to update the assembly files while modifying this!
 */
/** @todo This define should always be in effect and the define itself removed
 *        after 'sufficient' testing. */
#define VBOX_WITH_AUTO_MSR_LOAD_RESTORE

RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int       Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */


/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT                 0x100
#define MASK_EXITREASON_STAT                0xff
#define MASK_INJECT_IRQ_STAT                0xff

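#if 0 /* Illustrative usage sketch, not part of the original header: how the
       * mask above keeps an arbitrary exit reason inside the
       * MAX_EXITREASON_STAT-sized counter array. paStatExitReason is declared
       * in HMCPU below (under VBOX_WITH_STATISTICS); the helper name is
       * hypothetical. */
static void hmSketchCountExitReason(PSTAMCOUNTER paStatExitReason, uint32_t uExitReason)
{
    /* Clamp the exit reason to a valid array index before counting it. */
    STAM_COUNTER_INC(&paStatExitReason[uExitReason & MASK_EXITREASON_STAT]);
}
#endif
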
/** @name HM changed flags.
 * These flags are used to keep track of which important registers have
 * been changed since they were last reset.
 * @{
 */
#define HM_CHANGED_GUEST_CR0                RT_BIT(0)
#define HM_CHANGED_GUEST_CR3                RT_BIT(1)
#define HM_CHANGED_GUEST_CR4                RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR               RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR               RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR               RT_BIT(5)
#define HM_CHANGED_GUEST_TR                 RT_BIT(6)
#define HM_CHANGED_GUEST_SEGMENT_REGS       RT_BIT(7)
#define HM_CHANGED_GUEST_DEBUG              RT_BIT(8)
#define HM_CHANGED_GUEST_RIP                RT_BIT(9)
#define HM_CHANGED_GUEST_RSP                RT_BIT(10)
#define HM_CHANGED_GUEST_RFLAGS             RT_BIT(11)
#define HM_CHANGED_GUEST_CR2                RT_BIT(12)
#define HM_CHANGED_GUEST_SYSENTER_CS_MSR    RT_BIT(13)
#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR   RT_BIT(14)
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR   RT_BIT(15)
/* VT-x specific state. */
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS      RT_BIT(16)
#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE RT_BIT(17)
#define HM_CHANGED_VMX_GUEST_APIC_STATE     RT_BIT(18)
#define HM_CHANGED_VMX_ENTRY_CTLS           RT_BIT(19)
#define HM_CHANGED_VMX_EXIT_CTLS            RT_BIT(20)
/* AMD-V specific state. */
#define HM_CHANGED_SVM_GUEST_EFER_MSR       RT_BIT(16)
#define HM_CHANGED_SVM_GUEST_APIC_STATE     RT_BIT(17)
#define HM_CHANGED_SVM_RESERVED1            RT_BIT(18)
#define HM_CHANGED_SVM_RESERVED2            RT_BIT(19)
#define HM_CHANGED_SVM_RESERVED3            RT_BIT(20)

#define HM_CHANGED_ALL_GUEST                (  HM_CHANGED_GUEST_CR0 \
                                             | HM_CHANGED_GUEST_CR3 \
                                             | HM_CHANGED_GUEST_CR4 \
                                             | HM_CHANGED_GUEST_GDTR \
                                             | HM_CHANGED_GUEST_IDTR \
                                             | HM_CHANGED_GUEST_LDTR \
                                             | HM_CHANGED_GUEST_TR \
                                             | HM_CHANGED_GUEST_SEGMENT_REGS \
                                             | HM_CHANGED_GUEST_DEBUG \
                                             | HM_CHANGED_GUEST_RIP \
                                             | HM_CHANGED_GUEST_RSP \
                                             | HM_CHANGED_GUEST_RFLAGS \
                                             | HM_CHANGED_GUEST_CR2 \
                                             | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                                             | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                             | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
                                             | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
                                             | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                             | HM_CHANGED_VMX_GUEST_APIC_STATE \
                                             | HM_CHANGED_VMX_ENTRY_CTLS \
                                             | HM_CHANGED_VMX_EXIT_CTLS)

#define HM_CHANGED_HOST_CONTEXT             RT_BIT(21)

/* Bits shared between host and guest. */
#define HM_CHANGED_HOST_GUEST_SHARED_STATE  (  HM_CHANGED_GUEST_CR0 \
                                             | HM_CHANGED_GUEST_DEBUG)
/** @} */

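#if 0 /* Illustrative usage sketch, not part of the original header: typical
       * dirty-flag handling. fContextUseFlags stands in for the per-VCPU
       * field of the same name declared in HMCPU below. */
static void hmSketchChangedFlags(void)
{
    uint32_t fContextUseFlags = 0;

    /* Mark guest RIP and RFLAGS as needing to be written to the VMCS/VMCB. */
    fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;

    /* On VM-entry: sync only what is dirty and clear each flag once synced. */
    if (fContextUseFlags & HM_CHANGED_GUEST_RIP)
        fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
}
#endif
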
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES      8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM        (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM SSM version. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION                 5
# define HM_SSM_VERSION_NO_PATCHING     4
#else
# define HM_SSM_VERSION                 4
# define HM_SSM_VERSION_NO_PATCHING     4
#endif
#define HM_SSM_VERSION_2_0_X            3
/**
 * Global per-CPU information (host).
 */
typedef struct HMGLOBALCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object. */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentAsid;
    /** TLB flush count. */
    uint32_t            cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code (for power suspend). */
    volatile bool       fInUse;
} HMGLOBALCPUINFO;
/** Pointer to the per-CPU global information. */
typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction (32-bit GC pointer). */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
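
#if 0 /* Illustrative usage sketch, not part of the original header: looking up
       * a TPR patch record by guest instruction address. The AVL key is the
       * patched instruction's 32-bit guest address (see Core above); the
       * helper name is hypothetical, but RTAvloU32Get is the real IPRT
       * offset-based AVL API used with AVLOU32TREE. */
static PHMTPRPATCH hmSketchFindPatch(PAVLOU32TREE pPatchTree, RTGCPTR32 GCPtrInstr)
{
    return (PHMTPRPATCH)RTAvloU32Get(pPatchTree, (AVLOU32KEY)GCPtrInstr);
}
#endif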

/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param   pVM             Pointer to the VM.
 * @param   offCpumVCpu     Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    bool                        u8Alignment[7];

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;

    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32-bit to 64-bit switcher entry point. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    RTR0PTR                     uPadding2;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         *  CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVpid;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;

        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Set if unrestricted guest execution is allowed to be used. */
        bool                        fAllowUnrestricted;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and
         *  protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTR0MEMOBJ                  hMemObjScratch;
        RTHCPHYS                    HCPhysScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif

        /** Internal Id of which flush-handler to use for tagged-TLB entries. */
        unsigned                    uFlushTaggedTlb;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t                    u64HostCr4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t                    u64HostEfer;

        /** VMX MSR values. */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_vmfunc;
            uint64_t                vmx_ept_vpid_caps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEpt;
        VMX_FLUSH_VPID              enmFlushVpid;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD CPU. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12 KB). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Physical address of the IO bitmap (12 KB). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /** HWCR MSR (for diagnostics). */
        uint64_t                    msrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address.
     */
    AVLOU32TREE                 PatchTree;
    uint32_t                    cPatches;
    HMTPRPATCH                  aPatches[64];

    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t                     lLastError;

    /** HMR0Init was run. */
    bool                        fHMR0Init;
    bool                        u8Alignment1[7];

    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccess;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY     128

/* Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t         aMagic[16];
    uint64_t        uMagic;
    uint64_t        u64TimeEntry;
    uint64_t        u64TimeSwitch;
    uint64_t        cResume;
    uint64_t        interPD;
    uint64_t        pSwitcher;
    uint32_t        uPos;
    uint32_t        idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t        cr2;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS    HCPhysCpuPage;
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
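
#if 0 /* Illustrative usage sketch, not part of the original header: queuing a
       * deferred VMCS write into the cache. The real logic lives in the VMX
       * ring-0 code; the helper name and overflow handling here are
       * hypothetical. */
static int hmSketchCacheVmcsWrite(PVMCSCACHE pCache, uint32_t idxField, uint64_t u64Val)
{
    uint32_t const iEntry = pCache->Write.cValidEntries;
    if (iEntry >= VMCSCACHE_MAX_ENTRY)
        return VERR_BUFFER_OVERFLOW;        /* Cache full; it must be flushed to the VMCS first. */
    pCache->Write.aField[iEntry]    = idxField;
    pCache->Write.aFieldVal[iEntry] = u64Val;
    pCache->Write.cValidEntries     = iEntry + 1;
    return VINF_SUCCESS;
}
#endif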

/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
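
#if 0 /* Illustrative usage sketch, not part of the original header: the guest
       * is entered through these per-VCPU function pointers (pfnStartVM /
       * pfnVMRun, declared in HMCPU below). The wrapper name and the hm.s
       * access path are shown for illustration only. */
static int hmSketchRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PVMCSCACHE pCache, bool fResumeVM)
{
    return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, pCache, pVM, pVCpu);
}
#endif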

/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at the moment. */
    bool                        fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Whether we're executing a single instruction. */
    bool                        fSingleInstruction;
    /** Set if we need to clear the trap flag because of single stepping. */
    bool                        fClearTrapFlag;
    /** Whether we've completed the inner HM leave function. */
    bool                        fLeaveDone;
    uint8_t                     abAlignment[2];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;
    /** Id of the last CPU we were executing code on (NIL_RTCPUID for the first
     *  time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    /** An additional error code used for some guru meditations. */
    uint32_t                    u32HMError;
    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment1;
#endif

        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t                    u32EntryCtls;
        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment2;
#endif

        /** Current CR0 mask. */
        uint32_t                    u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t                    u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        uint32_t                    fUpdatedGuestState;
        /** Current EPTP. */
        RTHCPHYS                    HCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Number of automatically loaded/restored guest MSRs during the world switch. */
        uint32_t                    cGuestMsrs;
        uint32_t                    uAlignment;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last used TSC offset value (cached). */
        uint64_t                    u64TSCOffset;
        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR             AttrCS;
            X86DESCATTR             AttrDS;
            X86DESCATTR             AttrES;
            X86DESCATTR             AttrFS;
            X86DESCATTR             AttrGS;
            X86DESCATTR             AttrSS;
            X86EFLAGS               Eflags;
            uint32_t                fRealOnV86Active;
        } RealMode;

        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                u32VMCSRevision;
            uint32_t                u32InstrError;
            uint32_t                u32ExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                u32Padding;
        } LastError;

        /** State of the VMCS. */
        uint32_t                    uVmcsState;
        /** Which host-state bits to restore before being preempted. */
        uint32_t                    fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST              RestoreHost;
        /** Set if guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;
        /** Padding. */
        uint32_t                    u32Padding;
    } vmx;

    struct
    {
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ                  hMemObjVmcbHost;
        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS                    HCPhysVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *)           pvVmcbHost;

        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ                  hMemObjVmcb;
        /** Physical address of the guest VMCB. */
        RTHCPHYS                    HCPhysVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *)           pvVmcb;

        /** Ring 0 handlers for AMD-V. */
        PFNHMSVMVMRUN               pfnVMRun;

        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         *  we should check if the VTPR changed on every VM-exit. */
        bool                        fSyncVTpr;
        uint8_t                     u8Align[7];

        /** Alignment padding. */
        uint32_t                    u32Padding;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    u32ErrCode;
        uint32_t                    cbInstr;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
        uint64_t                    u64IntrInfo;
        RTGCUINTPTR                 GCPtrFaultAddress;
    } Event;

    /** I/O block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the I/O code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending IO operation type. */
        HMPENDINGIO                 enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t            uPort;
                uint32_t            uAndVal;
                uint32_t            cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     *  HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages (SMP guests only). */
    struct
    {
        RTGCPTR                     aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        uint32_t                    cPages;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     *  on the stack. */
    DISCPUSTATE                 DisState;

    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
    STAMPROFILEADV              StatExitIO;
    STAMPROFILEADV              StatExitMovCRx;
    STAMPROFILEADV              StatExitXcptNmi;
    STAMPROFILEADV              StatLoadGuestState;
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatInjectInterrupt;
    STAMCOUNTER                 StatInjectXcpt;
    STAMCOUNTER                 StatInjectPendingReflect;

    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;   /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitExtInt;
    STAMCOUNTER                 StatExitHostNmi;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushEntire;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;

    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;
    STAMCOUNTER                 StatTscInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

    STAMCOUNTER                 StatVmxCheckBadRmSelBase;
    STAMCOUNTER                 StatVmxCheckBadRmSelLimit;
    STAMCOUNTER                 StatVmxCheckRmOk;

    STAMCOUNTER                 StatVmxCheckBadSel;
    STAMCOUNTER                 StatVmxCheckBadRpl;
    STAMCOUNTER                 StatVmxCheckBadLdt;
    STAMCOUNTER                 StatVmxCheckBadTr;
    STAMCOUNTER                 StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV              StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;
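
#if 0 /* Illustrative usage sketches, not part of the original header; both
       * helper names are hypothetical. The first records a pending event for
       * injection on the next VM-entry; the second shows the bookkeeping that
       * HM_MAX_TLB_SHOOTDOWN_PAGES (defined above) guards. */
static void hmSketchSetPendingEvent(PHMCPU pHmCpu, uint64_t u64IntrInfo, uint32_t cbInstr,
                                    uint32_t u32ErrCode, RTGCUINTPTR GCPtrFaultAddress)
{
    pHmCpu->Event.u64IntrInfo       = u64IntrInfo;
    pHmCpu->Event.cbInstr           = cbInstr;
    pHmCpu->Event.u32ErrCode        = u32ErrCode;
    pHmCpu->Event.GCPtrFaultAddress = GCPtrFaultAddress;
    pHmCpu->Event.fPending          = true;
}

static void hmSketchQueueTlbShootdownPage(PHMCPU pHmCpu, RTGCPTR GCPtrPage)
{
    if (pHmCpu->TlbShootdown.cPages >= HM_MAX_TLB_SHOOTDOWN_PAGES)
        pHmCpu->fForceTLBFlush = true;  /* Too many pages queued; fall back to a full TLB flush. */
    else
        pHmCpu->TlbShootdown.aPages[pHmCpu->TlbShootdown.cPages++] = GCPtrPage;
}
#endif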


#ifdef IN_RING0

VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


# ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
# else
#  define HMDumpRegs(a, b, c)           do { } while (0)
#  define HMR0DumpDescriptor(a, b, c)   do { } while (0)
# endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif /* !___HMInternal_h */