/* $Id: HMInternal.h 49532 2013-11-18 14:11:18Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0
# define HM_PROFILE_EXIT_DISPATCH
#endif

RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int   Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */


/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT                 0x100
#define MASK_EXITREASON_STAT                0xff
#define MASK_INJECT_IRQ_STAT                0xff

/** @name HM changed flags.
 * These flags keep track of which important registers have been
 * changed since they were last reset.
 * @{
 */
#define HM_CHANGED_GUEST_CR0                     RT_BIT(0)  /* Shared */
#define HM_CHANGED_GUEST_CR3                     RT_BIT(1)
#define HM_CHANGED_GUEST_CR4                     RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR                    RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR                    RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR                    RT_BIT(5)
#define HM_CHANGED_GUEST_TR                      RT_BIT(6)
#define HM_CHANGED_GUEST_SEGMENT_REGS            RT_BIT(7)
#define HM_CHANGED_GUEST_DEBUG                   RT_BIT(8)  /* Shared */
#define HM_CHANGED_GUEST_RIP                     RT_BIT(9)
#define HM_CHANGED_GUEST_RSP                     RT_BIT(10)
#define HM_CHANGED_GUEST_RFLAGS                  RT_BIT(11)
#define HM_CHANGED_GUEST_CR2                     RT_BIT(12)
#define HM_CHANGED_GUEST_SYSENTER_CS_MSR         RT_BIT(13)
#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR        RT_BIT(14)
#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
/* VT-x specific state. */
#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(16)
#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(17)
#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(18)
#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(19)
#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(20)
/* AMD-V specific state. */
#define HM_CHANGED_SVM_GUEST_EFER_MSR            RT_BIT(16)
#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(17)
#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(18)
#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(19)
#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(20)

#define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0 \
                                                  | HM_CHANGED_GUEST_CR3 \
                                                  | HM_CHANGED_GUEST_CR4 \
                                                  | HM_CHANGED_GUEST_GDTR \
                                                  | HM_CHANGED_GUEST_IDTR \
                                                  | HM_CHANGED_GUEST_LDTR \
                                                  | HM_CHANGED_GUEST_TR \
                                                  | HM_CHANGED_GUEST_SEGMENT_REGS \
                                                  | HM_CHANGED_GUEST_DEBUG \
                                                  | HM_CHANGED_GUEST_RIP \
                                                  | HM_CHANGED_GUEST_RSP \
                                                  | HM_CHANGED_GUEST_RFLAGS \
                                                  | HM_CHANGED_GUEST_CR2 \
                                                  | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                                                  | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                                  | HM_CHANGED_GUEST_SYSENTER_ESP_MSR \
                                                  | HM_CHANGED_VMX_GUEST_AUTO_MSRS \
                                                  | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                                  | HM_CHANGED_VMX_GUEST_APIC_STATE \
                                                  | HM_CHANGED_VMX_ENTRY_CTLS \
                                                  | HM_CHANGED_VMX_EXIT_CTLS)

#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(21)

/* Bits shared between host and guest. */
#define HM_CHANGED_HOST_GUEST_SHARED_STATE       (  HM_CHANGED_GUEST_CR0 \
                                                  | HM_CHANGED_GUEST_DEBUG)
/** @} */
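
/*
 * Illustrative sketch (not compiled): how the HM_CHANGED_* flags are meant
 * to be consumed.  A dirty-state mask (e.g. HMCPU::fContextUseFlags below)
 * accumulates bits as guest registers are modified, and each bit is cleared
 * once that piece of state has been exported to the VMCS/VMCB.  The function
 * and variable names here are hypothetical.
 */
#if 0
static void hmSketchSyncDirtyState(uint32_t *pfContextUseFlags)
{
    /* After emulating an instruction: mark RIP and RFLAGS as dirty. */
    *pfContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;

    /* Before VM-entry: export only what actually changed, then clear it. */
    if (*pfContextUseFlags & HM_CHANGED_GUEST_RIP)
    {
        /* ... write the guest RIP into the VMCS/VMCB here ... */
        *pfContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
    }
    /* ... likewise for every other HM_CHANGED_* bit ... */
}
#endif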

/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES          8

/** Size of the EPT identity page table (1024 entries mapping 4 MB pages each, covering the entire 32-bit address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE       PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE                     (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM            (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)
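
/*
 * Illustrative sketch (not compiled): why HM_EPT_IDENTITY_PG_TABLE_SIZE is
 * exactly one page.  An identity-mapping page directory of 4 MB (PSE) pages
 * needs 1024 entries of 4 bytes each, i.e. 4096 bytes, to cover
 * 1024 * 4 MB = 4 GB.  The PDE constants are the standard ones from x86.h;
 * the function name and exact flag selection are hypothetical.
 */
#if 0
static void hmSketchBuildIdentityPD(PX86PD pPD)
{
    for (unsigned i = 0; i < X86_PG_ENTRIES; i++)
        pPD->a[i].u = (i << X86_PD_SHIFT)  /* 4 MB page i maps itself. */
                    | X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS;
}
#endif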

/** Enable TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM saved state (SSM) version. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION                     5
# define HM_SSM_VERSION_NO_PATCHING         4
#else
# define HM_SSM_VERSION                     4
# define HM_SSM_VERSION_NO_PATCHING         4
#endif
#define HM_SSM_VERSION_2_0_X                3

/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBALCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object. */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentAsid;
    /** TLB flush count. */
    uint32_t            cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool       fInUse;
} HMGLOBALCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction. (32-bit GC pointer) */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
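
/*
 * Illustrative sketch (not compiled): registering a TPR patch record.  The
 * records live in HM::aPatches (see below) and are indexed by the guest
 * address of the patched instruction through the offset-based AVL tree
 * (RTAvloU32Insert/RTAvloU32Get from iprt/avl.h).  Bounds handling and the
 * function name are simplified assumptions.
 */
#if 0
static PHMTPRPATCH hmSketchAddPatch(PHM pHM, RTGCPTR32 GCPtrInstr)
{
    if (pHM->cPatches >= RT_ELEMENTS(pHM->aPatches))
        return NULL;                                /* Patch array exhausted. */
    PHMTPRPATCH pPatch = &pHM->aPatches[pHM->cPatches];
    pPatch->Core.Key = GCPtrInstr;                  /* Keyed by patched address. */
    if (!RTAvloU32Insert(&pHM->PatchTree, &pPatch->Core))
        return NULL;                                /* Instruction already patched. */
    pHM->cPatches++;
    return pPatch;
}
#endif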

/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param   pVM             Pointer to the VM.
 * @param   offCpumVCpu     Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    bool                        u8Alignment[7];

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;

    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32-bit to 64-bit switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    RTR0PTR                     uPadding2;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         *  CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVpid;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;

        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Set if unrestricted guest execution is allowed to be used. */
        bool                        fAllowUnrestricted;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and
         *  protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTR0MEMOBJ                  hMemObjScratch;
        RTHCPHYS                    HCPhysScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif

        /** Internal Id of which flush-handler to use for tagged-TLB entries. */
        unsigned                    uFlushTaggedTlb;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t                    u64HostCr4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t                    u64HostEfer;

        /** VMX MSR values. */
        VMXMSRS                     Msrs;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEpt;
        VMX_FLUSH_VPID              enmFlushVpid;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD CPU. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12 KB). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Physical address of the IO bitmap (12 KB). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /** HWCR MSR (for diagnostics). */
        uint64_t                    u64MsrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address.
     */
    AVLOU32TREE                 PatchTree;
    uint32_t                    cPatches;
    HMTPRPATCH                  aPatches[64];

    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t                     lLastError;

    /** Set if HMR0Init has been run. */
    bool                        fHMR0Init;
    bool                        u8Alignment1[7];

    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccess;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

/** Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY             128

/** Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t             aMagic[16];
    uint64_t            uMagic;
    uint64_t            u64TimeEntry;
    uint64_t            u64TimeSwitch;
    uint64_t            cResume;
    uint64_t            interPD;
    uint64_t            pSwitcher;
    uint32_t            uPos;
    uint32_t            idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t            cr2;
    struct
    {
        uint32_t        cValidEntries;
        uint32_t        uAlignment;
        uint32_t        aField[VMCSCACHE_MAX_ENTRY];
        uint64_t        aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t        cValidEntries;
        uint32_t        uAlignment;
        uint32_t        aField[VMCSCACHE_MAX_ENTRY];
        uint64_t        aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS        HCPhysCpuPage;
        RTHCPHYS        HCPhysVmcs;
        RTGCPTR         pCache;
        RTGCPTR         pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS        HCPhysVmcs;
        RTGCPTR         pCache;
        RTGCPTR         pCtx;
        uint64_t        eflags;
        uint64_t        cr8;
    } TestOut;
    struct
    {
        uint64_t        param1;
        uint64_t        param2;
        uint64_t        param3;
        uint64_t        param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
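
/*
 * Illustrative sketch (not compiled): queuing a deferred VMCS write.  With
 * VMX_USE_CACHED_VMCS_ACCESSES (32-bit host, 64-bit guest) not every VMWRITE
 * can be issued directly, so field/value pairs are queued in the Write
 * sub-structure and flushed by the 64-bit switcher.  The helper name is
 * hypothetical.
 */
#if 0
static int hmSketchQueueVmcsWrite(PVMCSCACHE pCache, uint32_t idxField, uint64_t u64Val)
{
    if (pCache->Write.cValidEntries >= VMCSCACHE_MAX_ENTRY)
        return VERR_BUFFER_OVERFLOW;
    uint32_t const i = pCache->Write.cValidEntries++;
    pCache->Write.aField[i]    = idxField;
    pCache->Write.aFieldVal[i] = u64Val;
    return VINF_SUCCESS;
}
#endif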

/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
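
/*
 * Illustrative sketch (not compiled): both guest-execution entry points are
 * reached through per-VCPU function pointers (HMCPU::vmx.pfnStartVM and
 * HMCPU::svm.pfnVMRun below) so ring-0 init can pick an assembly worker
 * matching the host/guest bitness; fResume selects VMLAUNCH vs. VMRESUME.
 * The pVCpu->hm.s accessor path is assumed from the VMCPU union layout.
 */
#if 0
static int hmSketchRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fResume)
{
    return pVCpu->hm.s.vmx.pfnStartVM(fResume, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
}
#endif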

/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Whether we're executing a single instruction. */
    bool                        fSingleInstruction;
    /** Set if we need to clear the trap flag because of single stepping. */
    bool                        fClearTrapFlag;
    /** Whether we've completed the inner HM leave function. */
    bool                        fLeaveDone;
    /** Whether we're using the hyper DR7 or guest DR7. */
    bool                        fUsingHyperDR7;
    /** Whether to preload the guest-FPU state to avoid #NM VM-exit overhead. */
    bool                        fUseGuestFpu;

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;
    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
     *  time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    /** An additional error code used for some guru meditations. */
    uint32_t                    u32HMError;
    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment1;
#endif

        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t                    u32EntryCtls;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment2;
#endif

        /** Current CR0 mask. */
        uint32_t                    u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t                    u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        uint32_t                    fUpdatedGuestState;
        /** Current EPTP. */
        RTHCPHYS                    HCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Number of guest/host MSR pairs in the auto-load/store area. */
        uint32_t                    cMsrs;
        /** Whether the host MSR values are up-to-date. */
        bool                        fUpdatedHostMsrs;
        uint8_t                     u8Align[7];

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last used TSC offset value (cached). */
        uint64_t                    u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR             AttrCS;
            X86DESCATTR             AttrDS;
            X86DESCATTR             AttrES;
            X86DESCATTR             AttrFS;
            X86DESCATTR             AttrGS;
            X86DESCATTR             AttrSS;
            X86EFLAGS               Eflags;
            uint32_t                fRealOnV86Active;
        } RealMode;

        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                u32VMCSRevision;
            uint32_t                u32InstrError;
            uint32_t                u32ExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                u32Padding;
        } LastError;

        /** State of the VMCS. */
        uint32_t                    uVmcsState;
        /** Which host-state bits to restore before being preempted. */
        uint32_t                    fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST              RestoreHost;
        /** Set if guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;
        uint8_t                     u8Align2[7];

        /** Padding. */
        uint32_t                    u32Padding;
    } vmx;

    struct
    {
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ                  hMemObjVmcbHost;
        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS                    HCPhysVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *)           pvVmcbHost;

        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ                  hMemObjVmcb;
        /** Physical address of the guest VMCB. */
        RTHCPHYS                    HCPhysVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *)           pvVmcb;

        /** Ring 0 handlers for AMD-V. */
        PFNHMSVMVMRUN               pfnVMRun;

        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         *  we should check if the VTPR changed on every VM-exit. */
        bool                        fSyncVTpr;
        uint8_t                     u8Align[7];

        /** Alignment padding. */
        uint32_t                    u32Padding;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    u32ErrCode;
        uint32_t                    cbInstr;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
        uint64_t                    u64IntInfo;
        RTGCUINTPTR                 GCPtrFaultAddress;
    } Event;

    /** I/O block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the I/O code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending I/O operation type. */
        HMPENDINGIO                 enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t            uPort;
                uint32_t            uAndVal;
                uint32_t            cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     *  HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR                     aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        uint32_t                    cPages;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     *  on the stack. */
    DISCPUSTATE                 DisState;

    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
    STAMPROFILEADV              StatExitIO;
    STAMPROFILEADV              StatExitMovCRx;
    STAMPROFILEADV              StatExitXcptNmi;
    STAMPROFILEADV              StatLoadGuestState;
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatInjectInterrupt;
    STAMCOUNTER                 StatInjectXcpt;
    STAMCOUNTER                 StatInjectPendingReflect;

    STAMCOUNTER                 StatExitAll;
    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;       /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitExtInt;
    STAMCOUNTER                 StatExitHostNmiInGC;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatPreemptPreempting;
    STAMCOUNTER                 StatPreemptSaveHostState;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushEntire;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;

    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;
    STAMCOUNTER                 StatTscInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

    STAMCOUNTER                 StatVmxCheckBadRmSelBase;
    STAMCOUNTER                 StatVmxCheckBadRmSelLimit;
    STAMCOUNTER                 StatVmxCheckRmOk;

    STAMCOUNTER                 StatVmxCheckBadSel;
    STAMCOUNTER                 StatVmxCheckBadRpl;
    STAMCOUNTER                 StatVmxCheckBadLdt;
    STAMCOUNTER                 StatVmxCheckBadTr;
    STAMCOUNTER                 StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV              StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;


#ifdef IN_RING0

VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);
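
/*
 * Illustrative sketch (not compiled): typical ring-0 use of the per-CPU data
 * accessor, e.g. when making sure VT-x/AMD-V is set up on the current host
 * CPU.  Must run with preemption disabled so the CPU cannot change under us;
 * the function name and surrounding logic are simplified assumptions.
 */
#if 0
static void hmSketchEnsureConfigured(void)
{
    PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
    if (!pCpu->fConfigured)
    {
        /* ... enable VT-x/AMD-V on this CPU using pCpu->hMemObj ... */
    }
}
#endif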


#ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
#else
# define HMDumpRegs(a, b, c)            do { } while (0)
# define HMR0DumpDescriptor(a, b, c)    do { } while (0)
#endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets the 64-bit GDTR and IDTR on Darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets the 64-bit CR3 on Darwin.
 * @returns CR3
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif /* !___HMInternal_h */
