VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@ 47538

Last change on this file since 47538 was 47123, checked in by vboxsync, 12 years ago

VMM/HM: Dispatch host NMIs on Intel. Added separate STAM counter for host NMIs with the necessary changes to old, new VT-x, AMD-V code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 35.6 KB
Line 
1/* $Id: HMInternal.h 47123 2013-07-12 15:31:44Z vboxsync $ */
2/** @file
3 * HM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___HMInternal_h
19#define ___HMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/vmm/em.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/dis.h>
26#include <VBox/vmm/hm.h>
27#include <VBox/vmm/hm_vmx.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/cpum.h>
30#include <iprt/memobj.h>
31#include <iprt/cpuset.h>
32#include <iprt/mp.h>
33#include <iprt/avl.h>
34
35#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
36/* Enable 64 bits guest support. */
37# define VBOX_ENABLE_64_BITS_GUESTS
38#endif
39
40#ifdef VBOX_WITH_OLD_VTX_CODE
41# define VMX_USE_CACHED_VMCS_ACCESSES
42#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
43# define VMX_USE_CACHED_VMCS_ACCESSES
44#endif
45
46/** @def HM_PROFILE_EXIT_DISPATCH
47 * Enables profiling of the VM exit handler dispatching. */
48#if 0
49# define HM_PROFILE_EXIT_DISPATCH
50#endif
51
52/* The MSR auto load/store used to not work for KERNEL_GS_BASE MSR, thus we
53 * used to handle this MSR manually. See @bugref{6208}. This was clearly visible while
54 * booting Solaris 11 (11.1 b19) VMs with 2 Cpus. This is no longer the case and we
55 * always auto load/store the KERNEL_GS_BASE MSR.
56 *
57 * Note: don't forget to update the assembly files while modifying this!
58 */
59/** @todo This define should always be in effect and the define itself removed
60 after 'sufficient' testing. */
61# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
62
63RT_C_DECLS_BEGIN
64
65
66/** @defgroup grp_hm_int Internal
67 * @ingroup grp_hm
68 * @internal
69 * @{
70 */
71
72
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT        0x100
/** Mask applied to an exit reason before indexing the statistics array. */
#define MASK_EXITREASON_STAT       0xff
/** Mask applied to an injected IRQ/exception vector before indexing its statistics array. */
#define MASK_INJECT_IRQ_STAT       0xff

/** @name HM changed flags.
 * These flags are used to keep track of which important registers that
 * have been changed since last they were reset.
 * @{
 */
#define HM_CHANGED_GUEST_CR0                     RT_BIT(0)
#define HM_CHANGED_GUEST_CR3                     RT_BIT(1)
#define HM_CHANGED_GUEST_CR4                     RT_BIT(2)
#define HM_CHANGED_GUEST_GDTR                    RT_BIT(3)
#define HM_CHANGED_GUEST_IDTR                    RT_BIT(4)
#define HM_CHANGED_GUEST_LDTR                    RT_BIT(5)
#define HM_CHANGED_GUEST_TR                      RT_BIT(6)
#define HM_CHANGED_GUEST_MSR                     RT_BIT(7)      /* Unused in new VT-x, AMD-V code. */
#define HM_CHANGED_GUEST_SEGMENT_REGS            RT_BIT(8)
#define HM_CHANGED_GUEST_DEBUG                   RT_BIT(9)
/** Base set of changed-flags shared by the old and new VT-x/AMD-V code. */
#define HM_CHANGED_ALL_GUEST_BASE                (  HM_CHANGED_GUEST_CR0          \
                                                  | HM_CHANGED_GUEST_CR3          \
                                                  | HM_CHANGED_GUEST_CR4          \
                                                  | HM_CHANGED_GUEST_GDTR         \
                                                  | HM_CHANGED_GUEST_IDTR         \
                                                  | HM_CHANGED_GUEST_LDTR         \
                                                  | HM_CHANGED_GUEST_TR           \
                                                  | HM_CHANGED_GUEST_MSR          \
                                                  | HM_CHANGED_GUEST_SEGMENT_REGS \
                                                  | HM_CHANGED_GUEST_DEBUG)
#define HM_CHANGED_ALL_GUEST                     HM_CHANGED_ALL_GUEST_BASE

/** New VT-x, AMD-V code uses extra flags for more fine-grained state
 * tracking. */
#if !defined(VBOX_WITH_OLD_VTX_CODE) || !defined(VBOX_WITH_OLD_AMDV_CODE)
# define HM_CHANGED_GUEST_RIP                    RT_BIT(10)
# define HM_CHANGED_GUEST_RSP                    RT_BIT(11)
# define HM_CHANGED_GUEST_RFLAGS                 RT_BIT(12)
# define HM_CHANGED_GUEST_CR2                    RT_BIT(13)
# define HM_CHANGED_GUEST_SYSENTER_CS_MSR        RT_BIT(14)
# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR       RT_BIT(15)
# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR       RT_BIT(16)
/* VT-x specific state. */
# define HM_CHANGED_VMX_GUEST_AUTO_MSRS          RT_BIT(17)
# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE     RT_BIT(18)
# define HM_CHANGED_VMX_GUEST_APIC_STATE         RT_BIT(19)
# define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(20)
# define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(21)
/* AMD-V specific state.
   NOTE: bits 17-21 deliberately overlap the VT-x specific flags above --
   a VCPU only ever runs on one of the two, so the bit space is shared. */
# define HM_CHANGED_SVM_GUEST_EFER_MSR           RT_BIT(17)
# define HM_CHANGED_SVM_GUEST_APIC_STATE         RT_BIT(18)
# define HM_CHANGED_SVM_RESERVED1                RT_BIT(19)
# define HM_CHANGED_SVM_RESERVED2                RT_BIT(20)
# define HM_CHANGED_SVM_RESERVED3                RT_BIT(21)

# undef  HM_CHANGED_ALL_GUEST
# define HM_CHANGED_ALL_GUEST                    (  HM_CHANGED_ALL_GUEST_BASE            \
                                                  | HM_CHANGED_GUEST_RIP                 \
                                                  | HM_CHANGED_GUEST_RSP                 \
                                                  | HM_CHANGED_GUEST_RFLAGS              \
                                                  | HM_CHANGED_GUEST_CR2                 \
                                                  | HM_CHANGED_GUEST_SYSENTER_CS_MSR     \
                                                  | HM_CHANGED_GUEST_SYSENTER_EIP_MSR    \
                                                  | HM_CHANGED_GUEST_SYSENTER_ESP_MSR    \
                                                  | HM_CHANGED_VMX_GUEST_AUTO_MSRS       \
                                                  | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE  \
                                                  | HM_CHANGED_VMX_GUEST_APIC_STATE      \
                                                  | HM_CHANGED_VMX_ENTRY_CTLS            \
                                                  | HM_CHANGED_VMX_EXIT_CTLS)
#endif

#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(22)
/** @} */

/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES      8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM        (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM SSM version
 */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION                 5
# define HM_SSM_VERSION_NO_PATCHING     4
#else
# define HM_SSM_VERSION                 4
# define HM_SSM_VERSION_NO_PATCHING     4
#endif
#define HM_SSM_VERSION_2_0_X            3
170
/**
 * Global per-cpu information. (host)
 *
 * One instance per host CPU; tracks per-CPU enablement and ASID/VPID
 * state shared by the VT-x and AMD-V ring-0 code.
 */
typedef struct HMGLOBLCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentAsid;
    /** TLB flush count. */
    uint32_t            cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool       fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
195
/**
 * Type of pending (partially handled) I/O operation, see HMCPU::PendingIO.
 */
typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;
206
207
/**
 * Kind of TPR-accessing instruction a guest patch replaces,
 * see HMTPRPATCH::enmType.
 */
typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;
219
/**
 * One guest TPR-instruction patch record; kept in the HM::PatchTree AVL tree
 * keyed by the guest address of the patched instruction.
 */
typedef struct
{
    /** The key is the address of patched instruction. (32 bits GC ptr) */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
245
/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param   pVM             Pointer to the VM.
 * @param   offCpumVCpu     Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
256
/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 *
 * NOTE(review): the layout (explicit alignment/padding members) is
 * size-sensitive; do not reorder fields without re-checking the VM union.
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    /** Explicit alignment padding. */
    bool                        u8Alignment[7];

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;

    /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    /** Explicit alignment padding. */
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    RTR0PTR                     uPadding2;
#endif

    /** VT-x data. */
    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         *  CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVpid;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;

        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Set if unrestricted guest execution is allowed to be used. */
        bool                        fAllowUnrestricted;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        /** Scratch page used for crash-dump diagnostics. */
        RTR0MEMOBJ                  hMemObjScratch;
        RTHCPHYS                    HCPhysScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif

#ifndef VBOX_WITH_OLD_VTX_CODE
        /** Tagged-TLB flush type selector (new VT-x code). */
        unsigned                    uFlushTaggedTlb;
#else
        /** Ring 0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(void, pfnFlushTaggedTlb, (PVM pVM, PVMCPU pVCpu));
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        /** Explicit alignment padding. */
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init) */
        uint64_t                    hostEFER;

        /** VMX MSR values */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_vmfunc;
            uint64_t                vmx_ept_vpid_caps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEpt;
        VMX_FLUSH_VPID              enmFlushVpid;
    } vmx;

    /** AMD-V data. */
    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /* HWCR MSR (for diagnostics) */
        uint64_t                    msrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address
     */
    AVLOU32TREE                 PatchTree;
    /** Number of entries used in aPatches. */
    uint32_t                    cPatches;
    /** Storage for the patch records referenced by PatchTree. */
    HMTPRPATCH                  aPatches[64];

    /** Cached CPUID feature leaves used for capability checks. */
    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection */
    int32_t                     lLastError;

    /** HMR0Init was run */
    bool                        fHMR0Init;
    /** Explicit alignment padding. */
    bool                        u8Alignment1[7];

    /** TPR patching statistics. */
    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccess;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;
454
/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY                             128

/* Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t         aMagic[16];
    uint64_t        uMagic;
    uint64_t        u64TimeEntry;
    uint64_t        u64TimeSwitch;
    uint64_t        cResume;
    uint64_t        interPD;
    uint64_t        pSwitcher;
    uint32_t        uPos;
    uint32_t        idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t        cr2;
    /** Pending VMCS field writes to be flushed (parallel aField/aFieldVal arrays). */
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    /** Cached VMCS field reads (parallel aField/aFieldVal arrays). */
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    /** Strict-build sanity data recorded on the way into the world switch. */
    struct
    {
        RTHCPHYS    HCPhysCpuPage;
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    /** Strict-build sanity data recorded on the way out of the world switch. */
    struct
    {
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    /** Strict-build scratch parameters. */
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
516
/** VMX StartVM function.
 * @param   fResume     Whether to VMLAUNCH (false) or VMRESUME (true) --
 *                      presumably; confirm against the assembly implementations.
 * @param   pCtx        Guest CPU context.
 * @param   pCache      VMCS read/write cache.
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function.
 * @param   pVmcbHostPhys   Physical address of the host VMCB.
 * @param   pVmcbPhys       Physical address of the guest VMCB.
 * @param   pCtx            Guest CPU context.
 * @param   pVM             The VM handle.
 * @param   pVCpu           The VMCPU handle. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
526
/**
 * HM VMCPU Instance data.
 *
 * Per-virtual-CPU hardware-assisted execution state: world-switch bookkeeping,
 * VT-x/AMD-V per-VCPU resources, pending event/IO state and statistics.
 */
typedef struct HMCPU
{
    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Explicit alignment padding. */
    uint8_t                     u8Alignment[4];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;
    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first
     *  time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    /** An additional error code used for some gurus. */
    uint32_t                    u32HMError;

    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    /** VT-x per-VCPU data. */
    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        /** Explicit alignment padding. */
        uint32_t                    u32Alignment1;
#endif

        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t                    u32EntryCtls;
        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;
#if HC_ARCH_BITS == 32
        /** Explicit alignment padding. */
        uint32_t                    u32Alignment2;
#endif

        /** Current CR0 mask. */
        uint32_t                    u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t                    u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        uint32_t                    fUpdatedGuestState;
        /** Current EPTP. */
        RTHCPHYS                    HCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Number of automatically loaded/restored guest MSRs during the world switch. */
        uint32_t                    cGuestMsrs;
        /** Explicit alignment padding. */
        uint32_t                    uAlignment;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last use TSC offset value. (cached) */
        uint64_t                    u64TSCOffset;
        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR                 uAttrCS;
            X86DESCATTR                 uAttrDS;
            X86DESCATTR                 uAttrES;
            X86DESCATTR                 uAttrFS;
            X86DESCATTR                 uAttrGS;
            X86DESCATTR                 uAttrSS;
            X86EFLAGS                   eflags;
            uint32_t                    fRealOnV86Active;
        } RealMode;

        /** Details of the last VM-entry failure, kept for diagnostics. */
        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                u32VMCSRevision;
            uint32_t                u32InstrError;
            uint32_t                u32ExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                padding;
        } LastError;

#ifdef VBOX_WITH_OLD_VTX_CODE
        /** The last seen guest paging mode (by VT-x). */
        PGMMODE                     enmLastSeenGuestMode;
        /** Current guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE                     enmCurrGuestMode;
        /** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE                     enmPrevGuestMode;
#else
        /** Which host-state bits to restore before being preempted. */
        uint32_t                    fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST              RestoreHost;
        /** Set if guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;
#endif
    } vmx;

    /** AMD-V per-VCPU data. */
    struct
    {
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ                  hMemObjVmcbHost;
        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS                    HCPhysVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *)           pvVmcbHost;

        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ                  hMemObjVmcb;
        /** Physical address of the guest VMCB. */
        RTHCPHYS                    HCPhysVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *)           pvVmcb;

        /** Ring 0 handlers for AMD-V. */
        PFNHMSVMVMRUN               pfnVMRun;

        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

        /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
         *  we should check if the VTPR changed on every VM-exit. */
        bool                        fSyncVTpr;
        /** Explicit alignment padding. */
        uint8_t                     u8Align[7];

        /** Alignment padding. */
        uint32_t                    u32Padding;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    u32ErrCode;
        uint32_t                    cbInstr;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
        uint64_t                    u64IntrInfo;
        RTGCUINTPTR                 GCPtrFaultAddress;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the io code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    /** Pending (partially handled) I/O instruction state. */
    struct
    {
        /** Pending IO operation type. */
        HMPENDINGIO                 enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t                uPort;
                uint32_t                uAndVal;
                uint32_t                cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     *  HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR                     aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        uint32_t                    cPages;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     *  on the stack. */
    DISCPUSTATE                 DisState;

    /** Profiling of the major world-switch phases. */
    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
    STAMPROFILEADV              StatExitIO;
    STAMPROFILEADV              StatExitMovCRx;
    STAMPROFILEADV              StatExitXcptNmi;
    STAMPROFILEADV              StatLoadGuestState;
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    /** Event injection counters. */
    STAMCOUNTER                 StatInjectInterrupt;
    STAMCOUNTER                 StatInjectXcpt;
    STAMCOUNTER                 StatInjectPendingReflect;

    /** Per-reason VM-exit counters. */
    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;       /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitExtInt;
    STAMCOUNTER                 StatExitHostNmi;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatPendingHostIrq;

    /** TLB flush counters. */
    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushEntire;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    /** Ring-3 switch counters. */
    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;

    /** TSC offsetting/interception counters. */
    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;
    STAMCOUNTER                 StatTscInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

    /** Real-mode check counters. */
    STAMCOUNTER                 StatVmxCheckBadRmSelBase;
    STAMCOUNTER                 StatVmxCheckBadRmSelLimit;
    STAMCOUNTER                 StatVmxCheckRmOk;

    /** Protected-mode check counters. */
    STAMCOUNTER                 StatVmxCheckBadSel;
    STAMCOUNTER                 StatVmxCheckBadRpl;
    STAMCOUNTER                 StatVmxCheckBadLdt;
    STAMCOUNTER                 StatVmxCheckBadTr;
    STAMCOUNTER                 StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    /** Dynamically allocated per-exit-reason / per-injected-IRQ counter arrays
     *  (R3 and R0 mappings of the same data). */
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV              StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;
918
919
#ifdef IN_RING0

/** Returns the per-cpu global information for the CPU the caller runs on. */
VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void);
/** Returns the per-cpu global information for the given CPU id. */
VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


#ifdef VBOX_STRICT
/** Dumps the guest CPU context (strict builds only). */
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
/** Dumps a descriptor-table entry (strict builds only). */
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
#else
/* No-op stubs for non-strict builds. */
# define HMDumpRegs(a, b ,c)            do { } while (0)
# define HMR0DumpDescriptor(a, b, c)    do { } while (0)
#endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
/** Wrappers that preserve XMM state around the VT-x/AMD-V world switchers
 *  on hosts where the kernel uses XMM registers. */
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */
955
956/** @} */
957
958RT_C_DECLS_END
959
960#endif
961
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette