VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@43803

Last change on this file since 43803 was 43803, checked in by vboxsync, 12 years ago

VMM/VMMR0: bits.

/* $Id: HMInternal.h 43803 2012-11-05 13:50:57Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#define VMX_USE_CACHED_VMCS_ACCESSES
#define HM_VMX_EMULATE_REALMODE

/* The MSR auto load/store does not work for the KERNEL_GS_BASE MSR, so we
 * handle this MSR manually. See @bugref{6208}. This is clearly visible while
 * booting Solaris 11 (11.1 b19) VMs with 2 CPUs.
 *
 * Note: don't forget to update the assembly files while modifying this!
 */
# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE

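/* Illustrative sketch only (not part of the original header): with
 * VBOX_WITH_AUTO_MSR_LOAD_RESTORE in effect, KERNEL_GS_BASE still has to be
 * swapped by hand around the world switch, conceptually like this (assumes
 * the IPRT ASMRdMsr/ASMWrMsr helpers and the MSR_K8_KERNEL_GS_BASE constant,
 * and the CPUMCTX::msrKERNELGSBASE field):
 *
 *     uint64_t uHostKernelGSBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
 *     ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);   // load guest value
 *     // ... execute guest code ...
 *     pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);  // save guest value
 *     ASMWrMsr(MSR_K8_KERNEL_GS_BASE, uHostKernelGSBase);       // restore host value
 */
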
RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */


/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT 0x100
#define MASK_EXITREASON_STAT 0xff
#define MASK_INJECT_IRQ_STAT 0xff

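/* Illustrative sketch only (not from the original source): the mask clamps
 * exit reasons into the MAX_EXITREASON_STAT-sized counter arrays declared
 * further down (paStatExitReason / paStatExitReasonR0), roughly:
 *
 *     STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[uExitReason & MASK_EXITREASON_STAT]);
 */
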
/** @name Changed flags
 * These flags are used to keep track of which important registers have
 * been changed since they were last reset.
 * @{
 */
#define HM_CHANGED_GUEST_FPU RT_BIT(0)
#define HM_CHANGED_GUEST_CR0 RT_BIT(1)
#define HM_CHANGED_GUEST_CR3 RT_BIT(2)
#define HM_CHANGED_GUEST_CR4 RT_BIT(3)
#define HM_CHANGED_GUEST_GDTR RT_BIT(4)
#define HM_CHANGED_GUEST_IDTR RT_BIT(5)
#define HM_CHANGED_GUEST_LDTR RT_BIT(6)
#define HM_CHANGED_GUEST_TR RT_BIT(7)
#define HM_CHANGED_GUEST_MSR RT_BIT(8)
#define HM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(9)
#define HM_CHANGED_GUEST_DEBUG RT_BIT(10)
#define HM_CHANGED_HOST_CONTEXT RT_BIT(11)

#define HM_CHANGED_ALL_GUEST (  HM_CHANGED_GUEST_SEGMENT_REGS \
                              | HM_CHANGED_GUEST_CR0 \
                              | HM_CHANGED_GUEST_CR3 \
                              | HM_CHANGED_GUEST_CR4 \
                              | HM_CHANGED_GUEST_GDTR \
                              | HM_CHANGED_GUEST_IDTR \
                              | HM_CHANGED_GUEST_LDTR \
                              | HM_CHANGED_GUEST_TR \
                              | HM_CHANGED_GUEST_MSR \
                              | HM_CHANGED_GUEST_DEBUG \
                              | HM_CHANGED_GUEST_FPU)

#define HM_CHANGED_ALL (HM_CHANGED_ALL_GUEST | HM_CHANGED_HOST_CONTEXT)
/** @} */

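/* Illustrative sketch only (not from the original source): these bits live in
 * HMCPU::fContextUseFlags (declared below). A typical flow marks a register
 * dirty when the guest state changes and clears the bits once the VMCS/VMCB
 * has been refreshed:
 *
 *     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;    // CR0 was touched
 *     ...
 *     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
 *         hmR0SyncDirtyGuestState(pVM, pVCpu);                 // hypothetical helper
 *     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;
 */
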
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES 8

/** Size of the EPT identity page table (one page directory whose 1024 entries each map 4 MB, covering the entire 32-bit address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM SSM version. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION 5
# define HM_SSM_VERSION_NO_PATCHING 4
#else
# define HM_SSM_VERSION 4
# define HM_SSM_VERSION_NO_PATCHING 4
#endif
#define HM_SSM_VERSION_2_0_X 3

/**
 * Global per-CPU information. (host)
 */
typedef struct HMGLOBLCPUINFO
{
    /** The CPU ID. */
    RTCPUID idCpu;
    /** The memory object. */
    RTR0MEMOBJ hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t uCurrentAsid;
    /** TLB flush count. */
    uint32_t cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-CPU global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction. (32-bit GC pointer) */
    AVLOU32NODECORE Core;
    /** Original opcode. */
    uint8_t aOpcode[16];
    /** Instruction size. */
    uint32_t cbOp;
    /** Replacement opcode. */
    uint8_t aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t cbNewOp;
    /** Instruction type. */
    HMTPRINSTR enmType;
    /** Source operand. */
    uint32_t uSrcOperand;
    /** Destination operand. */
    uint32_t uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32 pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;

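/* Illustrative sketch only (not from the original source): patches are kept
 * in the HM::PatchTree AVL tree declared below, keyed by the guest address of
 * the patched instruction, so a lookup at fault time is a straight IPRT AVL
 * query:
 *
 *     PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)GCPtrInstr);
 *     if (pPatch)
 *         pPatch->cFaults++;   // the patched instruction faulted again
 */
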
/**
 * Switcher function, HC to RC.
 *
 * @param pVM Pointer to the VM.
 * @param uOffsetVMCPU VMCPU offset from pVM.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool fInitialized;

    /** Set when hardware acceleration is allowed. */
    bool fAllowed;

    /** Set if nested paging is enabled. */
    bool fNestedPaging;

    /** Set if nested paging is allowed. */
    bool fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool fLargePages;

    /** Set if we can support 64-bit guests. */
    bool fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool fGlobalInit;

    /** Set when TPR patching is active. */
    bool fTPRPatchingActive;
    bool u8Alignment[6];

    /** AND mask for copying register contents. */
    uint64_t u64RegisterMask;

    /** Maximum ASID allowed. */
    uint32_t uMaxAsid;

    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t cMaxResumeLoops;

    /** Guest-allocated memory for patching purposes. */
    RTGCPTR pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t cbGuestPatchMem;
    uint32_t uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32-bit to 64-bit switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0;

    /* AMD-V 64-bit VMRUN handler. */
    RTRCPTR pfnSVMGCVMRun64;

    /* VT-x 64-bit VMLAUNCH handler. */
    RTRCPTR pfnVMXGCStartVM64;

    /* RC handler to set up the 64-bit FPU state. */
    RTRCPTR pfnSaveGuestFPU64;

    /* RC handler to set up the 64-bit debug state. */
    RTRCPTR pfnSaveGuestDebug64;

    /* Test handler. */
    RTRCPTR pfnTest64;

    RTRCPTR uAlignment[2];
/*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    uint32_t u32Alignment[1]; */
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         * CPU. */
        bool fSupported;

        /** Set when we've enabled VMX. */
        bool fEnabled;

        /** Set if VPID is supported. */
        bool fVpid;

        /** Set if VT-x VPID is allowed. */
        bool fAllowVpid;

        /** Set if unrestricted guest execution is allowed (real and protected mode without paging). */
        bool fUnrestrictedGuest;

        /** Whether we're using the preemption timer or not. */
        bool fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t cPreemptTimerShift;

        bool uAlignment[1];

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS) pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *) pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTR0MEMOBJ hMemObjScratch;
        RTHCPHYS HCPhysScratch;
        R0PTRTYPE(uint8_t *) pbScratch;
#endif
        /** Ring-0 handler for VT-x tagged-TLB setup. */
        DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTlb, (PVM pVM, PVMCPU pVCpu));

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        uint32_t u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t hostCR4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t hostEFER;

        /** VMX MSR values. */
        struct
        {
            uint64_t feature_ctrl;
            uint64_t vmx_basic_info;
            VMX_CAPABILITY vmx_pin_ctls;
            VMX_CAPABILITY vmx_proc_ctls;
            VMX_CAPABILITY vmx_proc_ctls2;
            VMX_CAPABILITY vmx_exit;
            VMX_CAPABILITY vmx_entry;
            uint64_t vmx_misc;
            uint64_t vmx_cr0_fixed0;
            uint64_t vmx_cr0_fixed1;
            uint64_t vmx_cr4_fixed0;
            uint64_t vmx_cr4_fixed1;
            uint64_t vmx_vmcs_enum;
            uint64_t vmx_ept_vpid_caps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT enmFlushEpt;
        VMX_FLUSH_VPID enmFlushVpid;
    } vmx;


    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         * CPU. */
        bool fSupported;
        /** Set when we've enabled SVM. */
        bool fEnabled;
        /** Set if erratum 170 affects the AMD CPU. */
        bool fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12 KB). */
        RTR0MEMOBJ hMemObjIOBitmap;
        /** Physical address of the IO bitmap (12 KB). */
        RTHCPHYS HCPhysIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *) pvIOBitmap;

        /* HWCR MSR (for diagnostics). */
        uint64_t msrHwcr;

        /** SVM revision. */
        uint32_t u32Rev;

        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address.
     */
    AVLOU32TREE PatchTree;
    uint32_t cPatches;
    HMTPRPATCH aPatches[64];

    struct
    {
        uint32_t u32AMDFeatureECX;
        uint32_t u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t lLastError;

    /** Set if HMR0Init has been run. */
    bool fHMR0Init;
    bool u8Alignment1[7];

    STAMCOUNTER StatTprPatchSuccess;
    STAMCOUNTER StatTprPatchFailure;
    STAMCOUNTER StatTprReplaceSuccess;
    STAMCOUNTER StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY 128

/* Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t aMagic[16];
    uint64_t uMagic;
    uint64_t u64TimeEntry;
    uint64_t u64TimeSwitch;
    uint64_t cResume;
    uint64_t interPD;
    uint64_t pSwitcher;
    uint32_t uPos;
    uint32_t idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t cr2;
    struct
    {
        uint32_t cValidEntries;
        uint32_t uAlignment;
        uint32_t aField[VMCSCACHE_MAX_ENTRY];
        uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t cValidEntries;
        uint32_t uAlignment;
        uint32_t aField[VMCSCACHE_MAX_ENTRY];
        uint64_t aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef DEBUG
    struct
    {
        RTHCPHYS HCPhysCpuPage;
        RTHCPHYS HCPhysVMCS;
        RTGCPTR pCache;
        RTGCPTR pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS HCPhysVMCS;
        RTGCPTR pCache;
        RTGCPTR pCtx;
        uint64_t eflags;
        uint64_t cr8;
    } TestOut;
    struct
    {
        uint64_t param1;
        uint64_t param2;
        uint64_t param3;
        uint64_t param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;

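/* Illustrative sketch only (not from the original source): with
 * VMX_USE_CACHED_VMCS_ACCESSES, a VMWRITE can be queued in the cache and
 * replayed into the real VMCS later, along the lines of:
 *
 *     uint32_t i = pCache->Write.cValidEntries;
 *     Assert(i < VMCSCACHE_MAX_ENTRY);
 *     pCache->Write.aField[i] = idxVmcsField;   // e.g. a VMX_VMCS_* field index
 *     pCache->Write.aFieldVal[i] = u64Value;
 *     pCache->Write.cValidEntries = i + 1;
 */
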
/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;

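/* Illustrative sketch only (not from the original source): ring-0 enters the
 * guest through these pointers, which are stored per VCPU below (see
 * HMCPU::vmx.pfnStartVM and HMCPU::svm.pfnVMRun), roughly:
 *
 *     int rc = pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx,
 *                                         &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
 */
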
/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Old style FPU reporting trap mask override performed (optimization). */
    bool fFPUOldStyleOverride;
    /** Set if we don't have to flush the TLB on VM entry. */
    bool fResumeVM;
    /** Set if we need to flush the TLB during the world switch. */
    bool fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool fActive;
    /** Set once the TLB has been checked; stays set until we return from the world switch. */
    volatile bool fCheckedTLBFlush;
    uint8_t u8Alignment[3];

    /** World switch exit counter. */
    volatile uint32_t cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    uint32_t fContextUseFlags;
    /** ID of the last CPU we were executing code on (NIL_RTCPUID for the first time). */
    RTCPUID idLastCpu;
    /** TLB flush count. */
    uint32_t cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t uCurrentAsid;
    uint32_t u32Alignment;

    /* Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t u64HostTscAux;

    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS HCPhysVMCS;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ hMemObjVMCS;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *) pvVMCS;
        /** Ring-0 handler for VT-x (the StartVM function). */
        PFNHMVMXSTARTVM pfnStartVM;

#if HC_ARCH_BITS == 32
        uint32_t u32Alignment;
#endif

        /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
        uint64_t u64ProcCtls;
        /** Current VMX_VMCS_CTRL_PROC_EXEC2_CONTROLS. */
        uint64_t u64ProcCtls2;
        /** Current VMX_VMCS_CTRL_EXIT_CONTROLS. */
        uint64_t u64ExitCtls;
        /** Current VMX_VMCS_CTRL_ENTRY_CONTROLS. */
        uint64_t u64EntryCtls;
        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS HCPhysVAPIC;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ hMemObjVAPIC;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *) pbVAPIC;

        /** Current CR0 mask. */
        uint64_t cr0_mask;
        /** Current CR4 mask. */
        uint64_t cr4_mask;
        /** Current exception bitmap. */
        uint32_t u32XcptBitmap;
        uint32_t uAlignment0;
        /** Current EPTP. */
        RTHCPHYS GCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *) pvMsrBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        RTHCPHYS HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         * (used for guest MSRs). */
        RTR0MEMOBJ hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        R0PTRTYPE(void *) pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *) pvHostMsr;

        /* Number of automatically loaded/restored guest MSRs during the world switch. */
        uint32_t cGuestMsrs;
        uint32_t uAlignment;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /* Last used TSC offset value (cached). */
        uint64_t u64TSCOffset;
        /** VMCS cache. */
        VMCSCACHE VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86EFLAGS eflags;
            uint32_t fValid;
        } RealMode;

        struct
        {
            uint64_t u64VMCSPhys;
            uint32_t ulVMCSRevision;
            uint32_t ulInstrError;
            uint32_t ulExitReason;
            RTCPUID idEnteredCpu;
            RTCPUID idCurrentCpu;
            uint32_t padding;
        } lasterror;

        /** The last seen guest paging mode (by VT-x). */
        PGMMODE enmLastSeenGuestMode;
        /** Current guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE enmCurrGuestMode;
        /** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE enmPrevGuestMode;
    } vmx;


    struct
    {
        /** R0 memory object for the host VM control block (VMCB). */
        RTR0MEMOBJ hMemObjVMCBHost;
        /** Physical address of the host VM control block (VMCB). */
        RTHCPHYS HCPhysVMCBHost;
        /** Virtual address of the host VM control block (VMCB). */
        R0PTRTYPE(void *) pvVMCBHost;

        /** R0 memory object for the VM control block (VMCB). */
        RTR0MEMOBJ hMemObjVMCB;
        /** Physical address of the VM control block (VMCB). */
        RTHCPHYS HCPhysVMCB;
        /** Virtual address of the VM control block (VMCB). */
        R0PTRTYPE(void *) pvVMCB;

        /** Ring-0 handler for AMD-V (the VMRun function). */
        PFNHMSVMVMRUN pfnVMRun;

        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *) pvMsrBitmap;
    } svm;


    /** Event injection state. */
    struct
    {
        uint32_t fPending;
        uint32_t errCode;
        uint64_t intInfo;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool fEnabled;
        uint8_t u8Align[7];

        /** RIP at the start of the IO code we wish to emulate in the recompiler. */
        RTGCPTR GCPtrFunctionEip;

        uint64_t cr0;
    } EmulateIoBlock;

    struct
    {
        /* Pending IO operation type. */
        HMPENDINGIO enmType;
        uint32_t uPadding;
        RTGCPTR GCPtrRip;
        RTGCPTR GCPtrRipNext;
        union
        {
            struct
            {
                unsigned uPort;
                unsigned uAndVal;
                unsigned cbSize;
            } Port;
            uint64_t aRaw[2];
        } s;
    } PendingIO;

    /** Current shadow paging mode. */
    PGMMODE enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HMR0Enter and cleared in HMR0Leave. */
    RTCPUID idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        unsigned cPages;
    } TlbShootdown;

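    /* Illustrative sketch only (not from the original source): queueing a TLB
     * shootdown page; once the small queue overflows, it is cheaper to fall
     * back to a full TLB flush on the next world switch:
     *
     *     if (pVCpu->hm.s.TlbShootdown.cPages >= HM_MAX_TLB_SHOOTDOWN_PAGES)
     *         pVCpu->hm.s.fForceTLBFlush = true;   // give up, flush everything
     *     else
     *         pVCpu->hm.s.TlbShootdown.aPages[pVCpu->hm.s.TlbShootdown.cPages++] = GCPtrPage;
     */
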
    /** For saving stack space, the disassembler state is allocated here instead of
     * on the stack. */
    DISCPUSTATE DisState;

    uint32_t padding2[1];

    STAMPROFILEADV StatEntry;
    STAMPROFILEADV StatExit1;
    STAMPROFILEADV StatExit2;
#if 1 /* temporary for tracking down darwin issues. */
    STAMPROFILEADV StatExit2Sub1;
    STAMPROFILEADV StatExit2Sub2;
    STAMPROFILEADV StatExit2Sub3;
#endif
    STAMPROFILEADV StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV StatWorldSwitch3264;
#endif
    STAMPROFILEADV StatPoke;
    STAMPROFILEADV StatSpinPoke;
    STAMPROFILEADV StatSpinPokeFailed;

    STAMCOUNTER StatIntInject;

    STAMCOUNTER StatExitShadowNM;
    STAMCOUNTER StatExitGuestNM;
    STAMCOUNTER StatExitShadowPF;
    STAMCOUNTER StatExitShadowPFEM;
    STAMCOUNTER StatExitGuestPF;
    STAMCOUNTER StatExitGuestUD;
    STAMCOUNTER StatExitGuestSS;
    STAMCOUNTER StatExitGuestNP;
    STAMCOUNTER StatExitGuestGP;
    STAMCOUNTER StatExitGuestDE;
    STAMCOUNTER StatExitGuestDB;
    STAMCOUNTER StatExitGuestMF;
    STAMCOUNTER StatExitGuestBP;
    STAMCOUNTER StatExitGuestXF;
    STAMCOUNTER StatExitGuestXcpUnk;
    STAMCOUNTER StatExitInvlpg;
    STAMCOUNTER StatExitInvd;
    STAMCOUNTER StatExitCpuid;
    STAMCOUNTER StatExitRdtsc;
    STAMCOUNTER StatExitRdtscp;
    STAMCOUNTER StatExitRdpmc;
    STAMCOUNTER StatExitCli;
    STAMCOUNTER StatExitSti;
    STAMCOUNTER StatExitPushf;
    STAMCOUNTER StatExitPopf;
    STAMCOUNTER StatExitIret;
    STAMCOUNTER StatExitInt;
    STAMCOUNTER StatExitCRxWrite[16];
    STAMCOUNTER StatExitCRxRead[16];
    STAMCOUNTER StatExitDRxWrite;
    STAMCOUNTER StatExitDRxRead;
    STAMCOUNTER StatExitRdmsr;
    STAMCOUNTER StatExitWrmsr;
    STAMCOUNTER StatExitClts;
    STAMCOUNTER StatExitHlt;
    STAMCOUNTER StatExitMwait;
    STAMCOUNTER StatExitMonitor;
    STAMCOUNTER StatExitLMSW;
    STAMCOUNTER StatExitIOWrite;
    STAMCOUNTER StatExitIORead;
    STAMCOUNTER StatExitIOStringWrite;
    STAMCOUNTER StatExitIOStringRead;
    STAMCOUNTER StatExitIrqWindow;
    STAMCOUNTER StatExitMaxResume;
    STAMCOUNTER StatExitPreemptPending;
    STAMCOUNTER StatExitMtf;
    STAMCOUNTER StatIntReinject;
    STAMCOUNTER StatPendingHostIrq;

    STAMCOUNTER StatFlushPage;
    STAMCOUNTER StatFlushPageManual;
    STAMCOUNTER StatFlushPhysPageManual;
    STAMCOUNTER StatFlushTlb;
    STAMCOUNTER StatFlushTlbManual;
    STAMCOUNTER StatFlushPageInvlpg;
    STAMCOUNTER StatFlushTlbWorldSwitch;
    STAMCOUNTER StatNoFlushTlbWorldSwitch;
    STAMCOUNTER StatFlushTlbCRxChange;
    STAMCOUNTER StatFlushAsid;
    STAMCOUNTER StatFlushNestedPaging;
    STAMCOUNTER StatFlushTlbInvlpga;
    STAMCOUNTER StatTlbShootdown;
    STAMCOUNTER StatTlbShootdownFlush;

    STAMCOUNTER StatSwitchGuestIrq;
    STAMCOUNTER StatSwitchToR3;

    STAMCOUNTER StatTscOffset;
    STAMCOUNTER StatTscIntercept;
    STAMCOUNTER StatTscInterceptOverFlow;

    STAMCOUNTER StatExitReasonNpf;
    STAMCOUNTER StatDRxArmed;
    STAMCOUNTER StatDRxContextSwitch;
    STAMCOUNTER StatDRxIoCheck;

    STAMCOUNTER StatLoadMinimal;
    STAMCOUNTER StatLoadFull;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER StatFpu64SwitchBack;
    STAMCOUNTER StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER) paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;


#ifdef IN_RING0

VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


#ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
#else
# define HMDumpRegs(a, b, c) do { } while (0)
# define HMR0DumpDescriptor(a, b, c) do { } while (0)
#endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param pGdtr Where to store the 64-bit GDTR.
 * @param pIdtr Where to store the 64-bit IDTR.
 */
DECLASM(void) hmR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) hmR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif
