VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@ 45351

Last change on this file since 45351 was 45351, checked in by vboxsync, 12 years ago

VMM/VMMR0: HM bits, remove unused flags.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 35.0 KB
Line 
1/* $Id: HMInternal.h 45351 2013-04-04 20:24:29Z vboxsync $ */
2/** @file
3 * HM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___HMInternal_h
19#define ___HMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/vmm/em.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/dis.h>
26#include <VBox/vmm/hm.h>
27#include <VBox/vmm/hm_vmx.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/cpum.h>
30#include <iprt/memobj.h>
31#include <iprt/cpuset.h>
32#include <iprt/mp.h>
33#include <iprt/avl.h>
34
/* Enable 64-bit guest support when the host is 64-bit capable (64-bit host,
 * hybrid 32/64 kernel, or explicitly requested via VBOX_WITH_64_BITS_GUESTS). */
#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
/* Enable 64 bits guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

/* Use cached (batched) VMCS field accesses: always with the old VT-x code,
 * otherwise only on 32-bit hosts without a hybrid 32/64 kernel. */
#ifdef VBOX_WITH_OLD_VTX_CODE
# define VMX_USE_CACHED_VMCS_ACCESSES
#elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/* Emulate real-mode guests under VT-x; uses the TSS and identity page table
 * sized by HM_VTX_TSS_SIZE / HM_EPT_IDENTITY_PG_TABLE_SIZE below. */
#define HM_VMX_EMULATE_REALMODE

/* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we
 * handle this MSR manually. See @bugref{6208}. This is clearly visible while
 * booting Solaris 11 (11.1 b19) VMs with 2 Cpus.
 *
 * Note: don't forget to update the assembly files while modifying this!
 */
# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
56RT_C_DECLS_BEGIN
57
58
59/** @defgroup grp_hm_int Internal
60 * @ingroup grp_hm
61 * @internal
62 * @{
63 */
64
65
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT        0x100
/** Mask applied to an exit reason before indexing the exit-reason counter
 *  array (presumably paStatExitReason; keeps the index below
 *  MAX_EXITREASON_STAT). */
#define MASK_EXITREASON_STAT       0xff
/** Mask applied to an interrupt vector before indexing the injected-IRQ
 *  counter array (presumably paStatInjectedIrqs). */
#define MASK_INJECT_IRQ_STAT       0xff
70
/** @name Changed flags
 * These flags are used to keep track of which important registers that
 * have been changed since last they were reset.
 * @{
 */
#ifdef VBOX_WITH_OLD_VTX_CODE
/* Bit layout used by the old VT-x code. */
# define HM_CHANGED_GUEST_FPU                RT_BIT(0)
# define HM_CHANGED_GUEST_CR0                RT_BIT(1)
# define HM_CHANGED_GUEST_CR3                RT_BIT(2)
# define HM_CHANGED_GUEST_CR4                RT_BIT(3)
# define HM_CHANGED_GUEST_GDTR               RT_BIT(4)
# define HM_CHANGED_GUEST_IDTR               RT_BIT(5)
# define HM_CHANGED_GUEST_LDTR               RT_BIT(6)
# define HM_CHANGED_GUEST_TR                 RT_BIT(7)
# define HM_CHANGED_GUEST_MSR                RT_BIT(8)
# define HM_CHANGED_GUEST_SEGMENT_REGS       RT_BIT(9)
# define HM_CHANGED_GUEST_DEBUG              RT_BIT(10)
# define HM_CHANGED_HOST_CONTEXT             RT_BIT(11)
/* Mask of all guest-state flags; HM_CHANGED_HOST_CONTEXT is deliberately
 * excluded (see HM_CHANGED_ALL below). */
# define HM_CHANGED_ALL_GUEST               (  HM_CHANGED_GUEST_SEGMENT_REGS \
                                             | HM_CHANGED_GUEST_CR0          \
                                             | HM_CHANGED_GUEST_CR3          \
                                             | HM_CHANGED_GUEST_CR4          \
                                             | HM_CHANGED_GUEST_GDTR         \
                                             | HM_CHANGED_GUEST_IDTR         \
                                             | HM_CHANGED_GUEST_LDTR         \
                                             | HM_CHANGED_GUEST_TR           \
                                             | HM_CHANGED_GUEST_MSR          \
                                             | HM_CHANGED_GUEST_DEBUG        \
                                             | HM_CHANGED_GUEST_FPU)
#else
/* Bit layout used by the new VT-x code (finer-grained than the old layout). */
# define HM_CHANGED_GUEST_RIP                RT_BIT(0)
# define HM_CHANGED_GUEST_RSP                RT_BIT(1)
# define HM_CHANGED_GUEST_RFLAGS             RT_BIT(2)
# define HM_CHANGED_GUEST_CR0                RT_BIT(3)
# define HM_CHANGED_GUEST_CR3                RT_BIT(4)
# define HM_CHANGED_GUEST_CR4                RT_BIT(5)
# define HM_CHANGED_GUEST_GDTR               RT_BIT(6)
# define HM_CHANGED_GUEST_IDTR               RT_BIT(7)
# define HM_CHANGED_GUEST_LDTR               RT_BIT(8)
# define HM_CHANGED_GUEST_TR                 RT_BIT(9)
# define HM_CHANGED_GUEST_SEGMENT_REGS       RT_BIT(10)
# define HM_CHANGED_GUEST_DEBUG              RT_BIT(11)
# define HM_CHANGED_GUEST_FS_BASE_MSR        RT_BIT(12)
# define HM_CHANGED_GUEST_GS_BASE_MSR        RT_BIT(13)
# define HM_CHANGED_GUEST_SYSENTER_CS_MSR    RT_BIT(14)
# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR   RT_BIT(15)
# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR   RT_BIT(16)
# define HM_CHANGED_GUEST_INTR_STATE         RT_BIT(17)
# define HM_CHANGED_VMX_GUEST_AUTO_MSRS      RT_BIT(18)
# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE RT_BIT(19)
# define HM_CHANGED_VMX_GUEST_APIC_STATE     RT_BIT(20)
# define HM_CHANGED_VMX_ENTRY_CTLS           RT_BIT(21)
# define HM_CHANGED_VMX_EXIT_CTLS            RT_BIT(22)

# define HM_CHANGED_HOST_CONTEXT             RT_BIT(23)

/* Mask of all guest-state flags; HM_CHANGED_HOST_CONTEXT is deliberately
 * excluded (see HM_CHANGED_ALL below). */
# define HM_CHANGED_ALL_GUEST               (  HM_CHANGED_GUEST_RIP                \
                                             | HM_CHANGED_GUEST_RSP                \
                                             | HM_CHANGED_GUEST_RFLAGS             \
                                             | HM_CHANGED_GUEST_CR0                \
                                             | HM_CHANGED_GUEST_CR3                \
                                             | HM_CHANGED_GUEST_CR4                \
                                             | HM_CHANGED_GUEST_GDTR               \
                                             | HM_CHANGED_GUEST_IDTR               \
                                             | HM_CHANGED_GUEST_LDTR               \
                                             | HM_CHANGED_GUEST_TR                 \
                                             | HM_CHANGED_GUEST_SEGMENT_REGS       \
                                             | HM_CHANGED_GUEST_DEBUG              \
                                             | HM_CHANGED_GUEST_FS_BASE_MSR        \
                                             | HM_CHANGED_GUEST_GS_BASE_MSR        \
                                             | HM_CHANGED_GUEST_SYSENTER_CS_MSR    \
                                             | HM_CHANGED_GUEST_SYSENTER_EIP_MSR   \
                                             | HM_CHANGED_GUEST_SYSENTER_ESP_MSR   \
                                             | HM_CHANGED_GUEST_INTR_STATE         \
                                             | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
                                             | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                             | HM_CHANGED_VMX_GUEST_APIC_STATE     \
                                             | HM_CHANGED_VMX_ENTRY_CTLS           \
                                             | HM_CHANGED_VMX_EXIT_CTLS)
#endif

/** Everything: all guest-state flags plus the host context flag. */
#define HM_CHANGED_ALL                      (HM_CHANGED_ALL_GUEST | HM_CHANGED_HOST_CONTEXT)
/** @} */
154
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES      8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed (identity page table + real-mode TSS). */
#define HM_VTX_TOTAL_DEVHEAP_MEM        (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM saved-state (SSM) version; bumped to 5 when TPR guest patching data
 *  was added to the saved state. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION                 5
# define HM_SSM_VERSION_NO_PATCHING     4
#else
# define HM_SSM_VERSION                 4
# define HM_SSM_VERSION_NO_PATCHING     4
#endif
/** Legacy saved-state version (presumably the 2.0.x release line). */
#define HM_SSM_VERSION_2_0_X            3
178
/**
 * Global per-cpu information. (host)
 *
 * One instance exists per host CPU; tracks the hardware-virtualization
 * enable state and current ASID/VPID bookkeeping for that CPU.
 */
typedef struct HMGLOBLCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object (R0 allocation associated with this CPU). */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentAsid;
    /** TLB flush count. */
    uint32_t            cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool       fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
203
/** Type of a pending I/O operation (see HMCPU::PendingIO). */
typedef enum
{
    /** No pending operation (zero-initialised state). */
    HMPENDINGIO_INVALID = 0,
    /** Pending port read. */
    HMPENDINGIO_PORT_READ,
    /** Pending port write. */
    HMPENDINGIO_PORT_WRITE,
    /** Pending string port read. */
    HMPENDINGIO_STRING_READ,
    /** Pending string port write. */
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;
214
215
/** Classification of a patched TPR-access instruction (see HMTPRPATCH). */
typedef enum
{
    /** Invalid/unset type. */
    HMTPRINSTR_INVALID,
    /** TPR read. */
    HMTPRINSTR_READ,
    /** TPR read followed by a shift-right by 4 (name-inferred; verify
     *  against the ring-3 patcher). */
    HMTPRINSTR_READ_SHR4,
    /** TPR write from a register. */
    HMTPRINSTR_WRITE_REG,
    /** TPR write of an immediate value. */
    HMTPRINSTR_WRITE_IMM,
    /** Instruction replaced by a jump (see HMTPRPATCH::pJumpTarget). */
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;
227
/**
 * One TPR-access patch record: the original instruction, its replacement,
 * and bookkeeping for the patch (keyed by guest instruction address).
 */
typedef struct
{
    /** The key is the address of patched instruction. (32 bits GC ptr) */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
253
/**
 * Switcher function, HC to RC.
 *
 * @param   pVM             Pointer to the VM.
 * @param   uOffsetVMCPU    VMCPU offset from pVM.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK (int) FNHMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
264
/**
 * HM VM Instance data.
 * Changes to this must checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set when hardware acceleration is allowed. */
    bool                        fAllowed;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    /** Explicit padding keeping the following members naturally aligned. */
    bool                        u8Alignment[6];

    /** And mask for copying register contents. */
    uint64_t                    u64RegisterMask;

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;

    /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    /** Explicit structure padding. */
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;

    /** AMD-V 64 bits vmrun handler. */
    RTRCPTR                     pfnSVMGCVMRun64;

    /** VT-x 64 bits vmlaunch handler. */
    RTRCPTR                     pfnVMXGCStartVM64;

    /** RC handler to setup the 64 bits FPU state. */
    RTRCPTR                     pfnSaveGuestFPU64;

    /** RC handler to setup the 64 bits debug state. */
    RTRCPTR                     pfnSaveGuestDebug64;

    /** Test handler. */
    RTRCPTR                     pfnTest64;

    /** Explicit padding. */
    RTRCPTR                     uAlignment[2];
/*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    uint32_t                    u32Alignment[1]; */
#endif

    /** VT-x (Intel VMX) specific data. */
    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         * CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVpid;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;

        /** Set if unrestricted guest execution is allowed (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Explicit padding. */
        bool                        uAlignment[1];

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        /** Scratch page for crash-dump debugging (see VMCSCACHE magic markers). */
        RTR0MEMOBJ                  hMemObjScratch;
        RTHCPHYS                    HCPhysScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif
        /** Ring 0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(void, pfnFlushTaggedTlb, (PVM pVM, PVMCPU pVCpu));

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        /** Explicit padding. */
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t                    hostEFER;

        /** VMX MSR values (capability MSRs read during ring-0 init). */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_ept_vpid_caps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEpt;
        VMX_FLUSH_VPID              enmFlushVpid;
    } vmx;

    /** AMD-V (SVM) specific data. */
    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         * CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /** HWCR MSR (for diagnostics). */
        uint64_t                    msrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address
     */
    AVLOU32TREE                 PatchTree;
    /** Number of entries used in aPatches. */
    uint32_t                    cPatches;
    /** Storage backing the patch tree nodes. */
    HMTPRPATCH                  aPatches[64];

    /** Cached AMD CPUID feature bits. */
    struct
    {
        /** CPUID 0x80000001 ECX feature bits. */
        uint32_t                    u32AMDFeatureECX;
        /** CPUID 0x80000001 EDX feature bits. */
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t                     lLastError;

    /** HMR0Init was run. */
    bool                        fHMR0Init;
    /** Explicit padding. */
    bool                        u8Alignment1[7];

    /** TPR patching statistics. */
    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccess;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;
479
/** Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY                 128

/**
 * Structure for storing read and write VMCS actions.
 * Batches VMCS field accesses so they can be applied/collected in one go
 * (see VMX_USE_CACHED_VMCS_ACCESSES).
 */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t         aMagic[16];
    uint64_t        uMagic;
    uint64_t        u64TimeEntry;
    uint64_t        u64TimeSwitch;
    uint64_t        cResume;
    uint64_t        interPD;
    uint64_t        pSwitcher;
    uint32_t        uPos;
    uint32_t        idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t        cr2;
    /** Pending VMCS writes: parallel arrays of field ids and 64-bit values. */
    struct
    {
        /** Number of valid entries in aField/aFieldVal. */
        uint32_t    cValidEntries;
        /** Explicit padding. */
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    /** Cached VMCS reads: parallel arrays of field ids and 64-bit values. */
    struct
    {
        /** Number of valid entries in aField/aFieldVal. */
        uint32_t    cValidEntries;
        /** Explicit padding. */
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef DEBUG
    /** Debug builds only: values captured on the way into the world switch. */
    struct
    {
        RTHCPHYS    HCPhysCpuPage;
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    /** Debug builds only: values captured on the way out of the world switch. */
    struct
    {
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    /** Debug builds only: general-purpose scratch values. */
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
541
/** VMX StartVM function (assembly world-switch entry for VT-x). */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function (assembly world-switch entry for AMD-V). */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
551
/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Old style FPU reporting trap mask override performed (optimization). */
    bool                        fFPUOldStyleOverride;
    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Explicit padding. */
    uint8_t                     u8Alignment[3];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;
    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    /** Explicit padding. */
    uint32_t                    u32Alignment;

    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    /** VT-x per-VCPU data. */
    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;
        /** Ring 0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        /** Explicit padding. */
        uint32_t                    u32Alignment1;
#endif

        /** Current VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2_CONTROLS. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT_CONTROLS. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY_CONTROLS. */
        uint32_t                    u32EntryCtls;
        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;
#if HC_ARCH_BITS == 32
        /** Explicit padding. */
        uint32_t                    u32Alignment2;
#endif

        /** Current CR0 mask. */
        uint64_t                    cr0_mask;
        /** Current CR4 mask. */
        uint64_t                    cr4_mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        uint32_t                    fUpdatedGuestState;
        /** Current EPTP. */
        RTHCPHYS                    GCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         * (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         * for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Number of automatically loaded/restored guest MSRs during the world switch. */
        uint32_t                    cGuestMsrs;
        /** Explicit padding. */
        uint32_t                    uAlignment;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last use TSC offset value. (cached) */
        uint64_t                    u64TSCOffset;
        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            /** Saved segment attributes (restored when leaving real/V86 mode). */
            X86DESCATTR                 uAttrCS;
            X86DESCATTR                 uAttrDS;
            X86DESCATTR                 uAttrES;
            X86DESCATTR                 uAttrFS;
            X86DESCATTR                 uAttrGS;
            X86DESCATTR                 uAttrSS;
            /** Saved eflags. */
            X86EFLAGS                   eflags;
            /** Set while real-on-V86 emulation is active. */
            uint32_t                    fRealOnV86Active;
        } RealMode;

        /** Diagnostic state captured for the last VMX error. */
        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                u32VMCSRevision;
            uint32_t                u32InstrError;
            uint32_t                u32ExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            /** Explicit padding. */
            uint32_t                padding;
        } lasterror;

        /** The last seen guest paging mode (by VT-x). */
        PGMMODE                     enmLastSeenGuestMode;
        /** Current guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE                     enmCurrGuestMode;
        /** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE                     enmPrevGuestMode;
    } vmx;

    /** AMD-V per-VCPU data. */
    struct
    {
        /** R0 memory object for the host VM control block (VMCB). */
        RTR0MEMOBJ                  hMemObjVMCBHost;
        /** Physical address of the host VM control block (VMCB). */
        RTHCPHYS                    HCPhysVMCBHost;
        /** Virtual address of the host VM control block (VMCB). */
        R0PTRTYPE(void *)           pvVMCBHost;

        /** R0 memory object for the VM control block (VMCB). */
        RTR0MEMOBJ                  hMemObjVMCB;
        /** Physical address of the VM control block (VMCB). */
        RTHCPHYS                    HCPhysVMCB;
        /** Virtual address of the VM control block (VMCB). */
        R0PTRTYPE(void *)           pvVMCB;

        /** Ring 0 handler for AMD-V (the VMRun world-switch function). */
        PFNHMSVMVMRUN               pfnVMRun;

        /** R0 memory object for the MSR bitmap (8kb). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8kb). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;
    } svm;

    /** Event injection state. */
    struct
    {
        /** Non-zero when an event is pending injection. */
        uint32_t                    fPending;
        /** Error code of the pending event. */
        uint32_t                    u32ErrCode;
        /** Interruption-information of the pending event. */
        uint64_t                    u64IntrInfo;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool                        fEnabled;
        /** Explicit padding. */
        uint8_t                     u8Align[7];

        /** RIP at the start of the io code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        /** CR0 value at the time the block was entered. */
        uint64_t                    cr0;
    } EmulateIoBlock;

    /** Pending I/O operation to complete after returning to ring-3. */
    struct
    {
        /** Pending IO operation type. */
        HMPENDINGIO                 enmType;
        /** Explicit padding. */
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                unsigned            uPort;
                unsigned            uAndVal;
                unsigned            cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     * VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR                     aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        unsigned                    cPages;
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     * on the stack. */
    DISCPUSTATE                 DisState;

    /** Explicit padding. */
    uint32_t                    padding2[1];

    /** @name Statistics.
     * @{ */
    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
#if 1 /* temporary for tracking down darwin issues. */
    STAMPROFILEADV              StatExit2Sub1;
    STAMPROFILEADV              StatExit2Sub2;
    STAMPROFILEADV              StatExit2Sub3;
#endif
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatIntInject;

    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;       /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitPreemptPending;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatIntReinject;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;

    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;
    STAMCOUNTER                 StatTscInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    /** Per-exit-reason counters; R3 and R0 mappings of the same arrays. */
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    /** Per-injected-IRQ counters; R3 and R0 mappings of the same arrays. */
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
    /** @} */
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;
909
910
#ifdef IN_RING0

/** Gets the HM global per-cpu info for the CPU we are executing on. */
VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void);
/** Gets the HM global per-cpu info for the given host CPU id. */
VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


#ifdef VBOX_STRICT
/** Debug helper: dumps the guest CPU context (strict builds only). */
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
/** Debug helper: dumps one descriptor-table entry (strict builds only). */
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
#else
/* No-op stubs in non-strict builds. */
# define HMDumpRegs(a, b ,c) do { } while (0)
# define HMR0DumpDescriptor(a, b, c) do { } while (0)
#endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
/* Assembly wrappers used when the kernel itself uses XMM registers;
 * presumably save/restore XMM state around the world switch - see the
 * assembly implementation. */
DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) hmR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) hmR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */
946
947/** @} */
948
949RT_C_DECLS_END
950
951#endif
952
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette