VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@46441

Last change on this file since 46441 was 46441, checked in by vboxsync, 12 years ago

VMM/HMSVMR0: AMD-V bits.

/* $Id: HMInternal.h 46441 2013-06-07 13:38:58Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#ifdef VBOX_WITH_OLD_VTX_CODE
# define VMX_USE_CACHED_VMCS_ACCESSES
#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0
# define HM_PROFILE_EXIT_DISPATCH
#endif
/* MSR auto load/store did not work for the KERNEL_GS_BASE MSR at one point, so
 * we used to handle that MSR manually; see @bugref{6208}. This was clearly
 * visible while booting Solaris 11 (11.1 b19) VMs with 2 CPUs. That is no
 * longer the case and we now always auto load/store the KERNEL_GS_BASE MSR.
 *
 * Note: don't forget to update the assembly files while modifying this!
 */
/** @todo This define should always be in effect and the define itself removed
 *        after 'sufficient' testing. */
# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */


/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT     0x100
#define MASK_EXITREASON_STAT    0xff
#define MASK_INJECT_IRQ_STAT    0xff

/** @name HM changed flags.
 * These flags are used to keep track of which important registers have been
 * changed since they were last reset.
 * @{
 */
#ifdef VBOX_WITH_OLD_VTX_CODE
# define HM_CHANGED_GUEST_FPU                   RT_BIT(0)
# define HM_CHANGED_GUEST_CR0                   RT_BIT(1)
# define HM_CHANGED_GUEST_CR3                   RT_BIT(2)
# define HM_CHANGED_GUEST_CR4                   RT_BIT(3)
# define HM_CHANGED_GUEST_GDTR                  RT_BIT(4)
# define HM_CHANGED_GUEST_IDTR                  RT_BIT(5)
# define HM_CHANGED_GUEST_LDTR                  RT_BIT(6)
# define HM_CHANGED_GUEST_TR                    RT_BIT(7)
# define HM_CHANGED_GUEST_MSR                   RT_BIT(8)
# define HM_CHANGED_GUEST_SEGMENT_REGS          RT_BIT(9)
# define HM_CHANGED_GUEST_DEBUG                 RT_BIT(10)
# define HM_CHANGED_HOST_CONTEXT                RT_BIT(11)
# define HM_CHANGED_ALL_GUEST                   (  HM_CHANGED_GUEST_SEGMENT_REGS \
                                                 | HM_CHANGED_GUEST_CR0          \
                                                 | HM_CHANGED_GUEST_CR3          \
                                                 | HM_CHANGED_GUEST_CR4          \
                                                 | HM_CHANGED_GUEST_GDTR         \
                                                 | HM_CHANGED_GUEST_IDTR         \
                                                 | HM_CHANGED_GUEST_LDTR         \
                                                 | HM_CHANGED_GUEST_TR           \
                                                 | HM_CHANGED_GUEST_MSR          \
                                                 | HM_CHANGED_GUEST_DEBUG        \
                                                 | HM_CHANGED_GUEST_FPU)
#else
# define HM_CHANGED_GUEST_RIP                   RT_BIT(0)
# define HM_CHANGED_GUEST_RSP                   RT_BIT(1)
# define HM_CHANGED_GUEST_RFLAGS                RT_BIT(2)
# define HM_CHANGED_GUEST_CR0                   RT_BIT(3)
# define HM_CHANGED_GUEST_CR3                   RT_BIT(4)
# define HM_CHANGED_GUEST_CR4                   RT_BIT(5)
# define HM_CHANGED_GUEST_GDTR                  RT_BIT(6)
# define HM_CHANGED_GUEST_IDTR                  RT_BIT(7)
# define HM_CHANGED_GUEST_LDTR                  RT_BIT(8)
# define HM_CHANGED_GUEST_TR                    RT_BIT(9)
# define HM_CHANGED_GUEST_SEGMENT_REGS          RT_BIT(10)
# define HM_CHANGED_GUEST_DEBUG                 RT_BIT(11)
# define HM_CHANGED_GUEST_SYSENTER_CS_MSR       RT_BIT(12)
# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR      RT_BIT(13)
# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR      RT_BIT(14)
/* VT-x specific state. */
# define HM_CHANGED_VMX_GUEST_AUTO_MSRS         RT_BIT(15)
# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE    RT_BIT(16)
# define HM_CHANGED_VMX_GUEST_APIC_STATE        RT_BIT(17)
# define HM_CHANGED_VMX_ENTRY_CTLS              RT_BIT(18)
# define HM_CHANGED_VMX_EXIT_CTLS               RT_BIT(19)
# define HM_CHANGED_VMX_RESERVED1               RT_BIT(20)
# define HM_CHANGED_VMX_RESERVED2               RT_BIT(21)
/* AMD-V specific state. */
# define HM_CHANGED_SVM_INTERCEPT_VECTORS       RT_BIT(15)
# define HM_CHANGED_SVM_IOPM_MSRPM_BITMAPS      RT_BIT(16)
# define HM_CHANGED_SVM_GUEST_ASID              RT_BIT(17)
# define HM_CHANGED_SVM_GUEST_TPR               RT_BIT(18)
# define HM_CHANGED_SVM_GUEST_NP                RT_BIT(19)
# define HM_CHANGED_SVM_LBR                     RT_BIT(20)
# define HM_CHANGED_SVM_AVIC                    RT_BIT(21)

# define HM_CHANGED_HOST_CONTEXT                RT_BIT(22)
# define HM_CHANGED_ALL_GUEST                   (  HM_CHANGED_GUEST_RIP                \
                                                 | HM_CHANGED_GUEST_RSP                \
                                                 | HM_CHANGED_GUEST_RFLAGS             \
                                                 | HM_CHANGED_GUEST_CR0                \
                                                 | HM_CHANGED_GUEST_CR3                \
                                                 | HM_CHANGED_GUEST_CR4                \
                                                 | HM_CHANGED_GUEST_GDTR               \
                                                 | HM_CHANGED_GUEST_IDTR               \
                                                 | HM_CHANGED_GUEST_LDTR               \
                                                 | HM_CHANGED_GUEST_TR                 \
                                                 | HM_CHANGED_GUEST_SEGMENT_REGS       \
                                                 | HM_CHANGED_GUEST_DEBUG              \
                                                 | HM_CHANGED_GUEST_SYSENTER_CS_MSR    \
                                                 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR   \
                                                 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR   \
                                                 | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
                                                 | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                                 | HM_CHANGED_VMX_GUEST_APIC_STATE     \
                                                 | HM_CHANGED_VMX_ENTRY_CTLS           \
                                                 | HM_CHANGED_VMX_EXIT_CTLS            \
                                                 | HM_CHANGED_VMX_RESERVED1            \
                                                 | HM_CHANGED_VMX_RESERVED2)
#endif

#define HM_CHANGED_ALL                          (HM_CHANGED_ALL_GUEST | HM_CHANGED_HOST_CONTEXT)
/** @} */
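
/*
 * A rough usage sketch of the HM_CHANGED_* dirty flags above (not from the
 * original file): code that modifies guest state ORs the matching flag into
 * HMCPU::fContextUseFlags, and the state-loading code before VM entry syncs
 * and clears whatever is flagged.  The field and flag names are the ones
 * defined in this header; the helper itself is hypothetical.
 *
 * @code
 *     static void hmSketchOnGuestCr0Changed(PVMCPU pVCpu)
 *     {
 *         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;   // mark CR0 dirty
 *     }
 *
 *     // ...later, just before VM entry:
 *     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
 *     {
 *         //  reload the flagged guest state into the VMCS/VMCB here
 *         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;  // state is clean again
 *     }
 * @endcode
 */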

/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES      8
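
/*
 * A simplified sketch (not from the original file) of the policy this limit
 * expresses, using the HMCPU::TlbShootdown fields declared further down:
 * individual page invalidations are queued until the array fills up, after
 * which a full TLB flush is cheaper.  The helper name is hypothetical.
 *
 * @code
 *     static void hmSketchQueueShootdownPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
 *     {
 *         if (pVCpu->hm.s.TlbShootdown.cPages < HM_MAX_TLB_SHOOTDOWN_PAGES)
 *             pVCpu->hm.s.TlbShootdown.aPages[pVCpu->hm.s.TlbShootdown.cPages++] = GCPtrPage;
 *         else
 *             pVCpu->hm.s.fForceTLBFlush = true;   // too many pages, flush everything
 *     }
 * @endcode
 */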

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the I/O bitmap + end byte. */
#define HM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM        (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM SSM version. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION                 5
# define HM_SSM_VERSION_NO_PATCHING     4
#else
# define HM_SSM_VERSION                 4
# define HM_SSM_VERSION_NO_PATCHING     4
#endif
#define HM_SSM_VERSION_2_0_X            3

/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBLCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object. */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentAsid;
    /** TLB flush count. */
    uint32_t            cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool       fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
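
/*
 * A simplified sketch (not from the original file) of how the ASID/VPID
 * bookkeeping above is typically consumed on VM entry: when a VCPU last ran
 * on a different host CPU, or that CPU has since bumped its flush count, the
 * VCPU gets a fresh ASID, wrapping and flushing when the ASID space runs out.
 * The real logic lives in the ring-0 VT-x/AMD-V code; this helper is
 * hypothetical.
 *
 * @code
 *     static void hmSketchUpdateAsid(PHMGLOBLCPUINFO pCpu, PVMCPU pVCpu, uint32_t uMaxAsid)
 *     {
 *         if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
 *             || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
 *         {
 *             if (++pCpu->uCurrentAsid >= uMaxAsid)   // ASID space exhausted:
 *             {
 *                 pCpu->uCurrentAsid = 1;             //  wrap around and
 *                 pCpu->cTlbFlushes++;                //  invalidate everyone.
 *             }
 *             pVCpu->hm.s.uCurrentAsid   = pCpu->uCurrentAsid;
 *             pVCpu->hm.s.cTlbFlushes    = pCpu->cTlbFlushes;
 *             pVCpu->hm.s.fForceTLBFlush = pCpu->fFlushAsidBeforeUse;
 *         }
 *     }
 * @endcode
 */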

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction. (32-bit GC pointer) */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
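
/*
 * A usage sketch (not from the original file): patch records are kept in
 * HM::PatchTree, an offset-based AVL tree keyed by the guest address of the
 * patched instruction (see iprt/avl.h).  Lookup and insertion follow the
 * usual IPRT AVL pattern; the helper names are hypothetical.
 *
 * @code
 *     static PHMTPRPATCH hmSketchFindPatch(PVM pVM, RTGCPTR32 GCPtrInstr)
 *     {
 *         return (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, GCPtrInstr);
 *     }
 *
 *     static bool hmSketchInsertPatch(PVM pVM, PHMTPRPATCH pPatch, RTGCPTR32 GCPtrInstr)
 *     {
 *         pPatch->Core.Key = GCPtrInstr;   // the guest address is the tree key
 *         return RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
 *     }
 * @endcode
 */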

/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param   pVM             Pointer to the VM.
 * @param   offCpumVCpu     Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    bool                        u8Alignment[7];

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;

    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32-bit to 64-bit switcher entrypoint. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    RTR0PTR                     uPadding2;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the
         *  CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVpid;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;

        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Set if unrestricted guest execution is allowed to be used. */
        bool                        fAllowUnrestricted;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and
         *  protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTR0MEMOBJ                  hMemObjScratch;
        RTHCPHYS                    HCPhysScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif

#ifndef VBOX_WITH_OLD_VTX_CODE
        unsigned                    uFlushTaggedTlb;
#else
        /** Ring-0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(void, pfnFlushTaggedTlb, (PVM pVM, PVMCPU pVCpu));
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t                    hostEFER;

        /** VMX MSR values. */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_vmfunc;
            uint64_t                vmx_ept_vpid_caps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEpt;
        VMX_FLUSH_VPID              enmFlushVpid;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD CPU. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the I/O bitmap (12 KB). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Physical address of the I/O bitmap (12 KB). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** Virtual address of the I/O bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /* HWCR MSR (for diagnostics). */
        uint64_t                    msrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a. */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address.
     */
    AVLOU32TREE                 PatchTree;
    uint32_t                    cPatches;
    HMTPRPATCH                  aPatches[64];

    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t                     lLastError;

    /** HMR0Init was run. */
    bool                        fHMR0Init;
    bool                        u8Alignment1[7];

    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccess;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY     128

/* Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t         aMagic[16];
    uint64_t        uMagic;
    uint64_t        u64TimeEntry;
    uint64_t        u64TimeSwitch;
    uint64_t        cResume;
    uint64_t        interPD;
    uint64_t        pSwitcher;
    uint32_t        uPos;
    uint32_t        idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t        cr2;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS    HCPhysCpuPage;
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
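
/*
 * A minimal sketch (not from the original file) of queueing a VMCS write in
 * the cache above instead of issuing VMWRITE directly; the queued entries are
 * then replayed in batch around the world switch (this caching is only active
 * with VMX_USE_CACHED_VMCS_ACCESSES).  The helper name is hypothetical.
 *
 * @code
 *     static int hmSketchCacheWrite(PVMCSCACHE pCache, uint32_t idxField, uint64_t u64Val)
 *     {
 *         if (pCache->Write.cValidEntries >= VMCSCACHE_MAX_ENTRY)
 *             return VERR_BUFFER_OVERFLOW;            // cache full
 *         uint32_t const i = pCache->Write.cValidEntries++;
 *         pCache->Write.aField[i]    = idxField;      // VMCS field encoding
 *         pCache->Write.aFieldVal[i] = u64Val;        // value to VMWRITE later
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */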

/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;

/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Old style FPU reporting trap mask override performed (optimization). */
    bool                        fFPUOldStyleOverride;
    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    uint8_t                     u8Alignment[3];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;
    /** ID of the last CPU we were executing code on (NIL_RTCPUID for the first time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    uint32_t                    u32Alignment;

    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;
        /** Ring-0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment1;
#endif

        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t                    u32EntryCtls;
        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment2;
#endif

        /** Current CR0 mask. */
        uint32_t                    u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t                    u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        uint32_t                    fUpdatedGuestState;
        /** Current EPTP. */
        RTHCPHYS                    HCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
         *  for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Number of automatically loaded/restored guest MSRs during the world switch. */
        uint32_t                    cGuestMsrs;
        uint32_t                    uAlignment;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last used TSC offset value. (cached) */
        uint64_t                    u64TSCOffset;
        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR             uAttrCS;
            X86DESCATTR             uAttrDS;
            X86DESCATTR             uAttrES;
            X86DESCATTR             uAttrFS;
            X86DESCATTR             uAttrGS;
            X86DESCATTR             uAttrSS;
            X86EFLAGS               eflags;
            uint32_t                fRealOnV86Active;
        } RealMode;

        /** Last-error reporting. */
        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                u32VMCSRevision;
            uint32_t                u32InstrError;
            uint32_t                u32ExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                padding;
        } lasterror;

#ifdef VBOX_WITH_OLD_VTX_CODE
        /** The last seen guest paging mode (by VT-x). */
        PGMMODE                     enmLastSeenGuestMode;
        /** Current guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE                     enmCurrGuestMode;
        /** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE                     enmPrevGuestMode;
#else
        /** Which host-state bits to restore before being preempted. */
        uint32_t                    fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST              RestoreHost;
        /** Set if guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;
#endif
    } vmx;

    struct
    {
        /** R0 memory object for the host VMCB which holds additional host-state. */
        RTR0MEMOBJ                  hMemObjVmcbHost;
        /** Physical address of the host VMCB which holds additional host-state. */
        RTHCPHYS                    HCPhysVmcbHost;
        /** Virtual address of the host VMCB which holds additional host-state. */
        R0PTRTYPE(void *)           pvVmcbHost;

        /** R0 memory object for the guest VMCB. */
        RTR0MEMOBJ                  hMemObjVmcb;
        /** Physical address of the guest VMCB. */
        RTHCPHYS                    HCPhysVmcb;
        /** Virtual address of the guest VMCB. */
        R0PTRTYPE(void *)           pvVmcb;

        /** Ring-0 handler for AMD-V guest execution (VMRun). */
        PFNHMSVMVMRUN               pfnVMRun;

        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    u32ErrCode;
        uint32_t                    cbInstr;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
        uint64_t                    u64IntrInfo;
        RTGCUINTPTR                 GCPtrFaultAddress;
    } Event;

    /** I/O block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the I/O code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending I/O operation type. */
        HMPENDINGIO                 enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t            uPort;
                uint32_t            uAndVal;
                uint32_t            cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     *  HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR                     aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        uint32_t                    cPages;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     *  on the stack. */
    DISCPUSTATE                 DisState;

    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
    STAMPROFILEADV              StatExitIO;
    STAMPROFILEADV              StatExitMovCRx;
    STAMPROFILEADV              StatExitXcptNmi;
    STAMPROFILEADV              StatLoadGuestState;
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatIntInject;

    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;   /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitExtInt;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatIntReinject;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;

    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;
    STAMCOUNTER                 StatTscInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

    STAMCOUNTER                 StatVmxCheckBadRmSelBase;
    STAMCOUNTER                 StatVmxCheckBadRmSelLimit;
    STAMCOUNTER                 StatVmxCheckRmOk;

    STAMCOUNTER                 StatVmxCheckBadSel;
    STAMCOUNTER                 StatVmxCheckBadRpl;
    STAMCOUNTER                 StatVmxCheckBadLdt;
    STAMCOUNTER                 StatVmxCheckBadTr;
    STAMCOUNTER                 StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV              StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;


#ifdef IN_RING0

VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


# ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
# else
#  define HMDumpRegs(a, b, c)           do { } while (0)
#  define HMR0DumpDescriptor(a, b, c)   do { } while (0)
# endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif