VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HMInternal.h@46364

Last change on this file since 46364 was 46358, checked in by vboxsync on 2013-06-03:

VMM: Remove u64RegisterMask HM member which is never really used anywhere.

/* $Id: HMInternal.h 46358 2013-06-03 10:21:12Z vboxsync $ */
/** @file
 * HM - Internal header file.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HMInternal_h
#define ___HMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/avl.h>

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

#ifdef VBOX_WITH_OLD_VTX_CODE
# define VMX_USE_CACHED_VMCS_ACCESSES
#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif

/** @def HM_PROFILE_EXIT_DISPATCH
 * Enables profiling of the VM exit handler dispatching. */
#if 0
# define HM_PROFILE_EXIT_DISPATCH
#endif

/* The MSR auto load/store did not always work for the KERNEL_GS_BASE MSR, so we
 * used to handle that MSR manually; see @bugref{6208}. The problem was clearly
 * visible while booting Solaris 11 (11.1 b19) VMs with 2 CPUs. This is no longer
 * the case and we now always auto load/store the KERNEL_GS_BASE MSR.
 *
 * Note: don't forget to update the assembly files when modifying this!
 */
/** @todo This define should always be in effect and the define itself removed
 *        after 'sufficient' testing. */
#define VBOX_WITH_AUTO_MSR_LOAD_RESTORE

RT_C_DECLS_BEGIN


/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */


/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT         0x100
#define MASK_EXITREASON_STAT        0xff
#define MASK_INJECT_IRQ_STAT        0xff
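
/*
 * Example: the exit-reason counters live in a fixed-size array (see the
 * VBOX_WITH_STATISTICS members of HMCPU further down), so the raw exit reason
 * is masked before being used as an index.  A minimal sketch of the idea;
 * hmExampleCountExit is hypothetical:
 *
 *     static void hmExampleCountExit(PHMCPU pHmCpu, uint32_t uExitReason)
 *     {
 *         // MASK_EXITREASON_STAT == MAX_EXITREASON_STAT - 1, so the index
 *         // always stays within the 0x100-entry array (R3 copy shown here).
 *         STAM_COUNTER_INC(&pHmCpu->paStatExitReason[uExitReason & MASK_EXITREASON_STAT]);
 *     }
 */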

/** @name Changed flags
 * These flags are used to keep track of which important registers have been
 * changed since they were last reset.
 * @{
 */
#ifdef VBOX_WITH_OLD_VTX_CODE
# define HM_CHANGED_GUEST_FPU                   RT_BIT(0)
# define HM_CHANGED_GUEST_CR0                   RT_BIT(1)
# define HM_CHANGED_GUEST_CR3                   RT_BIT(2)
# define HM_CHANGED_GUEST_CR4                   RT_BIT(3)
# define HM_CHANGED_GUEST_GDTR                  RT_BIT(4)
# define HM_CHANGED_GUEST_IDTR                  RT_BIT(5)
# define HM_CHANGED_GUEST_LDTR                  RT_BIT(6)
# define HM_CHANGED_GUEST_TR                    RT_BIT(7)
# define HM_CHANGED_GUEST_MSR                   RT_BIT(8)
# define HM_CHANGED_GUEST_SEGMENT_REGS          RT_BIT(9)
# define HM_CHANGED_GUEST_DEBUG                 RT_BIT(10)
# define HM_CHANGED_HOST_CONTEXT                RT_BIT(11)
# define HM_CHANGED_ALL_GUEST                   (  HM_CHANGED_GUEST_SEGMENT_REGS \
                                                 | HM_CHANGED_GUEST_CR0          \
                                                 | HM_CHANGED_GUEST_CR3          \
                                                 | HM_CHANGED_GUEST_CR4          \
                                                 | HM_CHANGED_GUEST_GDTR         \
                                                 | HM_CHANGED_GUEST_IDTR         \
                                                 | HM_CHANGED_GUEST_LDTR         \
                                                 | HM_CHANGED_GUEST_TR           \
                                                 | HM_CHANGED_GUEST_MSR          \
                                                 | HM_CHANGED_GUEST_DEBUG        \
                                                 | HM_CHANGED_GUEST_FPU)
#else
# define HM_CHANGED_GUEST_RIP                   RT_BIT(0)
# define HM_CHANGED_GUEST_RSP                   RT_BIT(1)
# define HM_CHANGED_GUEST_RFLAGS                RT_BIT(2)
# define HM_CHANGED_GUEST_CR0                   RT_BIT(3)
# define HM_CHANGED_GUEST_CR3                   RT_BIT(4)
# define HM_CHANGED_GUEST_CR4                   RT_BIT(5)
# define HM_CHANGED_GUEST_GDTR                  RT_BIT(6)
# define HM_CHANGED_GUEST_IDTR                  RT_BIT(7)
# define HM_CHANGED_GUEST_LDTR                  RT_BIT(8)
# define HM_CHANGED_GUEST_TR                    RT_BIT(9)
# define HM_CHANGED_GUEST_SEGMENT_REGS          RT_BIT(10)
# define HM_CHANGED_GUEST_DEBUG                 RT_BIT(11)
# define HM_CHANGED_GUEST_SYSENTER_CS_MSR       RT_BIT(12)
# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR      RT_BIT(13)
# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR      RT_BIT(14)
# define HM_CHANGED_VMX_GUEST_AUTO_MSRS         RT_BIT(15)
# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE    RT_BIT(16)
# define HM_CHANGED_VMX_GUEST_APIC_STATE        RT_BIT(17)
# define HM_CHANGED_VMX_ENTRY_CTLS              RT_BIT(18)
# define HM_CHANGED_VMX_EXIT_CTLS               RT_BIT(19)

# define HM_CHANGED_HOST_CONTEXT                RT_BIT(20)

# define HM_CHANGED_ALL_GUEST                   (  HM_CHANGED_GUEST_RIP                \
                                                 | HM_CHANGED_GUEST_RSP                \
                                                 | HM_CHANGED_GUEST_RFLAGS             \
                                                 | HM_CHANGED_GUEST_CR0                \
                                                 | HM_CHANGED_GUEST_CR3                \
                                                 | HM_CHANGED_GUEST_CR4                \
                                                 | HM_CHANGED_GUEST_GDTR               \
                                                 | HM_CHANGED_GUEST_IDTR               \
                                                 | HM_CHANGED_GUEST_LDTR               \
                                                 | HM_CHANGED_GUEST_TR                 \
                                                 | HM_CHANGED_GUEST_SEGMENT_REGS       \
                                                 | HM_CHANGED_GUEST_DEBUG              \
                                                 | HM_CHANGED_GUEST_SYSENTER_CS_MSR    \
                                                 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR   \
                                                 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR   \
                                                 | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
                                                 | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
                                                 | HM_CHANGED_VMX_GUEST_APIC_STATE     \
                                                 | HM_CHANGED_VMX_ENTRY_CTLS           \
                                                 | HM_CHANGED_VMX_EXIT_CTLS)
#endif

#define HM_CHANGED_ALL                          (HM_CHANGED_ALL_GUEST | HM_CHANGED_HOST_CONTEXT)
/** @} */
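
/*
 * Example: code that modifies guest state sets the corresponding bits in
 * HMCPU::fContextUseFlags (declared further down), and the world-switch code
 * reloads only what is marked dirty.  A minimal sketch assuming the new
 * (non-VBOX_WITH_OLD_VTX_CODE) flag layout; hmExampleSetRipRsp is hypothetical:
 *
 *     static void hmExampleSetRipRsp(PHMCPU pHmCpu, PCPUMCTX pCtx, uint64_t uRip, uint64_t uRsp)
 *     {
 *         pCtx->rip = uRip;
 *         pCtx->rsp = uRsp;
 *         // Mark both registers for syncing into the VMCS before the next VM-entry.
 *         pHmCpu->fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
 *     }
 */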

/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES      8
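
/*
 * Example: pending TLB shootdown pages are queued per VCPU (see the
 * TlbShootdown member of HMCPU below); once the queue would exceed
 * HM_MAX_TLB_SHOOTDOWN_PAGES, a full TLB flush is requested instead.  A
 * minimal sketch of that bookkeeping; hmExampleQueueShootdown is hypothetical:
 *
 *     static void hmExampleQueueShootdown(PVMCPU pVCpu, RTGCPTR GCPtrPage)
 *     {
 *         PHMCPU pHmCpu = &pVCpu->hm.s;
 *         if (pHmCpu->TlbShootdown.cPages < HM_MAX_TLB_SHOOTDOWN_PAGES)
 *             pHmCpu->TlbShootdown.aPages[pHmCpu->TlbShootdown.cPages++] = GCPtrPage;
 *         else
 *             pHmCpu->fForceTLBFlush = true;  // Queue full: flush everything on the next entry.
 *     }
 */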

/** Size for the EPT identity page table (1024 4-MB pages to cover the entire address space). */
#define HM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2 * PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HM_VTX_TOTAL_DEVHEAP_MEM        (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING

/** HM SSM version. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION                 5
# define HM_SSM_VERSION_NO_PATCHING     4
#else
# define HM_SSM_VERSION                 4
# define HM_SSM_VERSION_NO_PATCHING     4
#endif
#define HM_SSM_VERSION_2_0_X            3

/**
 * Global per-CPU information (host).
 */
typedef struct HMGLOBLCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object. */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentAsid;
    /** TLB flush count. */
    uint32_t            cTlbFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushAsidBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code (for power suspend). */
    volatile bool       fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-CPU global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
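
/*
 * Example: the per-host-CPU uCurrentAsid/cTlbFlushes pair is compared against
 * the copies kept in HMCPU (below) to decide whether a VCPU that migrated to
 * another host CPU, or that missed a flush, needs a fresh ASID and a TLB
 * flush.  A minimal, simplified sketch of that comparison; hmExampleNeedsFlush
 * is hypothetical:
 *
 *     static bool hmExampleNeedsFlush(PHMCPU pHmCpu, PHMGLOBLCPUINFO pHostCpu)
 *     {
 *         // New host CPU, or a flush happened since this VCPU last ran here.
 *         return pHmCpu->idLastCpu   != pHostCpu->idCpu
 *             || pHmCpu->cTlbFlushes != pHostCpu->cTlbFlushes;
 *     }
 */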

typedef enum
{
    HMPENDINGIO_INVALID = 0,
    HMPENDINGIO_PORT_READ,
    HMPENDINGIO_PORT_WRITE,
    HMPENDINGIO_STRING_READ,
    HMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;


typedef enum
{
    HMTPRINSTR_INVALID,
    HMTPRINSTR_READ,
    HMTPRINSTR_READ_SHR4,
    HMTPRINSTR_WRITE_REG,
    HMTPRINSTR_WRITE_IMM,
    HMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;

typedef struct
{
    /** The key is the address of the patched instruction (32-bit GC pointer). */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
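
/*
 * Example: patch records are keyed by guest instruction address and kept in
 * the AVLOU32 tree declared in the HM structure below (PatchTree).  A minimal
 * sketch of a lookup using the IPRT AVL API from iprt/avl.h;
 * hmExampleFindPatch is hypothetical:
 *
 *     static PHMTPRPATCH hmExampleFindPatch(PAVLOU32TREE pTree, RTGCPTR32 GCPtrInstr)
 *     {
 *         // Core.Key holds the guest address; RTAvloU32Get is an exact-match lookup.
 *         return (PHMTPRPATCH)RTAvloU32Get(pTree, GCPtrInstr);
 *     }
 *
 * Insertion works the same way: set pPatch->Core.Key to the guest address and
 * call RTAvloU32Insert(pTree, &pPatch->Core), which fails if the key exists.
 */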

/**
 * Switcher function, HC to the special 64-bit RC.
 *
 * @param   pVM             Pointer to the VM.
 * @param   offCpumVCpu     Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;

/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 */
typedef struct HM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    bool                        u8Alignment[7];

    /** Maximum ASID allowed. */
    uint32_t                    uMaxAsid;

    /** The maximum number of resume loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32-bit to 64-bit switcher entry point. */
    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    RTR0PTR                     uPadding2;
#endif

    struct
    {
        /** Set by the ring-0 side of HM to indicate VMX is supported by the CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVpid;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVpid;

        /** Set if unrestricted guest execution is in use (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Set if unrestricted guest execution is allowed to be used. */
        bool                        fAllowUnrestricted;

        /** Whether we're using the preemption timer. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and
         *  protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC-access page. */
        RTR0MEMOBJ                  hMemObjApicAccess;
        /** Physical address of the APIC-access page. */
        RTHCPHYS                    HCPhysApicAccess;
        /** Virtual address of the APIC-access page. */
        R0PTRTYPE(uint8_t *)        pbApicAccess;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        RTR0MEMOBJ                  hMemObjScratch;
        RTHCPHYS                    HCPhysScratch;
        R0PTRTYPE(uint8_t *)        pbScratch;
#endif

#ifndef VBOX_WITH_OLD_VTX_CODE
        unsigned                    uFlushTaggedTlb;
#else
        /** Ring-0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(void, pfnFlushTaggedTlb, (PVM pVM, PVMCPU pVCpu));
#endif

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init). */
        uint64_t                    hostEFER;

        /** VMX MSR values. */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_ept_vpid_caps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEpt;
        VMX_FLUSH_VPID              enmFlushVpid;
    } vmx;

    struct
    {
        /** Set by the ring-0 side of HM to indicate SVM is supported by the CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD CPU. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12 KB). */
        RTR0MEMOBJ                  hMemObjIOBitmap;
        /** Physical address of the IO bitmap (12 KB). */
        RTHCPHYS                    HCPhysIOBitmap;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pvIOBitmap;

        /** HWCR MSR (for diagnostics). */
        uint64_t                    msrHwcr;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from CPUID 0x8000000a. */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address.
     */
    AVLOU32TREE                 PatchTree;
    uint32_t                    cPatches;
    HMTPRPATCH                  aPatches[64];

    struct
    {
        uint32_t                u32AMDFeatureECX;
        uint32_t                u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection. */
    int32_t                     lLastError;

    /** HMR0Init was run. */
    bool                        fHMR0Init;
    bool                        u8Alignment1[7];

    STAMCOUNTER                 StatTprPatchSuccess;
    STAMCOUNTER                 StatTprPatchFailure;
    STAMCOUNTER                 StatTprReplaceSuccess;
    STAMCOUNTER                 StatTprReplaceFailure;
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;

/** Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY             128

/** Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t         aMagic[16];
    uint64_t        uMagic;
    uint64_t        u64TimeEntry;
    uint64_t        u64TimeSwitch;
    uint64_t        cResume;
    uint64_t        interPD;
    uint64_t        pSwitcher;
    uint32_t        uPos;
    uint32_t        idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t        cr2;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef VBOX_STRICT
    struct
    {
        RTHCPHYS    HCPhysCpuPage;
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS    HCPhysVmcs;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
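
/*
 * Example: when VMX_USE_CACHED_VMCS_ACCESSES is in effect, writes to selected
 * VMCS fields are queued in the Write arrays and only committed to the real
 * VMCS by the world switcher.  A minimal sketch of the queueing side;
 * hmExampleCacheWrite is hypothetical:
 *
 *     static int hmExampleCacheWrite(PVMCSCACHE pCache, uint32_t idxField, uint64_t u64Val)
 *     {
 *         uint32_t i = pCache->Write.cValidEntries;
 *         if (i >= VMCSCACHE_MAX_ENTRY)
 *             return VERR_INTERNAL_ERROR;        // Out of cache slots.
 *         pCache->Write.aField[i]     = idxField;
 *         pCache->Write.aFieldVal[i]  = u64Val;
 *         pCache->Write.cValidEntries = i + 1;
 *         return VINF_SUCCESS;
 *     }
 */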

/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;

/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
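
/*
 * Example: the ring-0 code selects a suitable assembly entry point once and
 * stores it in the pfnStartVM / pfnVMRun members of HMCPU below, so the hot
 * path is a single indirect call.  A minimal sketch of the VT-x dispatch;
 * hmExampleRunGuest is hypothetical:
 *
 *     static int hmExampleRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 *     {
 *         PHMCPU pHmCpu = &pVCpu->hm.s;
 *         // fResumeVM selects VMLAUNCH vs. VMRESUME inside the assembly stub.
 *         return pHmCpu->vmx.pfnStartVM(pHmCpu->fResumeVM, pCtx,
 *                                       &pHmCpu->vmx.VMCSCache, pVM, pVCpu);
 *     }
 */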

/**
 * HM VMCPU Instance data.
 */
typedef struct HMCPU
{
    /** Old style FPU reporting trap mask override performed (optimization). */
    bool                        fFPUOldStyleOverride;
    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;
    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;
    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;
    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    uint8_t                     u8Alignment[3];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;
    /** HM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;
    /** ID of the last CPU we executed code on (NIL_RTCPUID for the first time). */
    RTCPUID                     idLastCpu;
    /** TLB flush count. */
    uint32_t                    cTlbFlushes;
    /** Current ASID in use by the VM. */
    uint32_t                    uCurrentAsid;
    uint32_t                    u32Alignment;

    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTscAux;

    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVmcs;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVmcs;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVmcs;
        /** Ring-0 handlers for VT-x. */
        PFNHMVMXSTARTVM             pfnStartVM;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment1;
#endif

        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
        uint32_t                    u32PinCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
        uint32_t                    u32ProcCtls;
        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
        uint32_t                    u32ProcCtls2;
        /** Current VMX_VMCS32_CTRL_EXIT. */
        uint32_t                    u32ExitCtls;
        /** Current VMX_VMCS32_CTRL_ENTRY. */
        uint32_t                    u32EntryCtls;
        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVirtApic;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVirtApic;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVirtApic;
#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment2;
#endif

        /** Current CR0 mask. */
        uint32_t                    u32CR0Mask;
        /** Current CR4 mask. */
        uint32_t                    u32CR4Mask;
        /** Current exception bitmap. */
        uint32_t                    u32XcptBitmap;
        /** The updated-guest-state mask. */
        uint32_t                    fUpdatedGuestState;
        /** Current EPTP. */
        RTHCPHYS                    HCPhysEPTP;

        /** Physical address of the MSR bitmap. */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** R0 memory object for the MSR bitmap. */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTHCPHYS                    HCPhysGuestMsr;
        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        RTR0MEMOBJ                  hMemObjGuestMsr;
        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area
         *  (used for guest MSRs). */
        R0PTRTYPE(void *)           pvGuestMsr;

        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
        RTHCPHYS                    HCPhysHostMsr;
        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
        RTR0MEMOBJ                  hMemObjHostMsr;
        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
        R0PTRTYPE(void *)           pvHostMsr;

        /** Number of automatically loaded/restored guest MSRs during the world switch. */
        uint32_t                    cGuestMsrs;
        uint32_t                    uAlignment;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
        uint64_t                    u64MsrApicBase;
        /** Last used TSC offset value (cached). */
        uint64_t                    u64TSCOffset;
        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86DESCATTR             uAttrCS;
            X86DESCATTR             uAttrDS;
            X86DESCATTR             uAttrES;
            X86DESCATTR             uAttrFS;
            X86DESCATTR             uAttrGS;
            X86DESCATTR             uAttrSS;
            X86EFLAGS               eflags;
            uint32_t                fRealOnV86Active;
        } RealMode;

        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                u32VMCSRevision;
            uint32_t                u32InstrError;
            uint32_t                u32ExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                padding;
        } lasterror;

#ifdef VBOX_WITH_OLD_VTX_CODE
        /** The last seen guest paging mode (by VT-x). */
        PGMMODE                     enmLastSeenGuestMode;
        /** Current guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE                     enmCurrGuestMode;
        /** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
        PGMMODE                     enmPrevGuestMode;
#else
        /** Which host-state bits to restore before being preempted. */
        uint32_t                    fRestoreHostFlags;
        /** The host-state restoration structure. */
        VMXRESTOREHOST              RestoreHost;
        /** Set if the guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;
#endif
    } vmx;

    struct
    {
        /** R0 memory object for the host VM control block (VMCB). */
        RTR0MEMOBJ                  hMemObjVmcbHost;
        /** Physical address of the host VM control block (VMCB). */
        RTHCPHYS                    HCPhysVmcbHost;
        /** Virtual address of the host VM control block (VMCB). */
        R0PTRTYPE(void *)           pvVmcbHost;

        /** R0 memory object for the VM control block (VMCB). */
        RTR0MEMOBJ                  hMemObjVmcb;
        /** Physical address of the VM control block (VMCB). */
        RTHCPHYS                    HCPhysVmcb;
        /** Virtual address of the VM control block (VMCB). */
        R0PTRTYPE(void *)           pvVmcb;

        /** Ring-0 handlers for AMD-V. */
        PFNHMSVMVMRUN               pfnVMRun;

        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                  hMemObjMsrBitmap;
        /** Physical address of the MSR bitmap (8 KB). */
        RTHCPHYS                    HCPhysMsrBitmap;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pvMsrBitmap;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    u32ErrCode;
        uint32_t                    cbInstr;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
        uint64_t                    u64IntrInfo;
        RTGCUINTPTR                 GCPtrFaultAddress;
    } Event;

    /** IO block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the I/O code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    struct
    {
        /** Pending IO operation type. */
        HMPENDINGIO                 enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                uint32_t            uPort;
                uint32_t            uAndVal;
                uint32_t            cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** The PAE PDPEs used with Nested Paging (only valid when
     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    X86PDPE                     aPdpes[4];

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     *  HMR0Enter and cleared in HMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages (SMP guests only). */
    struct
    {
        RTGCPTR                     aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
        uint32_t                    cPages;
        uint32_t                    u32Padding; /**< Explicit alignment padding. */
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     *  on the stack. */
    DISCPUSTATE                 DisState;

    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
    STAMPROFILEADV              StatExitIO;
    STAMPROFILEADV              StatExitMovCRx;
    STAMPROFILEADV              StatExitXcptNmi;
    STAMPROFILEADV              StatLoadGuestState;
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatIntInject;

    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;   /* Misleading, currently used for MMIO #PFs as well. */
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvlpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitWbinvd;
    STAMCOUNTER                 StatExitPause;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdtscp;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitRdrand;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitClts;
    STAMCOUNTER                 StatExitXdtrAccess;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLmsw;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIntWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitExtInt;
    STAMCOUNTER                 StatExitPreemptTimer;
    STAMCOUNTER                 StatExitTprBelowThreshold;
    STAMCOUNTER                 StatExitTaskSwitch;
    STAMCOUNTER                 StatExitMtf;
    STAMCOUNTER                 StatExitApicAccess;
    STAMCOUNTER                 StatIntReinject;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTlb;
    STAMCOUNTER                 StatFlushTlbManual;
    STAMCOUNTER                 StatFlushTlbWorldSwitch;
    STAMCOUNTER                 StatNoFlushTlbWorldSwitch;
    STAMCOUNTER                 StatFlushAsid;
    STAMCOUNTER                 StatFlushNestedPaging;
    STAMCOUNTER                 StatFlushTlbInvlpgVirt;
    STAMCOUNTER                 StatFlushTlbInvlpgPhys;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchHmToR3FF;
    STAMCOUNTER                 StatSwitchExitToR3;
    STAMCOUNTER                 StatSwitchLongJmpToR3;

    STAMCOUNTER                 StatTscOffset;
    STAMCOUNTER                 StatTscIntercept;
    STAMCOUNTER                 StatTscInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNpf;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIoCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

    STAMCOUNTER                 StatVmxCheckBadRmSelBase;
    STAMCOUNTER                 StatVmxCheckBadRmSelLimit;
    STAMCOUNTER                 StatVmxCheckRmOk;

    STAMCOUNTER                 StatVmxCheckBadSel;
    STAMCOUNTER                 StatVmxCheckBadRpl;
    STAMCOUNTER                 StatVmxCheckBadLdt;
    STAMCOUNTER                 StatVmxCheckBadTr;
    STAMCOUNTER                 StatVmxCheckPmOk;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
    STAMPROFILEADV              StatExitDispatch;
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;


#ifdef IN_RING0

VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void);
VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);


# ifdef VBOX_STRICT
VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
# else
#  define HMDumpRegs(a, b, c)           do { } while (0)
#  define HMR0DumpDescriptor(a, b, c)   do { } while (0)
# endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets the 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets the 64-bit CR3 on darwin.
 * @returns CR3.
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif /* !___HMInternal_h */