VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HWACCMInternal.h@ 43049

Last change on this file since 43049 was 42894, checked in by vboxsync, 12 years ago

VMM: nits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 32.5 KB
Line 
1/* $Id: HWACCMInternal.h 42894 2012-08-21 08:00:10Z vboxsync $ */
2/** @file
3 * HM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___HWACCMInternal_h
19#define ___HWACCMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/vmm/em.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/dis.h>
26#include <VBox/vmm/hwaccm.h>
27#include <VBox/vmm/hwacc_vmx.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/cpum.h>
30#include <iprt/memobj.h>
31#include <iprt/cpuset.h>
32#include <iprt/mp.h>
33#include <iprt/avl.h>
34
35#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
36/* Enable 64 bits guest support. */
37# define VBOX_ENABLE_64_BITS_GUESTS
38#endif
39
40#define VMX_USE_CACHED_VMCS_ACCESSES
41#define HWACCM_VMX_EMULATE_REALMODE
42
43/** @todo: Broken on OS X Snow Leopard @bugref{6313}. */
44#if !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
45/* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we
46 * handle this MSR manually. See @bugref{6208}. This is clearly visible while
47 * booting Solaris 11 (11.1 b19) VMs with 2 Cpus.
48 *
49 * Note: don't forget to update the assembly files while modifying this!
50 */
51# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
52#endif
53
54RT_C_DECLS_BEGIN
55
56
57/** @defgroup grp_hwaccm_int Internal
58 * @ingroup grp_hwaccm
59 * @internal
60 * @{
61 */
62
63
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT        0x100
/** Mask used to wrap an exit reason into the statistics array
 *  (MAX_EXITREASON_STAT - 1, so it must stay a power-of-two minus one). */
#define MASK_EXITREASON_STAT       0xff
/** Mask used to wrap an injected IRQ/exception vector into its statistics array. */
#define MASK_INJECT_IRQ_STAT       0xff
68
/** @name Changed flags
 * These flags are used to keep track of which important registers that
 * have been changed since last they were reset (i.e. which guest/host
 * state must be re-synced into the hardware VM structures before the
 * next world switch).
 * @{
 */
#define HWACCM_CHANGED_GUEST_FPU                RT_BIT(0)
#define HWACCM_CHANGED_GUEST_CR0                RT_BIT(1)
#define HWACCM_CHANGED_GUEST_CR3                RT_BIT(2)
#define HWACCM_CHANGED_GUEST_CR4                RT_BIT(3)
#define HWACCM_CHANGED_GUEST_GDTR               RT_BIT(4)
#define HWACCM_CHANGED_GUEST_IDTR               RT_BIT(5)
#define HWACCM_CHANGED_GUEST_LDTR               RT_BIT(6)
#define HWACCM_CHANGED_GUEST_TR                 RT_BIT(7)
#define HWACCM_CHANGED_GUEST_MSR                RT_BIT(8)
#define HWACCM_CHANGED_GUEST_SEGMENT_REGS       RT_BIT(9)
#define HWACCM_CHANGED_GUEST_DEBUG              RT_BIT(10)
#define HWACCM_CHANGED_HOST_CONTEXT             RT_BIT(11)

/** All guest flags plus the host context flag. */
#define HWACCM_CHANGED_ALL                  (   HWACCM_CHANGED_GUEST_SEGMENT_REGS \
                                            |   HWACCM_CHANGED_GUEST_CR0          \
                                            |   HWACCM_CHANGED_GUEST_CR3          \
                                            |   HWACCM_CHANGED_GUEST_CR4          \
                                            |   HWACCM_CHANGED_GUEST_GDTR         \
                                            |   HWACCM_CHANGED_GUEST_IDTR         \
                                            |   HWACCM_CHANGED_GUEST_LDTR         \
                                            |   HWACCM_CHANGED_GUEST_TR           \
                                            |   HWACCM_CHANGED_GUEST_MSR          \
                                            |   HWACCM_CHANGED_GUEST_FPU          \
                                            |   HWACCM_CHANGED_GUEST_DEBUG        \
                                            |   HWACCM_CHANGED_HOST_CONTEXT)

/** All guest-state flags (everything except HWACCM_CHANGED_HOST_CONTEXT). */
#define HWACCM_CHANGED_ALL_GUEST            (   HWACCM_CHANGED_GUEST_SEGMENT_REGS \
                                            |   HWACCM_CHANGED_GUEST_CR0          \
                                            |   HWACCM_CHANGED_GUEST_CR3          \
                                            |   HWACCM_CHANGED_GUEST_CR4          \
                                            |   HWACCM_CHANGED_GUEST_GDTR         \
                                            |   HWACCM_CHANGED_GUEST_IDTR         \
                                            |   HWACCM_CHANGED_GUEST_LDTR         \
                                            |   HWACCM_CHANGED_GUEST_TR           \
                                            |   HWACCM_CHANGED_GUEST_MSR          \
                                            |   HWACCM_CHANGED_GUEST_DEBUG        \
                                            |   HWACCM_CHANGED_GUEST_FPU)

/** @} */
113
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HWACCM_MAX_TLB_SHOOTDOWN_PAGES      8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HWACCM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HWACCM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)
/** Total guest mapped memory needed (EPT identity page table + real-mode TSS). */
#define HWACCM_VTX_TOTAL_DEVHEAP_MEM        (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HWACCM_WITH_GUEST_PATCHING
126
/** HWACCM saved-state (SSM) unit version.
 *  Bumped to 5 when TPR guest patching data was added to the saved state. */
#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
# define HWACCM_SSM_VERSION                 5
# define HWACCM_SSM_VERSION_NO_PATCHING     4
#else
# define HWACCM_SSM_VERSION                 4
# define HWACCM_SSM_VERSION_NO_PATCHING     4
#endif
/** SSM version used by VirtualBox 2.0.x releases. */
#define HWACCM_SSM_VERSION_2_0_X            3
137
/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBLCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object backing this CPU's VMXON region / host VMCB page. */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentASID;
    /** TLB flush count. */
    uint32_t            cTLBFlushes;
    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushASIDBeforeUse;
    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;
    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;
    /** In use by our code. (for power suspend) */
    volatile bool       fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
162
/** Type of a pending (interrupted) I/O operation, see HWACCMCPU::PendingIO. */
typedef enum
{
    HWACCMPENDINGIO_INVALID = 0,
    HWACCMPENDINGIO_PORT_READ,
    HWACCMPENDINGIO_PORT_WRITE,
    HWACCMPENDINGIO_STRING_READ,
    HWACCMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HWACCMPENDINGIO_32BIT_HACK = 0x7fffffff
} HWACCMPENDINGIO;
173
174
/** Kind of instruction replaced/emulated by the TPR patching code,
 *  see HWACCMTPRPATCH::enmType. */
typedef enum
{
    HWACCMTPRINSTR_INVALID,
    HWACCMTPRINSTR_READ,
    HWACCMTPRINSTR_READ_SHR4,
    HWACCMTPRINSTR_WRITE_REG,
    HWACCMTPRINSTR_WRITE_IMM,
    HWACCMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HWACCMTPRINSTR_32BIT_HACK = 0x7fffffff
} HWACCMTPRINSTR;
186
/** Record describing one TPR access instruction that has been patched. */
typedef struct
{
    /** The key is the address of patched instruction. (32 bits GC ptr) */
    AVLOU32NODECORE     Core;
    /** Original opcode. */
    uint8_t             aOpcode[16];
    /** Instruction size. */
    uint32_t            cbOp;
    /** Replacement opcode. */
    uint8_t             aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t            cbNewOp;
    /** Instruction type. */
    HWACCMTPRINSTR      enmType;
    /** Source operand. */
    uint32_t            uSrcOperand;
    /** Destination operand. */
    uint32_t            uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t            cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32           pJumpTarget;
} HWACCMTPRPATCH;
/** Pointer to HWACCMTPRPATCH. */
typedef HWACCMTPRPATCH *PHWACCMTPRPATCH;
212
/**
 * Switcher function, HC to RC.
 *
 * @param   pVM             Pointer to the VM.
 * @param   uOffsetVMCPU    VMCPU offset from pVM
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK (int) FNHWACCMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU);
/** Pointer to switcher function. */
typedef FNHWACCMSWITCHERHC *PFNHWACCMSWITCHERHC;
223
/**
 * HWACCM VM Instance data.
 * Changes to this must be checked against the padding of the hwaccm union in VM!
 */
typedef struct HWACCM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set when hardware acceleration is allowed. */
    bool                        fAllowed;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    /** Explicit padding to keep the following 64-bit members aligned. */
    bool                        u8Alignment[6];

    /** And mask for copying register contents. */
    uint64_t                    u64RegisterMask;

    /** Maximum ASID allowed. */
    uint32_t                    uMaxASID;

    /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHWACCMSWITCHERHC) pfnHost32ToGuest64R0;

    /* AMD-V 64 bits vmrun handler */
    RTRCPTR                     pfnSVMGCVMRun64;

    /* VT-x 64 bits vmlaunch handler */
    RTRCPTR                     pfnVMXGCStartVM64;

    /* RC handler to setup the 64 bits FPU state. */
    RTRCPTR                     pfnSaveGuestFPU64;

    /* RC handler to setup the 64 bits debug state. */
    RTRCPTR                     pfnSaveGuestDebug64;

    /* Test handler */
    RTRCPTR                     pfnTest64;

    RTRCPTR                     uAlignment[2];
/*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    uint32_t                    u32Alignment[1]; */
#endif

    /** VT-x (Intel VMX) specific VM-wide data. */
    struct
    {
        /** Set by the ring-0 side of HWACCM to indicate VMX is supported by the
         *  CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVPID;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVPID;

        /** Set if unrestricted guest execution is allowed (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t                     cPreemptTimerShift;

        bool                        uAlignment[1];

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC physical page (serves for filtering accesses). */
        RTR0MEMOBJ                  pMemObjAPIC;
        /** Physical address of the APIC physical page (serves for filtering accesses). */
        RTHCPHYS                    pAPICPhys;
        /** Virtual address of the APIC physical page (serves for filtering accesses). */
        R0PTRTYPE(uint8_t *)        pAPIC;

        /** R0 memory object for the MSR entry load page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSREntryLoad;
        /** Physical address of the MSR entry load page (guest MSRs). */
        RTHCPHYS                    pMSREntryLoadPhys;
        /** Virtual address of the MSR entry load page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSREntryLoad;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        /* Scratch page used for crash dump diagnostics, see VMCSCACHE. */
        RTR0MEMOBJ                  pMemObjScratch;
        RTHCPHYS                    pScratchPhys;
        R0PTRTYPE(uint8_t *)        pScratch;
#endif
        /** R0 memory object for the MSR exit store page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitStore;
        /** Physical address of the MSR exit store page (guest MSRs). */
        RTHCPHYS                    pMSRExitStorePhys;
        /** Virtual address of the MSR exit store page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitStore;

        /** R0 memory object for the MSR exit load page (host MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitLoad;
        /** Physical address of the MSR exit load page (host MSRs). */
        RTHCPHYS                    pMSRExitLoadPhys;
        /** Virtual address of the MSR exit load page (host MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitLoad;

        /** Ring 0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init) */
        uint64_t                    hostEFER;

        /** VMX MSR values (capability MSRs read from the host CPU). */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_eptcaps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEPT;
        VMX_FLUSH_VPID              enmFlushVPID;
    } vmx;

    /** AMD-V (SVM) specific VM-wide data. */
    struct
    {
        /** Set by the ring-0 side of HWACCM to indicate SVM is supported by the
         *  CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ                  pMemObjIOBitmap;
        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS                    pIOBitmapPhys;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pIOBitmap;

        /* HWCR msr (for diagnostics) */
        uint64_t                    msrHWCR;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address
     */
    AVLOU32TREE                     PatchTree;
    uint32_t                        cPatches;
    HWACCMTPRPATCH                  aPatches[64];

    /** Cached AMD feature CPUID leaves (ECX/EDX of leaf 0x80000001). */
    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection */
    int32_t                         lLastError;

    /** HWACCMR0Init was run */
    bool                            fHWACCMR0Init;
    bool                            u8Alignment1[7];

    STAMCOUNTER                     StatTPRPatchSuccess;
    STAMCOUNTER                     StatTPRPatchFailure;
    STAMCOUNTER                     StatTPRReplaceSuccess;
    STAMCOUNTER                     StatTPRReplaceFailure;
} HWACCM;
/** Pointer to HWACCM VM instance data. */
typedef HWACCM *PHWACCM;
459
/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY                 128

/* Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /* Magic marker for searching in crash dumps. */
    uint8_t         aMagic[16];
    uint64_t        uMagic;
    uint64_t        u64TimeEntry;
    uint64_t        u64TimeSwitch;
    uint64_t        cResume;
    uint64_t        interPD;
    uint64_t        pSwitcher;
    uint32_t        uPos;
    uint32_t        idCpu;
#endif
    /* CR2 is saved here for EPT syncing. */
    uint64_t        cr2;
    /** Queued VMCS field writes (applied before VM entry). */
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    /** Cached VMCS field reads (filled in after VM exit). */
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef DEBUG
    /* Debug-build scratch areas for verifying the 32/64 switcher. */
    struct
    {
        RTHCPHYS    HCPhysCpuPage;
        RTHCPHYS    HCPhysVMCS;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    struct
    {
        RTHCPHYS    HCPhysVMCS;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
521
/** VMX StartVM function (assembly world-switch entry point). */
typedef DECLCALLBACK(int) FNHWACCMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHWACCMVMXSTARTVM *) PFNHWACCMVMXSTARTVM;

/** SVM VMRun function (assembly world-switch entry point). */
typedef DECLCALLBACK(int) FNHWACCMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHWACCMSVMVMRUN *) PFNHWACCMSVMVMRUN;
531
/**
 * HWACCM VMCPU Instance data.
 */
typedef struct HWACCMCPU
{
    /** Old style FPU reporting trap mask override performed (optimization) */
    bool                        fFPUOldStyleOverride;

    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;

    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;

    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;

    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    uint8_t                     bAlignment[3];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;

    /** HWACCM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;

    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
    RTCPUID                     idLastCpu;

    /** TLB flush count */
    uint32_t                    cTLBFlushes;

    /** Current ASID in use by the VM */
    uint32_t                    uCurrentASID;

    uint32_t                    u32Alignment;

    /* Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    uint64_t                    u64HostTSCAux;

    /** VT-x (Intel VMX) specific per-VCPU data. */
    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVMCS;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVMCS;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVMCS;

        /** Ring 0 handlers for VT-x. */
        PFNHWACCMVMXSTARTVM         pfnStartVM;

#if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment;
#endif

        /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
        uint64_t                    proc_ctls;

        /** Current VMX_VMCS_CTRL_PROC_EXEC2_CONTROLS. */
        uint64_t                    proc_ctls2;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVAPIC;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVAPIC;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVAPIC;

        /** Current CR0 mask. */
        uint64_t                    cr0_mask;
        /** Current CR4 mask. */
        uint64_t                    cr4_mask;

        /** Current EPTP. */
        RTHCPHYS                    GCPhysEPTP;

        /** Physical address of the MSR bitmap (1 page). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** R0 memory object for the MSR bitmap (1 page). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Virtual address of the MSR bitmap (1 page). */
        R0PTRTYPE(uint8_t *)        pMSRBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the guest MSR load area (1 page). */
        RTHCPHYS                    pGuestMSRPhys;
        /** R0 memory object for the guest MSR load area (1 page). */
        RTR0MEMOBJ                  pMemObjGuestMSR;
        /** Virtual address of the guest MSR load area (1 page). */
        R0PTRTYPE(uint8_t *)        pGuestMSR;

        /** Physical address of the MSR load area (1 page). */
        RTHCPHYS                    pHostMSRPhys;
        /** R0 memory object for the MSR load area (1 page). */
        RTR0MEMOBJ                  pMemObjHostMSR;
        /** Virtual address of the MSR load area (1 page). */
        R0PTRTYPE(uint8_t *)        pHostMSR;

        /* Number of automatically loaded/restored guest MSRs during the world switch. */
        uint32_t                    cCachedMSRs;
        uint32_t                    uAlignment;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /* Last use TSC offset value. (cached) */
        uint64_t                    u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86EFLAGS               eflags;
            uint32_t                fValid;
        } RealMode;

        /** Diagnostic info captured on the last failed VM entry/exit. */
        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                ulVMCSRevision;
            uint32_t                ulInstrError;
            uint32_t                ulExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                padding;
        } lasterror;

        /** The last seen guest paging mode (by VT-x). */
        PGMMODE                     enmLastSeenGuestMode;
        /** Current guest paging mode (as seen by HWACCMR3PagingModeChanged). */
        PGMMODE                     enmCurrGuestMode;
        /** Previous guest paging mode (as seen by HWACCMR3PagingModeChanged). */
        PGMMODE                     enmPrevGuestMode;
    } vmx;

    /** AMD-V (SVM) specific per-VCPU data. */
    struct
    {
        /** R0 memory object for the host VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCBHost;
        /** Physical address of the host VM control block (VMCB). */
        RTHCPHYS                    pVMCBHostPhys;
        /** Virtual address of the host VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCBHost;

        /** R0 memory object for the VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCB;
        /** Physical address of the VM control block (VMCB). */
        RTHCPHYS                    pVMCBPhys;
        /** Virtual address of the VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCB;

        /** Ring 0 handlers for AMD-V (the type is PFNHWACCMSVMVMRUN; the
         *  original comment said "VT-x" which was a copy/paste error). */
        PFNHWACCMSVMVMRUN           pfnVMRun;

        /** R0 memory object for the MSR bitmap (8kb). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Physical address of the MSR bitmap (8kb). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pMSRBitmap;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    errCode;
        uint64_t                    intInfo;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool                        fEnabled;
        uint8_t                     u8Align[7];

        /** RIP at the start of the io code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    /** Pending I/O operation to be resumed after the next world switch. */
    struct
    {
        /* Pending IO operation type. */
        HWACCMPENDINGIO             enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                unsigned            uPort;
                unsigned            uAndVal;
                unsigned            cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** Currently shadow paging mode. */
    PGMMODE                 enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HWACCMR0Enter and cleared in HWACCMR0Leave. */
    RTCPUID                 idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR             aPages[HWACCM_MAX_TLB_SHOOTDOWN_PAGES];
        unsigned            cPages;
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     * on the stack. */
    DISCPUSTATE             DisState;

    uint32_t                padding2[1];

    STAMPROFILEADV          StatEntry;
    STAMPROFILEADV          StatExit1;
    STAMPROFILEADV          StatExit2;
#if 1 /* temporary for tracking down darwin issues. */
    STAMPROFILEADV          StatExit2Sub1;
    STAMPROFILEADV          StatExit2Sub2;
    STAMPROFILEADV          StatExit2Sub3;
#endif
    STAMPROFILEADV          StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV          StatWorldSwitch3264;
#endif
    STAMPROFILEADV          StatPoke;
    STAMPROFILEADV          StatSpinPoke;
    STAMPROFILEADV          StatSpinPokeFailed;

    STAMCOUNTER             StatIntInject;

    STAMCOUNTER             StatExitShadowNM;
    STAMCOUNTER             StatExitGuestNM;
    STAMCOUNTER             StatExitShadowPF;
    STAMCOUNTER             StatExitShadowPFEM;
    STAMCOUNTER             StatExitGuestPF;
    STAMCOUNTER             StatExitGuestUD;
    STAMCOUNTER             StatExitGuestSS;
    STAMCOUNTER             StatExitGuestNP;
    STAMCOUNTER             StatExitGuestGP;
    STAMCOUNTER             StatExitGuestDE;
    STAMCOUNTER             StatExitGuestDB;
    STAMCOUNTER             StatExitGuestMF;
    STAMCOUNTER             StatExitGuestBP;
    STAMCOUNTER             StatExitGuestXF;
    STAMCOUNTER             StatExitGuestXcpUnk;
    STAMCOUNTER             StatExitInvlpg;
    STAMCOUNTER             StatExitInvd;
    STAMCOUNTER             StatExitCpuid;
    STAMCOUNTER             StatExitRdtsc;
    STAMCOUNTER             StatExitRdtscp;
    STAMCOUNTER             StatExitRdpmc;
    STAMCOUNTER             StatExitCli;
    STAMCOUNTER             StatExitSti;
    STAMCOUNTER             StatExitPushf;
    STAMCOUNTER             StatExitPopf;
    STAMCOUNTER             StatExitIret;
    STAMCOUNTER             StatExitInt;
    STAMCOUNTER             StatExitCRxWrite[16];
    STAMCOUNTER             StatExitCRxRead[16];
    STAMCOUNTER             StatExitDRxWrite;
    STAMCOUNTER             StatExitDRxRead;
    STAMCOUNTER             StatExitRdmsr;
    STAMCOUNTER             StatExitWrmsr;
    STAMCOUNTER             StatExitCLTS;
    STAMCOUNTER             StatExitHlt;
    STAMCOUNTER             StatExitMwait;
    STAMCOUNTER             StatExitMonitor;
    STAMCOUNTER             StatExitLMSW;
    STAMCOUNTER             StatExitIOWrite;
    STAMCOUNTER             StatExitIORead;
    STAMCOUNTER             StatExitIOStringWrite;
    STAMCOUNTER             StatExitIOStringRead;
    STAMCOUNTER             StatExitIrqWindow;
    STAMCOUNTER             StatExitMaxResume;
    STAMCOUNTER             StatExitPreemptPending;
    STAMCOUNTER             StatExitMTF;
    STAMCOUNTER             StatIntReinject;
    STAMCOUNTER             StatPendingHostIrq;

    STAMCOUNTER             StatFlushPage;
    STAMCOUNTER             StatFlushPageManual;
    STAMCOUNTER             StatFlushPhysPageManual;
    STAMCOUNTER             StatFlushTLB;
    STAMCOUNTER             StatFlushTLBManual;
    STAMCOUNTER             StatFlushPageInvlpg;
    STAMCOUNTER             StatFlushTLBWorldSwitch;
    STAMCOUNTER             StatNoFlushTLBWorldSwitch;
    STAMCOUNTER             StatFlushTLBCRxChange;
    STAMCOUNTER             StatFlushASID;
    STAMCOUNTER             StatFlushTLBInvlpga;
    STAMCOUNTER             StatTlbShootdown;
    STAMCOUNTER             StatTlbShootdownFlush;

    STAMCOUNTER             StatSwitchGuestIrq;
    STAMCOUNTER             StatSwitchToR3;

    STAMCOUNTER             StatTSCOffset;
    STAMCOUNTER             StatTSCIntercept;
    STAMCOUNTER             StatTSCInterceptOverFlow;

    STAMCOUNTER             StatExitReasonNPF;
    STAMCOUNTER             StatDRxArmed;
    STAMCOUNTER             StatDRxContextSwitch;
    STAMCOUNTER             StatDRxIOCheck;

    STAMCOUNTER             StatLoadMinimal;
    STAMCOUNTER             StatLoadFull;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER             StatFpu64SwitchBack;
    STAMCOUNTER             StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    /* Per-exit-reason and per-injected-IRQ statistics arrays (R3/R0 views). */
    R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER) paStatExitReasonR0;
    R3PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
#endif
} HWACCMCPU;
/** Pointer to HWACCM VMCPU instance data. */
typedef HWACCMCPU *PHWACCMCPU;
866
867
#ifdef IN_RING0

/** Gets the per-cpu info for the CPU the caller is currently executing on. */
VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void);
/** Gets the per-cpu info for the given CPU id. */
VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);


#ifdef VBOX_STRICT
/** Dumps the guest CPU state for debugging (strict builds only). */
VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
/** Dumps a host descriptor-table entry for debugging (strict builds only). */
VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
#else
/* No-op stubs in non-strict builds. */
# define HWACCMDumpRegs(a, b ,c)            do { } while (0)
# define HWACCMR0DumpDescriptor(a, b, c)    do { } while (0)
#endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
/* Wrappers that save/restore XMM registers around the world-switch calls. */
DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */
903
904/** @} */
905
906RT_C_DECLS_END
907
908#endif
909
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette