VirtualBox

source: vbox/trunk/src/VBox/VMM/include/HWACCMInternal.h@ 41728

Last change on this file since 41728 was 41692, checked in by vboxsync, 13 years ago

DIS: Reducing the DISCPUMODE even more (200 bytes now) and making it have the same layout in all contexts. This is useful since it's used several places in the VM structure. Also a bunch of other cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 32.3 KB
Line 
1/* $Id: HWACCMInternal.h 41692 2012-06-13 19:32:54Z vboxsync $ */
2/** @file
3 * HM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___HWACCMInternal_h
19#define ___HWACCMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/vmm/em.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/dis.h>
26#include <VBox/vmm/hwaccm.h>
27#include <VBox/vmm/hwacc_vmx.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/cpum.h>
30#include <iprt/memobj.h>
31#include <iprt/cpuset.h>
32#include <iprt/mp.h>
33#include <iprt/avl.h>
34
#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
/* Enable 64 bits guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

/** Use the cached-VMCS-access optimization (batch reads/writes via VMCSCACHE). */
#define VMX_USE_CACHED_VMCS_ACCESSES
/** Emulate real mode on VT-x CPUs without unrestricted guest execution. */
#define HWACCM_VMX_EMULATE_REALMODE


#if 0
/* Seeing somewhat random behaviour on my Nehalem system with auto-save of guest MSRs;
 * for some strange reason the CPU doesn't save the MSRs during the VM-exit.
 * Clearly visible with a dual VCPU configured OpenSolaris 200906 live cd VM.
 *
 * Note: change the assembly files when enabling this! (remove the manual auto load/save)
 */
#define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
#endif
53
54RT_C_DECLS_BEGIN
55
56
57/** @defgroup grp_hwaccm_int Internal
58 * @ingroup grp_hwaccm
59 * @internal
60 * @{
61 */
62
63
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT        0x100
/** Mask for wrapping exit reason indices into the statistics array range. */
#define MASK_EXITREASON_STAT       0xff
/** Mask for wrapping injected IRQ indices into the statistics array range. */
#define MASK_INJECT_IRQ_STAT       0xff

/** @name Changed flags
 * These flags are used to keep track of which important registers that
 * have been changed since last they were reset.
 * @{
 */
#define HWACCM_CHANGED_GUEST_FPU                RT_BIT(0)
#define HWACCM_CHANGED_GUEST_CR0                RT_BIT(1)
#define HWACCM_CHANGED_GUEST_CR3                RT_BIT(2)
#define HWACCM_CHANGED_GUEST_CR4                RT_BIT(3)
#define HWACCM_CHANGED_GUEST_GDTR               RT_BIT(4)
#define HWACCM_CHANGED_GUEST_IDTR               RT_BIT(5)
#define HWACCM_CHANGED_GUEST_LDTR               RT_BIT(6)
#define HWACCM_CHANGED_GUEST_TR                 RT_BIT(7)
#define HWACCM_CHANGED_GUEST_MSR                RT_BIT(8)
#define HWACCM_CHANGED_GUEST_SEGMENT_REGS       RT_BIT(9)
#define HWACCM_CHANGED_GUEST_DEBUG              RT_BIT(10)
#define HWACCM_CHANGED_HOST_CONTEXT             RT_BIT(11)

/** All guest-state flags plus the host context flag. */
#define HWACCM_CHANGED_ALL              (  HWACCM_CHANGED_GUEST_SEGMENT_REGS \
                                         | HWACCM_CHANGED_GUEST_CR0          \
                                         | HWACCM_CHANGED_GUEST_CR3          \
                                         | HWACCM_CHANGED_GUEST_CR4          \
                                         | HWACCM_CHANGED_GUEST_GDTR         \
                                         | HWACCM_CHANGED_GUEST_IDTR         \
                                         | HWACCM_CHANGED_GUEST_LDTR         \
                                         | HWACCM_CHANGED_GUEST_TR           \
                                         | HWACCM_CHANGED_GUEST_MSR          \
                                         | HWACCM_CHANGED_GUEST_FPU          \
                                         | HWACCM_CHANGED_GUEST_DEBUG        \
                                         | HWACCM_CHANGED_HOST_CONTEXT)

/** All guest-state flags (everything except HWACCM_CHANGED_HOST_CONTEXT). */
#define HWACCM_CHANGED_ALL_GUEST        (  HWACCM_CHANGED_GUEST_SEGMENT_REGS \
                                         | HWACCM_CHANGED_GUEST_CR0          \
                                         | HWACCM_CHANGED_GUEST_CR3          \
                                         | HWACCM_CHANGED_GUEST_CR4          \
                                         | HWACCM_CHANGED_GUEST_GDTR         \
                                         | HWACCM_CHANGED_GUEST_IDTR         \
                                         | HWACCM_CHANGED_GUEST_LDTR         \
                                         | HWACCM_CHANGED_GUEST_TR           \
                                         | HWACCM_CHANGED_GUEST_MSR          \
                                         | HWACCM_CHANGED_GUEST_DEBUG        \
                                         | HWACCM_CHANGED_GUEST_FPU)

/** @} */

/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HWACCM_MAX_TLB_SHOOTDOWN_PAGES          8

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HWACCM_EPT_IDENTITY_PG_TABLE_SIZE       PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HWACCM_VTX_TSS_SIZE                     (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)
/** Total guest mapped memory needed. */
#define HWACCM_VTX_TOTAL_DEVHEAP_MEM            (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)

/** Enable for TPR guest patching. */
#define VBOX_HWACCM_WITH_GUEST_PATCHING

/** HWACCM saved state (SSM) version.
 * Version 5 adds the TPR patching state; version 4 is the pre-patching layout. */
#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
# define HWACCM_SSM_VERSION                 5
# define HWACCM_SSM_VERSION_NO_PATCHING     4
#else
# define HWACCM_SSM_VERSION                 4
# define HWACCM_SSM_VERSION_NO_PATCHING     4
#endif
/** SSM version used by VirtualBox 2.0.x. */
#define HWACCM_SSM_VERSION_2_0_X            3
137
/**
 * Global per-cpu information. (host)
 */
typedef struct HMGLOBLCPUINFO
{
    /** The CPU ID. */
    RTCPUID             idCpu;
    /** The memory object. */
    RTR0MEMOBJ          hMemObj;
    /** Current ASID (AMD-V) / VPID (Intel). */
    uint32_t            uCurrentASID;
    /** TLB flush count. */
    uint32_t            cTLBFlushes;

    /** Whether to flush each new ASID/VPID before use. */
    bool                fFlushASIDBeforeUse;

    /** Configured for VT-x or AMD-V. */
    bool                fConfigured;

    /** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
    bool                fIgnoreAMDVInUseError;

    /** In use by our code. (for power suspend) */
    volatile bool       fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
166
/**
 * Type of pending guest I/O operation (see HWACCMCPU::PendingIO).
 */
typedef enum
{
    /** No pending I/O / invalid. */
    HWACCMPENDINGIO_INVALID = 0,
    /** Pending I/O port read. */
    HWACCMPENDINGIO_PORT_READ,
    /** Pending I/O port write. */
    HWACCMPENDINGIO_PORT_WRITE,
    /** Pending string I/O read. */
    HWACCMPENDINGIO_STRING_READ,
    /** Pending string I/O write. */
    HWACCMPENDINGIO_STRING_WRITE,
    /** The usual 32-bit paranoia. */
    HWACCMPENDINGIO_32BIT_HACK = 0x7fffffff
} HWACCMPENDINGIO;
177
178
/**
 * Type of patched TPR access instruction (see HWACCMTPRPATCH::enmType).
 */
typedef enum
{
    /** Invalid/unset. */
    HWACCMTPRINSTR_INVALID,
    /** TPR read. */
    HWACCMTPRINSTR_READ,
    /** TPR read followed by a shift-right-4 (presumably to extract the TPR
     *  priority nibble — confirm against the patching code). */
    HWACCMTPRINSTR_READ_SHR4,
    /** TPR write from a register operand. */
    HWACCMTPRINSTR_WRITE_REG,
    /** TPR write of an immediate operand. */
    HWACCMTPRINSTR_WRITE_IMM,
    /** Instruction replaced by a jump (see HWACCMTPRPATCH::pJumpTarget). */
    HWACCMTPRINSTR_JUMP_REPLACEMENT,
    /** The usual 32-bit paranoia. */
    HWACCMTPRINSTR_32BIT_HACK = 0x7fffffff
} HWACCMTPRINSTR;
190
/**
 * A guest TPR access patch record.
 */
typedef struct
{
    /** The key is the address of patched instruction. (32 bits GC ptr) */
    AVLOU32NODECORE         Core;
    /** Original opcode. */
    uint8_t                 aOpcode[16];
    /** Instruction size. */
    uint32_t                cbOp;
    /** Replacement opcode. */
    uint8_t                 aNewOpcode[16];
    /** Replacement instruction size. */
    uint32_t                cbNewOp;
    /** Instruction type. */
    HWACCMTPRINSTR          enmType;
    /** Source operand. */
    uint32_t                uSrcOperand;
    /** Destination operand. */
    uint32_t                uDstOperand;
    /** Number of times the instruction caused a fault. */
    uint32_t                cFaults;
    /** Patch address of the jump replacement. */
    RTGCPTR32               pJumpTarget;
} HWACCMTPRPATCH;
/** Pointer to HWACCMTPRPATCH. */
typedef HWACCMTPRPATCH *PHWACCMTPRPATCH;
216
/**
 * Switcher function, HC to RC.
 *
 * @param   pVM             The VM handle.
 * @param   uOffsetVMCPU    VMCPU offset from pVM
 * @returns Return code indicating the action to take.
 */
typedef DECLCALLBACK (int) FNHWACCMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU);
/** Pointer to switcher function. */
typedef FNHWACCMSWITCHERHC *PFNHWACCMSWITCHERHC;
227
/**
 * HWACCM VM Instance data.
 * Changes to this must be checked against the padding of the hwaccm union in VM!
 */
typedef struct HWACCM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;

    /** Set when hardware acceleration is allowed. */
    bool                        fAllowed;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;

    /** Set if large pages are enabled (requires nested paging). */
    bool                        fLargePages;

    /** Set if we can support 64-bit guests or not. */
    bool                        fAllow64BitGuests;

    /** Set if an IO-APIC is configured for this VM. */
    bool                        fHasIoApic;

    /** Set when TPR patching is allowed. */
    bool                        fTRPPatchingAllowed;

    /** Set when we initialize VT-x or AMD-V once for all CPUs. */
    bool                        fGlobalInit;

    /** Set when TPR patching is active. */
    bool                        fTPRPatchingActive;
    /** Explicit alignment padding. */
    bool                        u8Alignment[6];

    /** And mask for copying register contents. */
    uint64_t                    u64RegisterMask;

    /** Maximum ASID allowed. */
    uint32_t                    uMaxASID;

    /** The maximum number of resumes loops allowed in ring-0 (safety precaution).
     * This number is set much higher when RTThreadPreemptIsPending is reliable. */
    uint32_t                    cMaxResumeLoops;

    /** Guest allocated memory for patching purposes. */
    RTGCPTR                     pGuestPatchMem;
    /** Current free pointer inside the patch block. */
    RTGCPTR                     pFreeGuestPatchMem;
    /** Size of the guest patch memory block. */
    uint32_t                    cbGuestPatchMem;
    /** Explicit alignment padding. */
    uint32_t                    uPadding1;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /** 32 to 64 bits switcher entrypoint. */
    R0PTRTYPE(PFNHWACCMSWITCHERHC) pfnHost32ToGuest64R0;

    /** AMD-V 64 bits vmrun handler. */
    RTRCPTR                     pfnSVMGCVMRun64;

    /** VT-x 64 bits vmlaunch handler. */
    RTRCPTR                     pfnVMXGCStartVM64;

    /** RC handler to setup the 64 bits FPU state. */
    RTRCPTR                     pfnSaveGuestFPU64;

    /** RC handler to setup the 64 bits debug state. */
    RTRCPTR                     pfnSaveGuestDebug64;

    /** Test handler. */
    RTRCPTR                     pfnTest64;

    /** Explicit alignment padding. */
    RTRCPTR                     uAlignment[2];
/*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    uint32_t                    u32Alignment[1]; */
#endif

    /** VT-x specific data. */
    struct
    {
        /** Set by the ring-0 side of HWACCM to indicate VMX is supported by the
         * CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVPID;

        /** Set if VT-x VPID is allowed. */
        bool                        fAllowVPID;

        /** Set if unrestricted guest execution is allowed (real and protected mode without paging). */
        bool                        fUnrestrictedGuest;

        /** Whether we're using the preemption timer or not. */
        bool                        fUsePreemptTimer;
        /** The shift mask employed by the VMX-Preemption timer. */
        uint8_t                     cPreemptTimerShift;

        /** Explicit alignment padding. */
        bool                        uAlignment[1];

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the APIC physical page (serves for filtering accesses). */
        RTR0MEMOBJ                  pMemObjAPIC;
        /** Physical address of the APIC physical page (serves for filtering accesses). */
        RTHCPHYS                    pAPICPhys;
        /** Virtual address of the APIC physical page (serves for filtering accesses). */
        R0PTRTYPE(uint8_t *)        pAPIC;

        /** R0 memory object for the MSR entry load page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSREntryLoad;
        /** Physical address of the MSR entry load page (guest MSRs). */
        RTHCPHYS                    pMSREntryLoadPhys;
        /** Virtual address of the MSR entry load page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSREntryLoad;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        /** Scratch page used by the crash dump magic (debugging aid). */
        RTR0MEMOBJ                  pMemObjScratch;
        RTHCPHYS                    pScratchPhys;
        R0PTRTYPE(uint8_t *)        pScratch;
#endif
        /** R0 memory object for the MSR exit store page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitStore;
        /** Physical address of the MSR exit store page (guest MSRs). */
        RTHCPHYS                    pMSRExitStorePhys;
        /** Virtual address of the MSR exit store page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitStore;

        /** R0 memory object for the MSR exit load page (host MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitLoad;
        /** Physical address of the MSR exit load page (host MSRs). */
        RTHCPHYS                    pMSRExitLoadPhys;
        /** Virtual address of the MSR exit load page (host MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitLoad;

        /** Ring 0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        /** Explicit alignment padding. */
        uint32_t                    u32Alignment;
#endif
        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t                    hostCR4;

        /** Host EFER value (set by ring-0 VMX init) */
        uint64_t                    hostEFER;

        /** VMX MSR values (capability/control MSRs read by ring-0 init). */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_proc_ctls2;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
            uint64_t                vmx_eptcaps;
        } msr;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH_EPT               enmFlushEPT;
        VMX_FLUSH_VPID              enmFlushVPID;
    } vmx;

    /** AMD-V specific data. */
    struct
    {
        /** Set by the ring-0 side of HWACCM to indicate SVM is supported by the
         * CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool                        fAlwaysFlushTLB;
        /** Set when the hack to ignore VERR_SVM_IN_USE is active. */
        bool                        fIgnoreInUseError;

        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ                  pMemObjIOBitmap;
        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS                    pIOBitmapPhys;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pIOBitmap;

        /** HWCR msr (for diagnostics) */
        uint64_t                    msrHWCR;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t                    u32Features;
    } svm;

    /**
     * AVL tree with all patches (active or disabled) sorted by guest instruction address
     */
    AVLOU32TREE                     PatchTree;
    /** Number of active entries in aPatches. */
    uint32_t                        cPatches;
    /** Fixed-size pool of TPR patch records. */
    HWACCMTPRPATCH                  aPatches[64];

    /** Cached CPUID feature leaves. */
    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    /** Saved error from detection */
    int32_t                         lLastError;

    /** HWACCMR0Init was run */
    bool                            fHWACCMR0Init;
    /** Explicit alignment padding. */
    bool                            u8Alignment1[7];

    STAMCOUNTER                     StatTPRPatchSuccess;
    STAMCOUNTER                     StatTPRPatchFailure;
    STAMCOUNTER                     StatTPRReplaceSuccess;
    STAMCOUNTER                     StatTPRReplaceFailure;
} HWACCM;
/** Pointer to HWACCM VM instance data. */
typedef HWACCM *PHWACCM;
463
/** Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY                     128

/**
 * Structure for storing read and write VMCS actions.
 * Batches VMCS field accesses so the real VMREAD/VMWRITEs can be done in one go.
 */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    /** Magic marker for searching in crash dumps. */
    uint8_t         aMagic[16];
    uint64_t        uMagic;
    uint64_t        u64TimeEntry;
    uint64_t        u64TimeSwitch;
    uint64_t        cResume;
    uint64_t        interPD;
    uint64_t        pSwitcher;
    uint32_t        uPos;
    uint32_t        idCpu;
#endif
    /** CR2 is saved here for EPT syncing. */
    uint64_t        cr2;
    /** Queued VMWRITEs: field ids and values to flush to the VMCS. */
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Write;
    /** Cached VMREADs: field ids and the values last read from the VMCS. */
    struct
    {
        uint32_t    cValidEntries;
        uint32_t    uAlignment;
        uint32_t    aField[VMCSCACHE_MAX_ENTRY];
        uint64_t    aFieldVal[VMCSCACHE_MAX_ENTRY];
    } Read;
#ifdef DEBUG
    /** Debug-only: snapshot of inputs on entry to the world switch. */
    struct
    {
        RTHCPHYS    HCPhysCpuPage;
        RTHCPHYS    HCPhysVMCS;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
    } TestIn;
    /** Debug-only: snapshot of state on exit from the world switch. */
    struct
    {
        RTHCPHYS    HCPhysVMCS;
        RTGCPTR     pCache;
        RTGCPTR     pCtx;
        uint64_t    eflags;
        uint64_t    cr8;
    } TestOut;
    /** Debug-only: general purpose scratch values. */
    struct
    {
        uint64_t    param1;
        uint64_t    param2;
        uint64_t    param3;
        uint64_t    param4;
    } ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
525
/** VMX StartVM function.
 * @param   fResume     Whether to resume (VMRESUME) or launch (VMLAUNCH) —
 *                      presumably; confirm against the assembly implementation.
 * @param   pCtx        Guest CPU context.
 * @param   pCache      The VMCS access cache.
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle. */
typedef DECLCALLBACK(int) FNHWACCMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef R0PTRTYPE(FNHWACCMVMXSTARTVM *) PFNHWACCMVMXSTARTVM;

/** SVM VMRun function.
 * @param   pVMCBHostPhys   Physical address of the host VMCB.
 * @param   pVMCBPhys       Physical address of the guest VMCB.
 * @param   pCtx            Guest CPU context.
 * @param   pVM             The VM handle.
 * @param   pVCpu           The VMCPU handle. */
typedef DECLCALLBACK(int) FNHWACCMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef R0PTRTYPE(FNHWACCMSVMVMRUN *) PFNHWACCMSVMVMRUN;
535
/**
 * HWACCM VMCPU Instance data.
 */
typedef struct HWACCMCPU
{
    /** Old style FPU reporting trap mask override performed (optimization) */
    bool                        fFPUOldStyleOverride;

    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;

    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;

    /** Set when we're using VT-x or AMD-V at that moment. */
    bool                        fActive;

    /** Set when the TLB has been checked until we return from the world switch. */
    volatile bool               fCheckedTLBFlush;
    /** Explicit alignment padding. */
    uint8_t                     bAlignment[3];

    /** World switch exit counter. */
    volatile uint32_t           cWorldSwitchExits;

    /** HWACCM_CHANGED_* flags. */
    uint32_t                    fContextUseFlags;

    /** Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
    RTCPUID                     idLastCpu;

    /** TLB flush count */
    uint32_t                    cTLBFlushes;

    /** Current ASID in use by the VM */
    uint32_t                    uCurrentASID;

    /** Explicit alignment padding. */
    uint32_t                    u32Alignment;

    /** VT-x per-VCPU data. */
    struct
    {
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    HCPhysVMCS;
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  hMemObjVMCS;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pvVMCS;

        /** Ring 0 handlers for VT-x. */
        PFNHWACCMVMXSTARTVM         pfnStartVM;

#if HC_ARCH_BITS == 32
        /** Explicit alignment padding. */
        uint32_t                    u32Alignment;
#endif

        /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
        uint64_t                    proc_ctls;

        /** Current VMX_VMCS_CTRL_PROC_EXEC2_CONTROLS. */
        uint64_t                    proc_ctls2;

        /** Physical address of the virtual APIC page for TPR caching. */
        RTHCPHYS                    HCPhysVAPIC;
        /** R0 memory object for the virtual APIC page for TPR caching. */
        RTR0MEMOBJ                  hMemObjVAPIC;
        /** Virtual address of the virtual APIC page for TPR caching. */
        R0PTRTYPE(uint8_t *)        pbVAPIC;

        /** Current CR0 mask. */
        uint64_t                    cr0_mask;
        /** Current CR4 mask. */
        uint64_t                    cr4_mask;

        /** Current EPTP. */
        RTHCPHYS                    GCPhysEPTP;

        /** Physical address of the MSR bitmap (1 page). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** R0 memory object for the MSR bitmap (1 page). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Virtual address of the MSR bitmap (1 page). */
        R0PTRTYPE(uint8_t *)        pMSRBitmap;

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the guest MSR load area (1 page). */
        RTHCPHYS                    pGuestMSRPhys;
        /** R0 memory object for the guest MSR load area (1 page). */
        RTR0MEMOBJ                  pMemObjGuestMSR;
        /** Virtual address of the guest MSR load area (1 page). */
        R0PTRTYPE(uint8_t *)        pGuestMSR;

        /** Physical address of the MSR load area (1 page). */
        RTHCPHYS                    pHostMSRPhys;
        /** R0 memory object for the MSR load area (1 page). */
        RTR0MEMOBJ                  pMemObjHostMSR;
        /** Virtual address of the MSR load area (1 page). */
        R0PTRTYPE(uint8_t *)        pHostMSR;
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /** Number of automatically loaded/restored MSRs. */
        uint32_t                    cCachedMSRs;
        /** Explicit alignment padding. */
        uint32_t                    uAlignement;

        /** Last use TSC offset value. (cached) */
        uint64_t                    u64TSCOffset;

        /** VMCS cache. */
        VMCSCACHE                   VMCSCache;

        /** Real-mode emulation state. */
        struct
        {
            X86EFLAGS               eflags;
            uint32_t                fValid;
        } RealMode;

        /** Details of the last VM-entry/exit failure (for diagnostics). */
        struct
        {
            uint64_t                u64VMCSPhys;
            uint32_t                ulVMCSRevision;
            uint32_t                ulInstrError;
            uint32_t                ulExitReason;
            RTCPUID                 idEnteredCpu;
            RTCPUID                 idCurrentCpu;
            uint32_t                padding;
        } lasterror;

        /** The last seen guest paging mode (by VT-x). */
        PGMMODE                     enmLastSeenGuestMode;
        /** Current guest paging mode (as seen by HWACCMR3PagingModeChanged). */
        PGMMODE                     enmCurrGuestMode;
        /** Previous guest paging mode (as seen by HWACCMR3PagingModeChanged). */
        PGMMODE                     enmPrevGuestMode;
    } vmx;

    /** AMD-V per-VCPU data. */
    struct
    {
        /** R0 memory object for the host VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCBHost;
        /** Physical address of the host VM control block (VMCB). */
        RTHCPHYS                    pVMCBHostPhys;
        /** Virtual address of the host VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCBHost;

        /** R0 memory object for the VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCB;
        /** Physical address of the VM control block (VMCB). */
        RTHCPHYS                    pVMCBPhys;
        /** Virtual address of the VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCB;

        /** Ring 0 handler for AMD-V VMRun. */
        PFNHWACCMSVMVMRUN           pfnVMRun;

        /** R0 memory object for the MSR bitmap (8kb). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Physical address of the MSR bitmap (8kb). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pMSRBitmap;
    } svm;

    /** Event injection state. */
    struct
    {
        uint32_t                    fPending;
        uint32_t                    errCode;
        uint64_t                    intInfo;
    } Event;

    /** IO Block emulation state. */
    struct
    {
        bool                        fEnabled;
        /** Explicit alignment padding. */
        uint8_t                     u8Align[7];

        /** RIP at the start of the io code we wish to emulate in the recompiler. */
        RTGCPTR                     GCPtrFunctionEip;

        uint64_t                    cr0;
    } EmulateIoBlock;

    /** Pending I/O operation to be completed after the world switch. */
    struct
    {
        /** Pending IO operation type. */
        HWACCMPENDINGIO             enmType;
        uint32_t                    uPadding;
        RTGCPTR                     GCPtrRip;
        RTGCPTR                     GCPtrRipNext;
        union
        {
            struct
            {
                unsigned            uPort;
                unsigned            uAndVal;
                unsigned            cbSize;
            } Port;
            uint64_t                aRaw[2];
        } s;
    } PendingIO;

    /** Currently shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HWACCMR0Enter and cleared in HWACCMR0Leave. */
    RTCPUID                     idEnteredCpu;

    /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    struct
    {
        RTGCPTR                     aPages[HWACCM_MAX_TLB_SHOOTDOWN_PAGES];
        unsigned                    cPages;
    } TlbShootdown;

    /** For saving stack space, the disassembler state is allocated here instead of
     * on the stack. */
    DISCPUSTATE                 DisState;

    /** Explicit alignment padding. */
    uint32_t                    padding2[1];

    /** @name Statistics.
     * @{ */
    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit1;
    STAMPROFILEADV              StatExit2;
#if 1 /* temporary for tracking down darwin issues. */
    STAMPROFILEADV              StatExit2Sub1;
    STAMPROFILEADV              StatExit2Sub2;
    STAMPROFILEADV              StatExit2Sub3;
#endif
    STAMPROFILEADV              StatInGC;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMPROFILEADV              StatWorldSwitch3264;
#endif
    STAMPROFILEADV              StatPoke;
    STAMPROFILEADV              StatSpinPoke;
    STAMPROFILEADV              StatSpinPokeFailed;

    STAMCOUNTER                 StatIntInject;

    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;
    STAMCOUNTER                 StatExitShadowPFEM;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitGuestBP;
    STAMCOUNTER                 StatExitGuestXF;
    STAMCOUNTER                 StatExitGuestXcpUnk;
    STAMCOUNTER                 StatExitInvpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitRdpmc;
    STAMCOUNTER                 StatExitCli;
    STAMCOUNTER                 StatExitSti;
    STAMCOUNTER                 StatExitPushf;
    STAMCOUNTER                 StatExitPopf;
    STAMCOUNTER                 StatExitIret;
    STAMCOUNTER                 StatExitInt;
    STAMCOUNTER                 StatExitCRxWrite[16];
    STAMCOUNTER                 StatExitCRxRead[16];
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitRdmsr;
    STAMCOUNTER                 StatExitWrmsr;
    STAMCOUNTER                 StatExitCLTS;
    STAMCOUNTER                 StatExitHlt;
    STAMCOUNTER                 StatExitMwait;
    STAMCOUNTER                 StatExitMonitor;
    STAMCOUNTER                 StatExitLMSW;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIrqWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatExitPreemptPending;
    STAMCOUNTER                 StatExitMTF;
    STAMCOUNTER                 StatIntReinject;
    STAMCOUNTER                 StatPendingHostIrq;

    STAMCOUNTER                 StatFlushPage;
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTLB;
    STAMCOUNTER                 StatFlushTLBManual;
    STAMCOUNTER                 StatFlushPageInvlpg;
    STAMCOUNTER                 StatFlushTLBWorldSwitch;
    STAMCOUNTER                 StatNoFlushTLBWorldSwitch;
    STAMCOUNTER                 StatFlushTLBCRxChange;
    STAMCOUNTER                 StatFlushASID;
    STAMCOUNTER                 StatFlushTLBInvlpga;
    STAMCOUNTER                 StatTlbShootdown;
    STAMCOUNTER                 StatTlbShootdownFlush;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchToR3;

    STAMCOUNTER                 StatTSCOffset;
    STAMCOUNTER                 StatTSCIntercept;
    STAMCOUNTER                 StatTSCInterceptOverFlow;

    STAMCOUNTER                 StatExitReasonNPF;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIOCheck;

    STAMCOUNTER                 StatLoadMinimal;
    STAMCOUNTER                 StatLoadFull;

#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    STAMCOUNTER                 StatFpu64SwitchBack;
    STAMCOUNTER                 StatDebug64SwitchBack;
#endif

#ifdef VBOX_WITH_STATISTICS
    /** Per-exit-reason counter arrays (R3 and R0 mappings of the same data). */
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
    /** Per-injected-IRQ counter arrays (R3 and R0 mappings of the same data). */
    R3PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqs;
    R0PTRTYPE(PSTAMCOUNTER)     paStatInjectedIrqsR0;
#endif
    /** @} */
} HWACCMCPU;
/** Pointer to HWACCM VMCPU instance data. */
typedef HWACCMCPU *PHWACCMCPU;
866
867
#ifdef IN_RING0

/** Returns the global per-cpu info for the CPU we are currently running on. */
VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void);
/** Returns the global per-cpu info for the given host CPU id. */
VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);


#ifdef VBOX_STRICT
/** Dumps the guest CPU state for debugging (strict builds only). */
VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
/** Dumps a host descriptor table entry for debugging (strict builds only). */
VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
#else
/* No-op stubs in non-strict builds. */
# define HWACCMDumpRegs(a, b ,c) do { } while (0)
# define HWACCMR0DumpDescriptor(a, b, c) do { } while (0)
#endif

# ifdef VBOX_WITH_KERNEL_USING_XMM
/** Wrapper that saves/restores XMM state around the VMX StartVM call. */
DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
/** Wrapper that saves/restores XMM state around the SVM VMRun call. */
DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
# endif

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param   pGdtr   Where to store the 64-bit GDTR.
 * @param   pIdtr   Where to store the 64-bit IDTR.
 */
DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);

/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 */
DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
# endif

#endif /* IN_RING0 */
903
904/** @} */
905
906RT_C_DECLS_END
907
908#endif
909
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette