VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/HWACCM.cpp@38990

Last change on this file since 38990 was 38838, checked in by vboxsync, 14 years ago

VMM,++: Try to fix the async reset, suspend and power-off problems in PDM wrt conflicting VMM requests. Split them into priority requests and normal requests. The priority requests can safely be processed when PDM is doing async state change waits, the normal ones cannot. (The problem I bumped into was an unmap-chunk request from PGM being processed during PDMR3Reset, causing a recursive VMMR3EmtRendezvous deadlock.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 134.5 KB
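
The split described in the change log above can be illustrated with a minimal sketch; all names here (VMMREQ, processQueue, emtProcessRequests, fAsyncStateChange) are illustrative placeholders, not the actual VMM/PDM request API:

    #include <stdbool.h>

    /* A pending cross-thread request and a trivial FIFO drain helper. */
    typedef struct VMMREQ { struct VMMREQ *pNext; void (*pfnExec)(void); } VMMREQ;

    static void processQueue(VMMREQ **ppHead)
    {
        while (*ppHead)
        {
            VMMREQ *pReq = *ppHead;
            *ppHead = pReq->pNext;
            pReq->pfnExec();
        }
    }

    /* While PDM is waiting for an async state change (reset/suspend/power-off),
     * only priority requests are drained; normal requests (e.g. a PGM unmap-chunk
     * request) stay queued, avoiding the recursive VMMR3EmtRendezvous deadlock
     * described in the change log. */
    static void emtProcessRequests(VMMREQ **ppPriority, VMMREQ **ppNormal, bool fAsyncStateChange)
    {
        processQueue(ppPriority);            /* always safe to process */
        if (!fAsyncStateChange)
            processQueue(ppNormal);          /* deferred until the wait completes */
    }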
1/* $Id: HWACCM.cpp 38838 2011-09-23 11:21:55Z vboxsync $ */
2/** @file
3 * HWACCM - Intel/AMD VM Hardware Support Manager
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HWACCM
22#include <VBox/vmm/cpum.h>
23#include <VBox/vmm/stam.h>
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/pdmapi.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/ssm.h>
28#include <VBox/vmm/trpm.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/patm.h>
32#include <VBox/vmm/csam.h>
33#include <VBox/vmm/selm.h>
34#include <VBox/vmm/rem.h>
35#include <VBox/vmm/hwacc_vmx.h>
36#include <VBox/vmm/hwacc_svm.h>
37#include "HWACCMInternal.h"
38#include <VBox/vmm/vm.h>
39#include <VBox/err.h>
40#include <VBox/param.h>
41
42#include <iprt/assert.h>
43#include <VBox/log.h>
44#include <iprt/asm.h>
45#include <iprt/asm-amd64-x86.h>
46#include <iprt/string.h>
47#include <iprt/env.h>
48#include <iprt/thread.h>
49
50/*******************************************************************************
51* Global Variables *
52*******************************************************************************/
53#ifdef VBOX_WITH_STATISTICS
54# define EXIT_REASON(def, val, str) #def " - " #val " - " str
55# define EXIT_REASON_NIL() NULL
56/** Exit reason descriptions for VT-x, used to describe statistics. */
57static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
58{
59 EXIT_REASON(VMX_EXIT_EXCEPTION , 0, "Exception or non-maskable interrupt (NMI)."),
60 EXIT_REASON(VMX_EXIT_EXTERNAL_IRQ , 1, "External interrupt."),
61 EXIT_REASON(VMX_EXIT_TRIPLE_FAULT , 2, "Triple fault."),
62 EXIT_REASON(VMX_EXIT_INIT_SIGNAL , 3, "INIT signal."),
63 EXIT_REASON(VMX_EXIT_SIPI , 4, "Start-up IPI (SIPI)."),
64 EXIT_REASON(VMX_EXIT_IO_SMI_IRQ , 5, "I/O system-management interrupt (SMI)."),
65 EXIT_REASON(VMX_EXIT_SMI_IRQ , 6, "Other SMI."),
66 EXIT_REASON(VMX_EXIT_IRQ_WINDOW , 7, "Interrupt window."),
67 EXIT_REASON_NIL(),
68 EXIT_REASON(VMX_EXIT_TASK_SWITCH , 9, "Task switch."),
69 EXIT_REASON(VMX_EXIT_CPUID , 10, "Guest software attempted to execute CPUID."),
70 EXIT_REASON_NIL(),
71 EXIT_REASON(VMX_EXIT_HLT , 12, "Guest software attempted to execute HLT."),
72 EXIT_REASON(VMX_EXIT_INVD , 13, "Guest software attempted to execute INVD."),
73 EXIT_REASON(VMX_EXIT_INVPG , 14, "Guest software attempted to execute INVLPG."),
74 EXIT_REASON(VMX_EXIT_RDPMC , 15, "Guest software attempted to execute RDPMC."),
75 EXIT_REASON(VMX_EXIT_RDTSC , 16, "Guest software attempted to execute RDTSC."),
76 EXIT_REASON(VMX_EXIT_RSM , 17, "Guest software attempted to execute RSM in SMM."),
77 EXIT_REASON(VMX_EXIT_VMCALL , 18, "Guest software executed VMCALL."),
78 EXIT_REASON(VMX_EXIT_VMCLEAR , 19, "Guest software executed VMCLEAR."),
79 EXIT_REASON(VMX_EXIT_VMLAUNCH , 20, "Guest software executed VMLAUNCH."),
80 EXIT_REASON(VMX_EXIT_VMPTRLD , 21, "Guest software executed VMPTRLD."),
81 EXIT_REASON(VMX_EXIT_VMPTRST , 22, "Guest software executed VMPTRST."),
82 EXIT_REASON(VMX_EXIT_VMREAD , 23, "Guest software executed VMREAD."),
83 EXIT_REASON(VMX_EXIT_VMRESUME , 24, "Guest software executed VMRESUME."),
84 EXIT_REASON(VMX_EXIT_VMWRITE , 25, "Guest software executed VMWRITE."),
85 EXIT_REASON(VMX_EXIT_VMXOFF , 26, "Guest software executed VMXOFF."),
86 EXIT_REASON(VMX_EXIT_VMXON , 27, "Guest software executed VMXON."),
87 EXIT_REASON(VMX_EXIT_CRX_MOVE , 28, "Control-register accesses."),
88 EXIT_REASON(VMX_EXIT_DRX_MOVE , 29, "Debug-register accesses."),
89 EXIT_REASON(VMX_EXIT_PORT_IO , 30, "I/O instruction."),
90 EXIT_REASON(VMX_EXIT_RDMSR , 31, "RDMSR. Guest software attempted to execute RDMSR."),
91 EXIT_REASON(VMX_EXIT_WRMSR , 32, "WRMSR. Guest software attempted to execute WRMSR."),
92 EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
93 EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD , 34, "VM-entry failure due to MSR loading."),
94 EXIT_REASON_NIL(),
95 EXIT_REASON(VMX_EXIT_MWAIT , 36, "Guest software executed MWAIT."),
96 EXIT_REASON_NIL(),
97 EXIT_REASON_NIL(),
98 EXIT_REASON(VMX_EXIT_MONITOR , 39, "Guest software attempted to execute MONITOR."),
99 EXIT_REASON(VMX_EXIT_PAUSE , 40, "Guest software attempted to execute PAUSE."),
100 EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK , 41, "VM-entry failure due to machine-check."),
101 EXIT_REASON_NIL(),
102 EXIT_REASON(VMX_EXIT_TPR , 43, "TPR below threshold. Guest software executed MOV to CR8."),
103 EXIT_REASON(VMX_EXIT_APIC_ACCESS , 44, "APIC access. Guest software attempted to access memory at a physical address on the APIC-access page."),
104 EXIT_REASON_NIL(),
105 EXIT_REASON(VMX_EXIT_XDTR_ACCESS , 46, "Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT."),
106 EXIT_REASON(VMX_EXIT_TR_ACCESS , 47, "Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR."),
107 EXIT_REASON(VMX_EXIT_EPT_VIOLATION , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
108 EXIT_REASON(VMX_EXIT_EPT_MISCONFIG , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
109 EXIT_REASON(VMX_EXIT_INVEPT , 50, "INVEPT. Guest software attempted to execute INVEPT."),
110 EXIT_REASON_NIL(),
111 EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."),
112 EXIT_REASON(VMX_EXIT_INVVPID , 53, "INVVPID. Guest software attempted to execute INVVPID."),
113 EXIT_REASON(VMX_EXIT_WBINVD , 54, "WBINVD. Guest software attempted to execute WBINVD."),
114 EXIT_REASON(VMX_EXIT_XSETBV , 55, "XSETBV. Guest software attempted to execute XSETBV."),
115 EXIT_REASON_NIL()
116};
117/** Exit reason descriptions for AMD-V, used to describe statistics. */
118static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
119{
120 EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."),
121 EXIT_REASON(SVM_EXIT_READ_CR1 , 1, "Read CR1."),
122 EXIT_REASON(SVM_EXIT_READ_CR2 , 2, "Read CR2."),
123 EXIT_REASON(SVM_EXIT_READ_CR3 , 3, "Read CR3."),
124 EXIT_REASON(SVM_EXIT_READ_CR4 , 4, "Read CR4."),
125 EXIT_REASON(SVM_EXIT_READ_CR5 , 5, "Read CR5."),
126 EXIT_REASON(SVM_EXIT_READ_CR6 , 6, "Read CR6."),
127 EXIT_REASON(SVM_EXIT_READ_CR7 , 7, "Read CR7."),
128 EXIT_REASON(SVM_EXIT_READ_CR8 , 8, "Read CR8."),
129 EXIT_REASON(SVM_EXIT_READ_CR9 , 9, "Read CR9."),
130 EXIT_REASON(SVM_EXIT_READ_CR10 , 10, "Read CR10."),
131 EXIT_REASON(SVM_EXIT_READ_CR11 , 11, "Read CR11."),
132 EXIT_REASON(SVM_EXIT_READ_CR12 , 12, "Read CR12."),
133 EXIT_REASON(SVM_EXIT_READ_CR13 , 13, "Read CR13."),
134 EXIT_REASON(SVM_EXIT_READ_CR14 , 14, "Read CR14."),
135 EXIT_REASON(SVM_EXIT_READ_CR15 , 15, "Read CR15."),
136 EXIT_REASON(SVM_EXIT_WRITE_CR0 , 16, "Write CR0."),
137 EXIT_REASON(SVM_EXIT_WRITE_CR1 , 17, "Write CR1."),
138 EXIT_REASON(SVM_EXIT_WRITE_CR2 , 18, "Write CR2."),
139 EXIT_REASON(SVM_EXIT_WRITE_CR3 , 19, "Write CR3."),
140 EXIT_REASON(SVM_EXIT_WRITE_CR4 , 20, "Write CR4."),
141 EXIT_REASON(SVM_EXIT_WRITE_CR5 , 21, "Write CR5."),
142 EXIT_REASON(SVM_EXIT_WRITE_CR6 , 22, "Write CR6."),
143 EXIT_REASON(SVM_EXIT_WRITE_CR7 , 23, "Write CR7."),
144 EXIT_REASON(SVM_EXIT_WRITE_CR8 , 24, "Write CR8."),
145 EXIT_REASON(SVM_EXIT_WRITE_CR9 , 25, "Write CR9."),
146 EXIT_REASON(SVM_EXIT_WRITE_CR10 , 26, "Write CR10."),
147 EXIT_REASON(SVM_EXIT_WRITE_CR11 , 27, "Write CR11."),
148 EXIT_REASON(SVM_EXIT_WRITE_CR12 , 28, "Write CR12."),
149 EXIT_REASON(SVM_EXIT_WRITE_CR13 , 29, "Write CR13."),
150 EXIT_REASON(SVM_EXIT_WRITE_CR14 , 30, "Write CR14."),
151 EXIT_REASON(SVM_EXIT_WRITE_CR15 , 31, "Write CR15."),
152 EXIT_REASON(SVM_EXIT_READ_DR0 , 32, "Read DR0."),
153 EXIT_REASON(SVM_EXIT_READ_DR1 , 33, "Read DR1."),
154 EXIT_REASON(SVM_EXIT_READ_DR2 , 34, "Read DR2."),
155 EXIT_REASON(SVM_EXIT_READ_DR3 , 35, "Read DR3."),
156 EXIT_REASON(SVM_EXIT_READ_DR4 , 36, "Read DR4."),
157 EXIT_REASON(SVM_EXIT_READ_DR5 , 37, "Read DR5."),
158 EXIT_REASON(SVM_EXIT_READ_DR6 , 38, "Read DR6."),
159 EXIT_REASON(SVM_EXIT_READ_DR7 , 39, "Read DR7."),
160 EXIT_REASON(SVM_EXIT_READ_DR8 , 40, "Read DR8."),
161 EXIT_REASON(SVM_EXIT_READ_DR9 , 41, "Read DR9."),
162 EXIT_REASON(SVM_EXIT_READ_DR10 , 42, "Read DR10."),
163 EXIT_REASON(SVM_EXIT_READ_DR11 , 43, "Read DR11"),
164 EXIT_REASON(SVM_EXIT_READ_DR12 , 44, "Read DR12."),
165 EXIT_REASON(SVM_EXIT_READ_DR13 , 45, "Read DR13."),
166 EXIT_REASON(SVM_EXIT_READ_DR14 , 46, "Read DR14."),
167 EXIT_REASON(SVM_EXIT_READ_DR15 , 47, "Read DR15."),
168 EXIT_REASON(SVM_EXIT_WRITE_DR0 , 48, "Write DR0."),
169 EXIT_REASON(SVM_EXIT_WRITE_DR1 , 49, "Write DR1."),
170 EXIT_REASON(SVM_EXIT_WRITE_DR2 , 50, "Write DR2."),
171 EXIT_REASON(SVM_EXIT_WRITE_DR3 , 51, "Write DR3."),
172 EXIT_REASON(SVM_EXIT_WRITE_DR4 , 52, "Write DR4."),
173 EXIT_REASON(SVM_EXIT_WRITE_DR5 , 53, "Write DR5."),
174 EXIT_REASON(SVM_EXIT_WRITE_DR6 , 54, "Write DR6."),
175 EXIT_REASON(SVM_EXIT_WRITE_DR7 , 55, "Write DR7."),
176 EXIT_REASON(SVM_EXIT_WRITE_DR8 , 56, "Write DR8."),
177 EXIT_REASON(SVM_EXIT_WRITE_DR9 , 57, "Write DR9."),
178 EXIT_REASON(SVM_EXIT_WRITE_DR10 , 58, "Write DR10."),
179 EXIT_REASON(SVM_EXIT_WRITE_DR11 , 59, "Write DR11."),
180 EXIT_REASON(SVM_EXIT_WRITE_DR12 , 60, "Write DR12."),
181 EXIT_REASON(SVM_EXIT_WRITE_DR13 , 61, "Write DR13."),
182 EXIT_REASON(SVM_EXIT_WRITE_DR14 , 62, "Write DR14."),
183 EXIT_REASON(SVM_EXIT_WRITE_DR15 , 63, "Write DR15."),
184 EXIT_REASON(SVM_EXIT_EXCEPTION_0 , 64, "Exception Vector 0 (0x0)."),
185 EXIT_REASON(SVM_EXIT_EXCEPTION_1 , 65, "Exception Vector 1 (0x1)."),
186 EXIT_REASON(SVM_EXIT_EXCEPTION_2 , 66, "Exception Vector 2 (0x2)."),
187 EXIT_REASON(SVM_EXIT_EXCEPTION_3 , 67, "Exception Vector 3 (0x3)."),
188 EXIT_REASON(SVM_EXIT_EXCEPTION_4 , 68, "Exception Vector 4 (0x4)."),
189 EXIT_REASON(SVM_EXIT_EXCEPTION_5 , 69, "Exception Vector 5 (0x5)."),
190 EXIT_REASON(SVM_EXIT_EXCEPTION_6 , 70, "Exception Vector 6 (0x6)."),
191 EXIT_REASON(SVM_EXIT_EXCEPTION_7 , 71, "Exception Vector 7 (0x7)."),
192 EXIT_REASON(SVM_EXIT_EXCEPTION_8 , 72, "Exception Vector 8 (0x8)."),
193 EXIT_REASON(SVM_EXIT_EXCEPTION_9 , 73, "Exception Vector 9 (0x9)."),
194 EXIT_REASON(SVM_EXIT_EXCEPTION_A , 74, "Exception Vector 10 (0xA)."),
195 EXIT_REASON(SVM_EXIT_EXCEPTION_B , 75, "Exception Vector 11 (0xB)."),
196 EXIT_REASON(SVM_EXIT_EXCEPTION_C , 76, "Exception Vector 12 (0xC)."),
197 EXIT_REASON(SVM_EXIT_EXCEPTION_D , 77, "Exception Vector 13 (0xD)."),
198 EXIT_REASON(SVM_EXIT_EXCEPTION_E , 78, "Exception Vector 14 (0xE)."),
199 EXIT_REASON(SVM_EXIT_EXCEPTION_F , 79, "Exception Vector 15 (0xF)."),
200 EXIT_REASON(SVM_EXIT_EXCEPTION_10 , 80, "Exception Vector 16 (0x10)."),
201 EXIT_REASON(SVM_EXIT_EXCEPTION_11 , 81, "Exception Vector 17 (0x11)."),
202 EXIT_REASON(SVM_EXIT_EXCEPTION_12 , 82, "Exception Vector 18 (0x12)."),
203 EXIT_REASON(SVM_EXIT_EXCEPTION_13 , 83, "Exception Vector 19 (0x13)."),
204 EXIT_REASON(SVM_EXIT_EXCEPTION_14 , 84, "Exception Vector 20 (0x14)."),
205 EXIT_REASON(SVM_EXIT_EXCEPTION_15 , 85, "Exception Vector 21 (0x15)."),
206 EXIT_REASON(SVM_EXIT_EXCEPTION_16 , 86, "Exception Vector 22 (0x16)."),
207 EXIT_REASON(SVM_EXIT_EXCEPTION_17 , 87, "Exception Vector 23 (0x17)."),
208 EXIT_REASON(SVM_EXIT_EXCEPTION_18 , 88, "Exception Vector 24 (0x18)."),
209 EXIT_REASON(SVM_EXIT_EXCEPTION_19 , 89, "Exception Vector 25 (0x19)."),
210 EXIT_REASON(SVM_EXIT_EXCEPTION_1A , 90, "Exception Vector 26 (0x1A)."),
211 EXIT_REASON(SVM_EXIT_EXCEPTION_1B , 91, "Exception Vector 27 (0x1B)."),
212 EXIT_REASON(SVM_EXIT_EXCEPTION_1C , 92, "Exception Vector 28 (0x1C)."),
213 EXIT_REASON(SVM_EXIT_EXCEPTION_1D , 93, "Exception Vector 29 (0x1D)."),
214 EXIT_REASON(SVM_EXIT_EXCEPTION_1E , 94, "Exception Vector 30 (0x1E)."),
215 EXIT_REASON(SVM_EXIT_EXCEPTION_1F , 95, "Exception Vector 31 (0x1F)."),
216 EXIT_REASON(SVM_EXIT_INTR , 96, "Physical maskable interrupt."),
217 EXIT_REASON(SVM_EXIT_NMI , 97, "Physical non-maskable interrupt."),
218 EXIT_REASON(SVM_EXIT_SMI , 98, "System management interrupt."),
219 EXIT_REASON(SVM_EXIT_INIT , 99, "Physical INIT signal."),
220 EXIT_REASON(SVM_EXIT_VINTR ,100, "Virtual interrupt."),
221 EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE ,101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
222 EXIT_REASON(SVM_EXIT_IDTR_READ ,102, "Read IDTR."),
223 EXIT_REASON(SVM_EXIT_GDTR_READ ,103, "Read GDTR."),
224 EXIT_REASON(SVM_EXIT_LDTR_READ ,104, "Read LDTR."),
225 EXIT_REASON(SVM_EXIT_TR_READ ,105, "Read TR."),
226 EXIT_REASON(SVM_EXIT_IDTR_WRITE ,106, "Write IDTR."),
227 EXIT_REASON(SVM_EXIT_GDTR_WRITE ,107, "Write GDTR."),
228 EXIT_REASON(SVM_EXIT_LDTR_WRITE ,108, "Write LDTR."),
229 EXIT_REASON(SVM_EXIT_TR_WRITE ,109, "Write TR."),
230 EXIT_REASON(SVM_EXIT_RDTSC ,110, "RDTSC instruction."),
231 EXIT_REASON(SVM_EXIT_RDPMC ,111, "RDPMC instruction."),
232 EXIT_REASON(SVM_EXIT_PUSHF ,112, "PUSHF instruction."),
233 EXIT_REASON(SVM_EXIT_POPF ,113, "POPF instruction."),
234 EXIT_REASON(SVM_EXIT_CPUID ,114, "CPUID instruction."),
235 EXIT_REASON(SVM_EXIT_RSM ,115, "RSM instruction."),
236 EXIT_REASON(SVM_EXIT_IRET ,116, "IRET instruction."),
237 EXIT_REASON(SVM_EXIT_SWINT ,117, "Software interrupt (INTn instructions)."),
238 EXIT_REASON(SVM_EXIT_INVD ,118, "INVD instruction."),
239 EXIT_REASON(SVM_EXIT_PAUSE ,119, "PAUSE instruction."),
240 EXIT_REASON(SVM_EXIT_HLT ,120, "HLT instruction."),
241 EXIT_REASON(SVM_EXIT_INVLPG ,121, "INVLPG instruction."),
242 EXIT_REASON(SVM_EXIT_INVLPGA ,122, "INVLPGA instruction."),
243 EXIT_REASON(SVM_EXIT_IOIO ,123, "IN/OUT accessing protected port (EXITINFO1 field provides more information)."),
244 EXIT_REASON(SVM_EXIT_MSR ,124, "RDMSR or WRMSR access to protected MSR."),
245 EXIT_REASON(SVM_EXIT_TASK_SWITCH ,125, "Task switch."),
246 EXIT_REASON(SVM_EXIT_FERR_FREEZE ,126, "FP legacy handling enabled, and processor is frozen in an x87/mmx instruction waiting for an interrupt"),
247 EXIT_REASON(SVM_EXIT_SHUTDOWN ,127, "Shutdown."),
248 EXIT_REASON(SVM_EXIT_VMRUN ,128, "VMRUN instruction."),
249 EXIT_REASON(SVM_EXIT_VMMCALL ,129, "VMMCALL instruction."),
250 EXIT_REASON(SVM_EXIT_VMLOAD ,130, "VMLOAD instruction."),
251 EXIT_REASON(SVM_EXIT_VMSAVE ,131, "VMSAVE instruction."),
252 EXIT_REASON(SVM_EXIT_STGI ,132, "STGI instruction."),
253 EXIT_REASON(SVM_EXIT_CLGI ,133, "CLGI instruction."),
254 EXIT_REASON(SVM_EXIT_SKINIT ,134, "SKINIT instruction."),
255 EXIT_REASON(SVM_EXIT_RDTSCP ,135, "RDTSCP instruction."),
256 EXIT_REASON(SVM_EXIT_ICEBP ,136, "ICEBP instruction."),
257 EXIT_REASON(SVM_EXIT_WBINVD ,137, "WBINVD instruction."),
258 EXIT_REASON(SVM_EXIT_MONITOR ,138, "MONITOR instruction."),
259 EXIT_REASON(SVM_EXIT_MWAIT_UNCOND ,139, "MWAIT instruction unconditional."),
260 EXIT_REASON(SVM_EXIT_MWAIT_ARMED ,140, "MWAIT instruction when armed."),
261 EXIT_REASON(SVM_EXIT_NPF ,1024, "Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault)."),
262 EXIT_REASON_NIL()
263};
264# undef EXIT_REASON
265# undef EXIT_REASON_NIL
266#endif /* VBOX_WITH_STATISTICS */
267
268/*******************************************************************************
269* Internal Functions *
270*******************************************************************************/
271static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
272static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
273static int hwaccmR3InitCPU(PVM pVM);
274static int hwaccmR3InitFinalizeR0(PVM pVM);
275static int hwaccmR3TermCPU(PVM pVM);
276
277
278/**
279 * Initializes the HWACCM.
280 *
281 * @returns VBox status code.
282 * @param pVM The VM to operate on.
283 */
284VMMR3DECL(int) HWACCMR3Init(PVM pVM)
285{
286 LogFlow(("HWACCMR3Init\n"));
287
288 /*
289 * Assert alignment and sizes.
290 */
291 AssertCompileMemberAlignment(VM, hwaccm.s, 32);
292 AssertCompile(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));
293
294 /* Some structure checks. */
295 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
296 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
297 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));
298
299 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
300 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.TR) == 0x490, ("guest.TR offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.TR)));
301 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8CPL) == 0x4CB, ("guest.u8CPL offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8CPL)));
302 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64EFER) == 0x4D0, ("guest.u64EFER offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64EFER)));
303 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64CR4) == 0x548, ("guest.u64CR4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64CR4)));
304 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64RIP) == 0x578, ("guest.u64RIP offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64RIP)));
305 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64RSP) == 0x5D8, ("guest.u64RSP offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64RSP)));
306 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64CR2) == 0x640, ("guest.u64CR2 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64CR2)));
307 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64GPAT) == 0x668, ("guest.u64GPAT offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64GPAT)));
308 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u64LASTEXCPTO) == 0x690, ("guest.u64LASTEXCPTO offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u64LASTEXCPTO)));
309 AssertReleaseMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
310
311
312 /*
313 * Register the saved state data unit.
314 */
315 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
316 NULL, NULL, NULL,
317 NULL, hwaccmR3Save, NULL,
318 NULL, hwaccmR3Load, NULL);
319 if (RT_FAILURE(rc))
320 return rc;
321
322 /* Misc initialisation. */
323 pVM->hwaccm.s.vmx.fSupported = false;
324 pVM->hwaccm.s.svm.fSupported = false;
325 pVM->hwaccm.s.vmx.fEnabled = false;
326 pVM->hwaccm.s.svm.fEnabled = false;
327
328 pVM->hwaccm.s.fNestedPaging = false;
329 pVM->hwaccm.s.fLargePages = false;
330
331 /* Disabled by default. */
332 pVM->fHWACCMEnabled = false;
333
334 /*
335 * Check CFGM options.
336 */
337 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
338 PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
339 /* Nested paging: disabled by default. */
340 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->hwaccm.s.fAllowNestedPaging, false);
341 AssertRC(rc);
342
343 /* Large pages: disabled by default. */
344 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableLargePages", &pVM->hwaccm.s.fLargePages, false);
345 AssertRC(rc);
346
347 /* VT-x VPID: disabled by default. */
348 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->hwaccm.s.vmx.fAllowVPID, false);
349 AssertRC(rc);
350
351 /* HWACCM support must be explicitly enabled in the configuration file. */
352 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hwaccm.s.fAllowed, false);
353 AssertRC(rc);
354
355 /* TPR patching for 32-bit (Windows) guests with an IO-APIC: disabled by default. */
356 rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hwaccm.s.fTRPPatchingAllowed, false);
357 AssertRC(rc);
358
359#ifdef RT_OS_DARWIN
360 if (VMMIsHwVirtExtForced(pVM) != pVM->hwaccm.s.fAllowed)
361#else
362 if (VMMIsHwVirtExtForced(pVM) && !pVM->hwaccm.s.fAllowed)
363#endif
364 {
365 AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n",
366 VMMIsHwVirtExtForced(pVM), pVM->hwaccm.s.fAllowed));
367 return VERR_HWACCM_CONFIG_MISMATCH;
368 }
369
370 if (VMMIsHwVirtExtForced(pVM))
371 pVM->fHWACCMEnabled = true;
372
373#if HC_ARCH_BITS == 32
374 /* 64-bit mode is configurable and it depends on both the kernel mode and VT-x.
375 * (To use the default, don't set 64bitEnabled in CFGM.) */
376 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, false);
377 AssertLogRelRCReturn(rc, rc);
378 if (pVM->hwaccm.s.fAllow64BitGuests)
379 {
380# ifdef RT_OS_DARWIN
381 if (!VMMIsHwVirtExtForced(pVM))
382# else
383 if (!pVM->hwaccm.s.fAllowed)
384# endif
385 return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V).");
386 }
387#else
388 /* On 64-bit hosts 64-bit guest support is enabled by default, but allow this to be overridden
389 * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.) */
390 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, true);
391 AssertLogRelRCReturn(rc, rc);
392#endif
393
394
395 /** Determine the init method for AMD-V and VT-x; either one global init for each host CPU
396 * or local init each time we wish to execute guest code.
397 *
398 * Defaults to false on Mac OS X and Windows due to the higher risk of conflicts with other hypervisors.
399 */
400 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->hwaccm.s.fGlobalInit,
401#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
402 false
403#else
404 true
405#endif
406 );
407
408 /* Max number of resume loops. */
409 rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
410 AssertRC(rc);
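
 /* Note: the "HWVirtExt/*" values queried above normally come from the VM
  * configuration constructed by Main, but for experiments they can also be
  * overridden as extradata under "VBoxInternal/HWVirtExt/" (cf. the 64bitEnabled
  * remark above), for example (VM name and values are placeholders):
  *     VBoxManage setextradata "MyVM" "VBoxInternal/HWVirtExt/EnableNestedPaging" 1
  *     VBoxManage setextradata "MyVM" "VBoxInternal/HWVirtExt/Exclusive"          0
  */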
411
412 return rc;
413}
414
415/**
416 * Initializes the per-VCPU HWACCM.
417 *
418 * @returns VBox status code.
419 * @param pVM The VM to operate on.
420 */
421static int hwaccmR3InitCPU(PVM pVM)
422{
423 LogFlow(("HWACCMR3InitCPU\n"));
424
425 for (VMCPUID i = 0; i < pVM->cCpus; i++)
426 {
427 PVMCPU pVCpu = &pVM->aCpus[i];
428
429 pVCpu->hwaccm.s.fActive = false;
430 }
431
432#ifdef VBOX_WITH_STATISTICS
433 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
434 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
435 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
436 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
437
438 /*
439 * Statistics.
440 */
441 for (VMCPUID i = 0; i < pVM->cCpus; i++)
442 {
443 PVMCPU pVCpu = &pVM->aCpus[i];
444 int rc;
445
446 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of RTMpPokeCpu",
447 "/PROF/HWACCM/CPU%d/Poke", i);
448 AssertRC(rc);
449 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait",
450 "/PROF/HWACCM/CPU%d/PokeWait", i);
451 AssertRC(rc);
452 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait when RTMpPokeCpu fails",
453 "/PROF/HWACCM/CPU%d/PokeWaitFailed", i);
454 AssertRC(rc);
455 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",
456 "/PROF/HWACCM/CPU%d/SwitchToGC", i);
457 AssertRC(rc);
458 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",
459 "/PROF/HWACCM/CPU%d/SwitchFromGC_1", i);
460 AssertRC(rc);
461 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",
462 "/PROF/HWACCM/CPU%d/SwitchFromGC_2", i);
463 AssertRC(rc);
464# if 1 /* temporary for tracking down darwin holdup. */
465 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",
466 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub1", i);
467 AssertRC(rc);
468 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",
469 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub2", i);
470 AssertRC(rc);
471 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",
472 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub3", i);
473 AssertRC(rc);
474# endif
475 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",
476 "/PROF/HWACCM/CPU%d/InGC", i);
477 AssertRC(rc);
478
479# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
480 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
481 "/PROF/HWACCM/CPU%d/Switcher3264", i);
482 AssertRC(rc);
483# endif
484
485# define HWACCM_REG_COUNTER(a, b) \
486 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Occurrence counter", b, i); \
487 AssertRC(rc);
488
489 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowNM, "/HWACCM/CPU%d/Exit/Trap/Shw/#NM");
490 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNM, "/HWACCM/CPU%d/Exit/Trap/Gst/#NM");
491 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPF, "/HWACCM/CPU%d/Exit/Trap/Shw/#PF");
492 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestPF, "/HWACCM/CPU%d/Exit/Trap/Gst/#PF");
493 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestUD, "/HWACCM/CPU%d/Exit/Trap/Gst/#UD");
494 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestSS, "/HWACCM/CPU%d/Exit/Trap/Gst/#SS");
495 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNP, "/HWACCM/CPU%d/Exit/Trap/Gst/#NP");
496 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestGP, "/HWACCM/CPU%d/Exit/Trap/Gst/#GP");
497 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestMF, "/HWACCM/CPU%d/Exit/Trap/Gst/#MF");
498 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDE, "/HWACCM/CPU%d/Exit/Trap/Gst/#DE");
499 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDB, "/HWACCM/CPU%d/Exit/Trap/Gst/#DB");
500 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvpg, "/HWACCM/CPU%d/Exit/Instr/Invlpg");
501 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvd, "/HWACCM/CPU%d/Exit/Instr/Invd");
502 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid, "/HWACCM/CPU%d/Exit/Instr/Cpuid");
503 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc, "/HWACCM/CPU%d/Exit/Instr/Rdtsc");
504 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc, "/HWACCM/CPU%d/Exit/Instr/Rdpmc");
505 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr, "/HWACCM/CPU%d/Exit/Instr/Rdmsr");
506 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitWrmsr, "/HWACCM/CPU%d/Exit/Instr/Wrmsr");
507 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMwait, "/HWACCM/CPU%d/Exit/Instr/Mwait");
508 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMonitor, "/HWACCM/CPU%d/Exit/Instr/Monitor");
509 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxWrite, "/HWACCM/CPU%d/Exit/Instr/DR/Write");
510 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxRead, "/HWACCM/CPU%d/Exit/Instr/DR/Read");
511 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCLTS, "/HWACCM/CPU%d/Exit/Instr/CLTS");
512 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitLMSW, "/HWACCM/CPU%d/Exit/Instr/LMSW");
513 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCli, "/HWACCM/CPU%d/Exit/Instr/Cli");
514 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitSti, "/HWACCM/CPU%d/Exit/Instr/Sti");
515 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPushf, "/HWACCM/CPU%d/Exit/Instr/Pushf");
516 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPopf, "/HWACCM/CPU%d/Exit/Instr/Popf");
517 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIret, "/HWACCM/CPU%d/Exit/Instr/Iret");
518 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInt, "/HWACCM/CPU%d/Exit/Instr/Int");
519 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt, "/HWACCM/CPU%d/Exit/Instr/Hlt");
520 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOWrite, "/HWACCM/CPU%d/Exit/IO/Write");
521 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIORead, "/HWACCM/CPU%d/Exit/IO/Read");
522 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringWrite, "/HWACCM/CPU%d/Exit/IO/WriteString");
523 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringRead, "/HWACCM/CPU%d/Exit/IO/ReadString");
524 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow, "/HWACCM/CPU%d/Exit/IrqWindow");
525 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume, "/HWACCM/CPU%d/Exit/MaxResume");
526 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending, "/HWACCM/CPU%d/Exit/PreemptPending");
527
528 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq, "/HWACCM/CPU%d/Switch/IrqPending");
529 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchToR3, "/HWACCM/CPU%d/Switch/ToR3");
530
531 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntInject, "/HWACCM/CPU%d/Irq/Inject");
532 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntReinject, "/HWACCM/CPU%d/Irq/Reinject");
533 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatPendingHostIrq, "/HWACCM/CPU%d/Irq/PendingOnHost");
534
535 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPage, "/HWACCM/CPU%d/Flush/Page");
536 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageManual, "/HWACCM/CPU%d/Flush/Page/Virt");
537 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPhysPageManual, "/HWACCM/CPU%d/Flush/Page/Phys");
538 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLB, "/HWACCM/CPU%d/Flush/TLB");
539 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBManual, "/HWACCM/CPU%d/Flush/TLB/Manual");
540 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBCRxChange, "/HWACCM/CPU%d/Flush/TLB/CRx");
541 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageInvlpg, "/HWACCM/CPU%d/Flush/Page/Invlpg");
542 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Switch");
543 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Skipped");
544 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID, "/HWACCM/CPU%d/Flush/TLB/ASID");
545 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga, "/HWACCM/CPU%d/Flush/TLB/PhysInvl");
546 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown, "/HWACCM/CPU%d/Flush/Shootdown/Page");
547 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush, "/HWACCM/CPU%d/Flush/Shootdown/TLB");
548
549 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset, "/HWACCM/CPU%d/TSC/Offset");
550 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept, "/HWACCM/CPU%d/TSC/Intercept");
551 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow, "/HWACCM/CPU%d/TSC/InterceptOverflow");
552
553 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed, "/HWACCM/CPU%d/Debug/Armed");
554 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch, "/HWACCM/CPU%d/Debug/ContextSwitch");
555 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck, "/HWACCM/CPU%d/Debug/IOCheck");
556
557 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadMinimal, "/HWACCM/CPU%d/Load/Minimal");
558 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadFull, "/HWACCM/CPU%d/Load/Full");
559
560#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
561 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFpu64SwitchBack, "/HWACCM/CPU%d/Switch64/Fpu");
562 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDebug64SwitchBack, "/HWACCM/CPU%d/Switch64/Debug");
563#endif
564
565 for (unsigned j=0;j<RT_ELEMENTS(pVCpu->hwaccm.s.StatExitCRxWrite);j++)
566 {
567 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes",
568 "/HWACCM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
569 AssertRC(rc);
570 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads",
571 "/HWACCM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
572 AssertRC(rc);
573 }
574
575#undef HWACCM_REG_COUNTER
576
577 pVCpu->hwaccm.s.paStatExitReason = NULL;
578
579 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hwaccm.s.paStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatExitReason);
580 AssertRC(rc);
581 if (RT_SUCCESS(rc))
582 {
583 const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
584 for (int j=0;j<MAX_EXITREASON_STAT;j++)
585 {
586 if (papszDesc[j])
587 {
588 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
589 papszDesc[j], "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
590 AssertRC(rc);
591 }
592 }
593 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);
594 AssertRC(rc);
595 }
596 pVCpu->hwaccm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatExitReason);
597# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
598 Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
599# else
600 Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR);
601# endif
602
603 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatInjectedIrqs);
604 AssertRCReturn(rc, rc);
605 pVCpu->hwaccm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
606# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
607 Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
608# else
609 Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
610# endif
611 for (unsigned j = 0; j < 255; j++)
612 STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",
613 (j < 0x20) ? "/HWACCM/CPU%d/Interrupt/Trap/%02X" : "/HWACCM/CPU%d/Interrupt/IRQ/%02X", i, j);
614
615 }
616#endif /* VBOX_WITH_STATISTICS */
617
618#ifdef VBOX_WITH_CRASHDUMP_MAGIC
619 /* Magic marker for searching in crash dumps. */
620 for (VMCPUID i = 0; i < pVM->cCpus; i++)
621 {
622 PVMCPU pVCpu = &pVM->aCpus[i];
623
624 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
625 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
626 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
627 }
628#endif
629 return VINF_SUCCESS;
630}
631
632/**
633 * Called when an init phase has completed.
634 *
635 * @returns VBox status code.
636 * @param pVM The VM.
637 * @param enmWhat The phase that completed.
638 */
639VMMR3_INT_DECL(int) HWACCMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
640{
641 switch (enmWhat)
642 {
643 case VMINITCOMPLETED_RING3:
644 return hwaccmR3InitCPU(pVM);
645 case VMINITCOMPLETED_RING0:
646 return hwaccmR3InitFinalizeR0(pVM);
647 default:
648 return VINF_SUCCESS;
649 }
650}
651
652/**
653 * Turns off normal raw-mode features.
654 *
655 * @param pVM The VM to operate on.
656 */
657static void hwaccmR3DisableRawMode(PVM pVM)
658{
659 /* Disable PATM & CSAM. */
660 PATMR3AllowPatching(pVM, false);
661 CSAMDisableScanning(pVM);
662
663 /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
664 SELMR3DisableMonitoring(pVM);
665 TRPMR3DisableMonitoring(pVM);
666
667 /* Disable the switcher code (safety precaution). */
668 VMMR3DisableSwitcher(pVM);
669
670 /* Disable mapping of the hypervisor into the shadow page table. */
671 PGMR3MappingsDisable(pVM);
672
673 /* Disable the switcher */
674 VMMR3DisableSwitcher(pVM);
675
676 /* Reinit the paging mode to force the new shadow mode. */
677 for (VMCPUID i = 0; i < pVM->cCpus; i++)
678 {
679 PVMCPU pVCpu = &pVM->aCpus[i];
680
681 PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
682 }
683}
684
685/**
686 * Initialize VT-x or AMD-V.
687 *
688 * @returns VBox status code.
689 * @param pVM The VM handle.
690 */
691static int hwaccmR3InitFinalizeR0(PVM pVM)
692{
693 int rc;
694
695 /* Hack to allow users to work around broken BIOSes that incorrectly set EFER.SVME, which makes us believe somebody else
696 * is already using AMD-V.
697 */
698 if ( !pVM->hwaccm.s.vmx.fSupported
699 && !pVM->hwaccm.s.svm.fSupported
700 && pVM->hwaccm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
701 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
702 {
703 LogRel(("HWACCM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
704 pVM->hwaccm.s.svm.fSupported = true;
705 pVM->hwaccm.s.svm.fIgnoreInUseError = true;
706 }
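 /* Illustrative usage: the override is typically enabled from the host shell before
  * the VM process is started, e.g. `VBOX_HWVIRTEX_IGNORE_SVM_IN_USE=1 VirtualBox`.
  * Only the variable's existence is checked (RTEnvExist above), not its value. */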
707 else
708 if ( !pVM->hwaccm.s.vmx.fSupported
709 && !pVM->hwaccm.s.svm.fSupported)
710 {
711 LogRel(("HWACCM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hwaccm.s.lLastError));
712 LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
713
714 if (VMMIsHwVirtExtForced(pVM))
715 {
716 switch (pVM->hwaccm.s.lLastError)
717 {
718 case VERR_VMX_NO_VMX:
719 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
720 case VERR_VMX_IN_VMX_ROOT_MODE:
721 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor.");
722 case VERR_SVM_IN_USE:
723 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor.");
724 case VERR_SVM_NO_SVM:
725 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available.");
726 case VERR_SVM_DISABLED:
727 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
728 default:
729 return pVM->hwaccm.s.lLastError;
730 }
731 }
732 return VINF_SUCCESS;
733 }
734
735 if (pVM->hwaccm.s.vmx.fSupported)
736 {
737 rc = SUPR3QueryVTxSupported();
738 if (RT_FAILURE(rc))
739 {
740#ifdef RT_OS_LINUX
741 LogRel(("HWACCM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
742#else
743 LogRel(("HWACCM: The host kernel does not support VT-x!\n"));
744#endif
745 if ( pVM->cCpus > 1
746 || VMMIsHwVirtExtForced(pVM))
747 return rc;
748
749 /* silently fall back to raw mode */
750 return VINF_SUCCESS;
751 }
752 }
753
754 if (!pVM->hwaccm.s.fAllowed)
755 return VINF_SUCCESS; /* nothing to do */
756
757 /* Enable VT-x or AMD-V on all host CPUs. */
758 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_ENABLE, 0, NULL);
759 if (RT_FAILURE(rc))
760 {
761 LogRel(("HWACCMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));
762 return rc;
763 }
764 Assert(!pVM->fHWACCMEnabled || VMMIsHwVirtExtForced(pVM));
765
766 pVM->hwaccm.s.fHasIoApic = PDMHasIoApic(pVM);
767 /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already) */
768 if (!pVM->hwaccm.s.fHasIoApic)
769 {
770 Assert(!pVM->hwaccm.s.fTRPPatchingAllowed); /* paranoia */
771 pVM->hwaccm.s.fTRPPatchingAllowed = false;
772 }
773
774 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
775 if (pVM->hwaccm.s.vmx.fSupported)
776 {
777 Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));
778
779 if ( pVM->hwaccm.s.fInitialized == false
780 && pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
781 {
782 uint64_t val;
783 RTGCPHYS GCPhys = 0;
784
785 LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
786 LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
787 LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
788 LogRel(("HWACCM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
789 LogRel(("HWACCM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
790 LogRel(("HWACCM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
791 LogRel(("HWACCM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
792 LogRel(("HWACCM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
793
794 LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.u));
795 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
796 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
797 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
798 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
799 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
800 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
801 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
802 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
803 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
804 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
805 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
806 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
807 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
808 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
809 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
810 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
811 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
812 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));
813
814 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.u));
815 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
816 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
817 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
818 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
819 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
820 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
821 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
822 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
823 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
824 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
825 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
826 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
827 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
828 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
829 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
830 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
831 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
832 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
833 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
834 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
835 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
836 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
837 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
838 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
839 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
840 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
841 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
842 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
843 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
844 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
845 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
846 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
847 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
848 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
849 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
850 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
851 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
852 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
853 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
854 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
855 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
856 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
857 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));
858
859 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
860 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
861 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
862 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
863 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
864 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
865 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
866 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
867 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
868 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
869 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
870 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
871 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
872 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
873 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
874 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
875 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
876 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
877 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
878 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
879 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
880 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
881 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
882 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
883 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
884 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
885 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
886 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
887 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
888 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
889 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
890 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
891 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
892 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
893 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
894 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
895 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
896 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
897 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
898 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
899 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
900 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
901 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));
902
903 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
904 {
905 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.u));
906 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
907 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
908 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
909 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
910 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
911 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
912 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
913 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
914 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT\n"));
915 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
916 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
917 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
918 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
919 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
920 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));
921 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
922 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n"));
923 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
924 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n"));
925
926 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
927 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
928 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
929 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
930 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
931 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
932 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT *must* be set\n"));
933 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
934 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
935 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
936 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
937 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
938 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
939 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
940 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
941 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
942 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n"));
943 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
944 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n"));
945 }
946
947 LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry.u));
948 val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
949 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
950 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
951 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
952 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
953 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
954 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
955 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
956 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
957 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
958 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
959 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
960 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
961 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
962 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
963 val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
964 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
965 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
966 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
967 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
968 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
969 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
970 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
971 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
972 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
973 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
974 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
975 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
976 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
977 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));
978
979 LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit.u));
980 val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
981 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
982 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
983 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
984 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
985 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
986 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
987 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
988 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
989 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
990 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
991 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
992 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
993 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
994 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
995 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
996 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
997 val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
998 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
999 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
1000 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
1001 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
1002 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
1003 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
1004 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
1005 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
1006 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
1007 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
1008 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
1009 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
1010 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
1011 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
1012 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
1013 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));
1014
1015 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps)
1016 {
1017 LogRel(("HWACCM: MSR_IA32_VMX_EPT_VPID_CAPS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_eptcaps));
1018
1019 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)
1020 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));
1021 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)
1022 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));
1023 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)
1024 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));
1025 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)
1026 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));
1027 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)
1028 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));
1029 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)
1030 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));
1031 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)
1032 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));
1033 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)
1034 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));
1035 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)
1036 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));
1037 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)
1038 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));
1039 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)
1040 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));
1041 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)
1042 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));
1043 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)
1044 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));
1045 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)
1046 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));
1047 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)
1048 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));
1049 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)
1050 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));
1051 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)
1052 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));
1053 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
1054 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));
1055 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV)
1056 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV\n"));
1057 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT)
1058 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT\n"));
1059 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL)
1060 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL\n"));
1061 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
1062 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));
1063 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV)
1064 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV\n"));
1065 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT)
1066 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT\n"));
1067 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL)
1068 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL\n"));
1069 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL)
1070 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL\n"));
1071 }
1072
1073 LogRel(("HWACCM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
1074 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc) == pVM->hwaccm.s.vmx.cPreemptTimerShift)
1075 LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1076 else
1077 LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc), pVM->hwaccm.s.vmx.cPreemptTimerShift));
1078 LogRel(("HWACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1079 LogRel(("HWACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1080 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1081 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));
1082
1083 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
1084 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
1085 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
1086 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
1087 LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));
1088
1089 LogRel(("HWACCM: TPR shadow physaddr = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
1090
1091 /* Paranoia */
1092 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
1093
1094 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1095 {
1096 LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
1097 LogRel(("HWACCM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));
1098 }
1099
1100#ifdef HWACCM_VTX_WITH_EPT
1101 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
1102 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
1103#endif /* HWACCM_VTX_WITH_EPT */
1104#ifdef HWACCM_VTX_WITH_VPID
1105 if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
1106 && !pVM->hwaccm.s.fNestedPaging) /* VPID and EPT are mutually exclusive. */
1107 pVM->hwaccm.s.vmx.fVPID = pVM->hwaccm.s.vmx.fAllowVPID;
1108#endif /* HWACCM_VTX_WITH_VPID */
1109
1110 /* Unrestricted guest execution relies on EPT. */
1111 if ( pVM->hwaccm.s.fNestedPaging
1112 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE))
1113 {
1114 pVM->hwaccm.s.vmx.fUnrestrictedGuest = true;
1115 }
1116
1117 /* Only try once. */
1118 pVM->hwaccm.s.fInitialized = true;
1119
1120 if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
1121 {
1122         /* Allocate the VMM device heap memory used for real mode emulation: the TSS with its I/O bitmap (2 pages) plus the identity mapped page directory set up below. */
1123 rc = PDMR3VMMDevHeapAlloc(pVM, HWACCM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hwaccm.s.vmx.pRealModeTSS);
1124 if (RT_SUCCESS(rc))
1125 {
1126 /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */
1127 ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
1128 pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
1129 /* Bit set to 0 means redirection enabled. */
1130 memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
1131 /* Allow all port IO, so the VT-x IO intercepts do their job. */
1132 memset(pVM->hwaccm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2);
1133 *((unsigned char *)pVM->hwaccm.s.vmx.pRealModeTSS + HWACCM_VTX_TSS_SIZE - 2) = 0xff;
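            /* The CPU requires the I/O permission bitmap to be terminated by a byte with all bits set,
               hence the 0xff written at the end of the TSS area above. */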
1134
1135                 /* Construct a 1024 entry page directory with 4 MB pages, identity mapping the first 4 GB. It serves
1136                  * as the page table used for real and protected mode without paging when EPT is active.
1137                  */
1138 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hwaccm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1139             for (unsigned i = 0; i < X86_PG_ENTRIES; i++)
1140 {
1141 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1142 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
1143 }
1144
1145 /* We convert it here every time as pci regions could be reconfigured. */
1146 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
1147 AssertRC(rc);
1148 LogRel(("HWACCM: Real Mode TSS guest physaddr = %RGp\n", GCPhys));
1149
1150 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1151 AssertRC(rc);
1152 LogRel(("HWACCM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys));
1153 }
1154 else
1155 {
1156 LogRel(("HWACCM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
1157 pVM->hwaccm.s.vmx.pRealModeTSS = NULL;
1158 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1159 }
1160 }
1161
1162 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
1163 AssertRC(rc);
1164 if (rc == VINF_SUCCESS)
1165 {
1166 pVM->fHWACCMEnabled = true;
1167 pVM->hwaccm.s.vmx.fEnabled = true;
1168 hwaccmR3DisableRawMode(pVM);
1169
1170 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1171#ifdef VBOX_ENABLE_64_BITS_GUESTS
1172 if (pVM->hwaccm.s.fAllow64BitGuests)
1173 {
1174 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1175 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1176 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
1177 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1178 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1179 }
1180 else
1181 /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE (we reuse the host EFER in the switcher) */
1182 /* Todo: this needs to be fixed properly!! */
1183 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
1184 && (pVM->hwaccm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
1185 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1186
1187 LogRel((pVM->hwaccm.s.fAllow64BitGuests
1188 ? "HWACCM: 32-bit and 64-bit guests supported.\n"
1189 : "HWACCM: 32-bit guests supported.\n"));
1190#else
1191 LogRel(("HWACCM: 32-bit guests supported.\n"));
1192#endif
1193 LogRel(("HWACCM: VMX enabled!\n"));
1194 if (pVM->hwaccm.s.fNestedPaging)
1195 {
1196 LogRel(("HWACCM: Enabled nested paging\n"));
1197 LogRel(("HWACCM: EPT root page = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
1198 if (pVM->hwaccm.s.vmx.fUnrestrictedGuest)
1199 LogRel(("HWACCM: Unrestricted guest execution enabled!\n"));
1200
1201#if HC_ARCH_BITS == 64
1202 if (pVM->hwaccm.s.fLargePages)
1203 {
1204 /* Use large (2 MB) pages for our EPT PDEs where possible. */
1205 PGMSetLargePageUsage(pVM, true);
1206 LogRel(("HWACCM: Large page support enabled!\n"));
1207 }
1208#endif
1209 }
1210 else
1211 Assert(!pVM->hwaccm.s.vmx.fUnrestrictedGuest);
1212
1213 if (pVM->hwaccm.s.vmx.fVPID)
1214 LogRel(("HWACCM: Enabled VPID\n"));
1215
1216 if ( pVM->hwaccm.s.fNestedPaging
1217 || pVM->hwaccm.s.vmx.fVPID)
1218 {
1219 LogRel(("HWACCM: enmFlushPage %d\n", pVM->hwaccm.s.vmx.enmFlushPage));
1220 LogRel(("HWACCM: enmFlushContext %d\n", pVM->hwaccm.s.vmx.enmFlushContext));
1221 }
1222
1223 /* TPR patching status logging. */
1224 if (pVM->hwaccm.s.fTRPPatchingAllowed)
1225 {
1226 if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1227 && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
1228 {
1229 pVM->hwaccm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. */
1230 LogRel(("HWACCM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
1231 }
1232 else
1233 {
1234 uint32_t u32Eax, u32Dummy;
1235
1236 /* TPR patching needs access to the MSR_K8_LSTAR msr. */
1237 ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
1238 if ( u32Eax < 0x80000001
1239 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
1240 {
1241 pVM->hwaccm.s.fTRPPatchingAllowed = false;
1242 LogRel(("HWACCM: TPR patching disabled (long mode not supported).\n"));
1243 }
1244 }
1245 }
1246 LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1247
1248 /*
1249 * Check for preemption timer config override and log the state of it.
1250 */
1251 if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
1252 {
1253 PCFGMNODE pCfgHwAccM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HWACCM");
1254 int rc2 = CFGMR3QueryBoolDef(pCfgHwAccM, "UsePreemptTimer", &pVM->hwaccm.s.vmx.fUsePreemptTimer, true);
1255 AssertLogRelRC(rc2);
1256 }
1257 if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
1258 LogRel(("HWACCM: Using the VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hwaccm.s.vmx.cPreemptTimerShift));
1259 }
1260 else
1261 {
1262 LogRel(("HWACCM: VMX setup failed with rc=%Rrc!\n", rc));
1263 LogRel(("HWACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));
1264 pVM->fHWACCMEnabled = false;
1265 }
1266 }
1267 }
1268 else
1269 if (pVM->hwaccm.s.svm.fSupported)
1270 {
1271 Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));
1272
1273 if (pVM->hwaccm.s.fInitialized == false)
1274 {
1275 /* Erratum 170 which requires a forced TLB flush for each world switch:
1276 * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
1277 *
1278 * All BH-G1/2 and DH-G1/2 models include a fix:
1279 * Athlon X2: 0x6b 1/2
1280 * 0x68 1/2
1281 * Athlon 64: 0x7f 1
1282 * 0x6f 2
1283 * Sempron: 0x7f 1/2
1284 * 0x6f 2
1285 * 0x6c 2
1286 * 0x7c 2
1287 * Turion 64: 0x68 2
1288 *
1289 */
1290 uint32_t u32Dummy;
1291 uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
1292 ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
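            /* Decode family, model and stepping from the CPUID leaf 1 signature; the extended
               family/model fields only apply when the base family is 0xf. */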
1293             u32BaseFamily = (u32Version >> 8) & 0xf;
1294 u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
1295 u32Model = ((u32Version >> 4) & 0xf);
1296 u32Model = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
1297 u32Stepping = u32Version & 0xf;
1298 if ( u32Family == 0xf
1299 && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
1300 && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
1301 {
1302                 LogRel(("HWACCM: AMD CPU with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
1303 }
1304
1305             LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
1306             LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
1307 LogRel(("HWACCM: AMD HWCR MSR = %RX64\n", pVM->hwaccm.s.svm.msrHWCR));
1308 LogRel(("HWACCM: AMD-V revision = %X\n", pVM->hwaccm.s.svm.u32Rev));
1309 LogRel(("HWACCM: AMD-V max ASID = %d\n", pVM->hwaccm.s.uMaxASID));
1310 LogRel(("HWACCM: AMD-V features = %X\n", pVM->hwaccm.s.svm.u32Features));
1311 static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
1312 {
1313#define FLAG_NAME(a_Define) { a_Define, #a_Define }
1314 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1315 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
1316 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
1317 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
1318 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
1319 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
1320 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
1321 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST),
1322 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE),
1323 FLAG_NAME(AMD_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
1325#undef FLAG_NAME
1326 };
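            /* Log every advertised SVM feature by name and flag any bits we do not recognise. */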
1327 uint32_t fSvmFeatures = pVM->hwaccm.s.svm.u32Features;
1328 for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
1329 if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
1330 {
1331 LogRel(("HWACCM: %s\n", s_aSvmFeatures[i].pszName));
1332 fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
1333 }
1334 if (fSvmFeatures)
1335 for (unsigned iBit = 0; iBit < 32; iBit++)
1336 if (RT_BIT_32(iBit) & fSvmFeatures)
1337 LogRel(("HWACCM: Reserved bit %u\n", iBit));
1338
1339 /* Only try once. */
1340 pVM->hwaccm.s.fInitialized = true;
1341
1342 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
1343 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
1344
1345 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
1346 AssertRC(rc);
1347 if (rc == VINF_SUCCESS)
1348 {
1349 pVM->fHWACCMEnabled = true;
1350 pVM->hwaccm.s.svm.fEnabled = true;
1351
1352 if (pVM->hwaccm.s.fNestedPaging)
1353 {
1354 LogRel(("HWACCM: Enabled nested paging\n"));
1355#if HC_ARCH_BITS == 64
1356 if (pVM->hwaccm.s.fLargePages)
1357 {
1358 /* Use large (2 MB) pages for our nested paging PDEs where possible. */
1359 PGMSetLargePageUsage(pVM, true);
1360 LogRel(("HWACCM: Large page support enabled!\n"));
1361 }
1362#endif
1363 }
1364
1365 hwaccmR3DisableRawMode(pVM);
1366 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1367 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1368 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1369#ifdef VBOX_ENABLE_64_BITS_GUESTS
1370 if (pVM->hwaccm.s.fAllow64BitGuests)
1371 {
1372 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1373 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1374 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1375 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1376 }
1377 else
1378 /* Turn on NXE if PAE has been enabled. */
1379 if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1380 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1381#endif
1382
1383 LogRel((pVM->hwaccm.s.fAllow64BitGuests
1384                                 ? "HWACCM: 32-bit and 64-bit guests supported.\n"
1385                                 : "HWACCM: 32-bit guests supported.\n"));
1386
1387 LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1388 }
1389 else
1390 {
1391 pVM->fHWACCMEnabled = false;
1392 }
1393 }
1394 }
1395 if (pVM->fHWACCMEnabled)
1396 LogRel(("HWACCM: VT-x/AMD-V init method: %s\n", (pVM->hwaccm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
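    /* GLOBAL means VT-x/AMD-V is switched on for all host CPUs up front; LOCAL means it is
       enabled and disabled around each VM entry (presumed semantics of fGlobalInit). */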
1397 RTLogRelSetBuffering(fOldBuffered);
1398 return VINF_SUCCESS;
1399}
1400
1401/**
1402 * Applies relocations to data and code managed by this
1403 * component. This function will be called at init and
1404  * whenever the VMM needs to relocate itself inside the GC.
1405 *
1406 * @param pVM The VM.
1407 */
1408VMMR3DECL(void) HWACCMR3Relocate(PVM pVM)
1409{
1410 Log(("HWACCMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
1411
1412     /* Fetch the current paging mode when the relocate callback is invoked during state loading. */
1413 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1414 {
1415 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1416 {
1417 PVMCPU pVCpu = &pVM->aCpus[i];
1418
1419 pVCpu->hwaccm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1420 Assert(pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
1421 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu);
1422 }
1423 }
1424#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1425 if (pVM->fHWACCMEnabled)
1426 {
1427 int rc;
1428
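        /* Pick the host-to-guest switcher matching the host paging mode; on a 32-bit host it is
           used to enter 64-bit guest context. */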
1429 switch(PGMGetHostMode(pVM))
1430 {
1431 case PGMMODE_32_BIT:
1432 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
1433 break;
1434
1435 case PGMMODE_PAE:
1436 case PGMMODE_PAE_NX:
1437 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
1438 break;
1439
1440 default:
1441 AssertFailed();
1442 break;
1443 }
1444 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hwaccm.s.pfnVMXGCStartVM64);
1445 AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));
1446
1447 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->hwaccm.s.pfnSVMGCVMRun64);
1448 AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));
1449
1450 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestFPU64", &pVM->hwaccm.s.pfnSaveGuestFPU64);
1451         AssertReleaseMsgRC(rc, ("HWACCMSaveGuestFPU64 -> rc=%Rrc\n", rc));
1452
1453 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestDebug64", &pVM->hwaccm.s.pfnSaveGuestDebug64);
1454         AssertReleaseMsgRC(rc, ("HWACCMSaveGuestDebug64 -> rc=%Rrc\n", rc));
1455
1456# ifdef DEBUG
1457 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMTestSwitcher64", &pVM->hwaccm.s.pfnTest64);
1458 AssertReleaseMsgRC(rc, ("HWACCMTestSwitcher64 -> rc=%Rrc\n", rc));
1459# endif
1460 }
1461#endif
1462 return;
1463}
1464
1465/**
1466  * Checks whether hardware accelerated raw mode is allowed.
1467 *
1468 * @returns boolean
1469 * @param pVM The VM to operate on.
1470 */
1471VMMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
1472{
1473 return pVM->hwaccm.s.fAllowed;
1474}
1475
1476/**
1477  * Notification callback which is called whenever the guest or shadow
1478  * paging mode changes.
1479 *
1480 * This is called by PGM.
1481 *
1482 * @param pVM The VM to operate on.
1483 * @param pVCpu The VMCPU to operate on.
1484 * @param enmShadowMode New shadow paging mode.
1485 * @param enmGuestMode New guest paging mode.
1486 */
1487VMMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
1488{
1489 /* Ignore page mode changes during state loading. */
1490 if (VMR3GetState(pVCpu->pVMR3) == VMSTATE_LOADING)
1491 return;
1492
1493 pVCpu->hwaccm.s.enmShadowMode = enmShadowMode;
1494
1495 if ( pVM->hwaccm.s.vmx.fEnabled
1496 && pVM->fHWACCMEnabled)
1497 {
1498 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
1499 && enmGuestMode >= PGMMODE_PROTECTED)
1500 {
1501 PCPUMCTX pCtx;
1502
1503 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1504
1505 /* After a real mode switch to protected mode we must force
1506 * CPL to 0. Our real mode emulation had to set it to 3.
1507 */
1508 pCtx->ssHid.Attr.n.u2Dpl = 0;
1509 }
1510 }
1511
1512 if (pVCpu->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
1513 {
1514 /* Keep track of paging mode changes. */
1515 pVCpu->hwaccm.s.vmx.enmPrevGuestMode = pVCpu->hwaccm.s.vmx.enmCurrGuestMode;
1516 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;
1517
1518 /* Did we miss a change, because all code was executed in the recompiler? */
1519 if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
1520 {
1521 Log(("HWACCMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hwaccm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode)));
1522 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = pVCpu->hwaccm.s.vmx.enmPrevGuestMode;
1523 }
1524 }
1525
1526 /* Reset the contents of the read cache. */
1527 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
1528     for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
1529 pCache->Read.aFieldVal[j] = 0;
1530}
1531
1532/**
1533 * Terminates the HWACCM.
1534 *
1535  * Termination means cleaning up and freeing all resources;
1536  * the VM itself is at this point powered off or suspended.
1537 *
1538 * @returns VBox status code.
1539 * @param pVM The VM to operate on.
1540 */
1541VMMR3DECL(int) HWACCMR3Term(PVM pVM)
1542{
1543 if (pVM->hwaccm.s.vmx.pRealModeTSS)
1544 {
1545 PDMR3VMMDevHeapFree(pVM, pVM->hwaccm.s.vmx.pRealModeTSS);
1546 pVM->hwaccm.s.vmx.pRealModeTSS = 0;
1547 }
1548 hwaccmR3TermCPU(pVM);
1549     return VINF_SUCCESS;
1550}
1551
1552/**
1553 * Terminates the per-VCPU HWACCM.
1554 *
1555 * @returns VBox status code.
1556 * @param pVM The VM to operate on.
1557 */
1558static int hwaccmR3TermCPU(PVM pVM)
1559{
1560 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1561 {
1562 PVMCPU pVCpu = &pVM->aCpus[i];
1563
1564#ifdef VBOX_WITH_STATISTICS
1565 if (pVCpu->hwaccm.s.paStatExitReason)
1566 {
1567 MMHyperFree(pVM, pVCpu->hwaccm.s.paStatExitReason);
1568 pVCpu->hwaccm.s.paStatExitReason = NULL;
1569 pVCpu->hwaccm.s.paStatExitReasonR0 = NIL_RTR0PTR;
1570 }
1571 if (pVCpu->hwaccm.s.paStatInjectedIrqs)
1572 {
1573 MMHyperFree(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
1574 pVCpu->hwaccm.s.paStatInjectedIrqs = NULL;
1575 pVCpu->hwaccm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
1576 }
1577#endif
1578
1579#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1580 memset(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic));
1581 pVCpu->hwaccm.s.vmx.VMCSCache.uMagic = 0;
1582 pVCpu->hwaccm.s.vmx.VMCSCache.uPos = 0xffffffff;
1583#endif
1584 }
1585     return VINF_SUCCESS;
1586}
1587
1588/**
1589 * Resets a virtual CPU.
1590 *
1591 * Used by HWACCMR3Reset and CPU hot plugging.
1592 *
1593 * @param pVCpu The CPU to reset.
1594 */
1595VMMR3DECL(void) HWACCMR3ResetCpu(PVMCPU pVCpu)
1596{
1597 /* On first entry we'll sync everything. */
1598 pVCpu->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
1599
1600 pVCpu->hwaccm.s.vmx.cr0_mask = 0;
1601 pVCpu->hwaccm.s.vmx.cr4_mask = 0;
1602
1603 pVCpu->hwaccm.s.fActive = false;
1604 pVCpu->hwaccm.s.Event.fPending = false;
1605
1606 /* Reset state information for real-mode emulation in VT-x. */
1607 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
1608 pVCpu->hwaccm.s.vmx.enmPrevGuestMode = PGMMODE_REAL;
1609 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
1610
1611 /* Reset the contents of the read cache. */
1612 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
1613     for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
1614 pCache->Read.aFieldVal[j] = 0;
1615
1616#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1617 /* Magic marker for searching in crash dumps. */
1618 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
1619 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
1620#endif
1621}
1622
1623/**
1624 * The VM is being reset.
1625 *
1626 * For the HWACCM component this means that any GDT/LDT/TSS monitors
1627  * need to be removed.
1628 *
1629 * @param pVM VM handle.
1630 */
1631VMMR3DECL(void) HWACCMR3Reset(PVM pVM)
1632{
1633 LogFlow(("HWACCMR3Reset:\n"));
1634
1635 if (pVM->fHWACCMEnabled)
1636 hwaccmR3DisableRawMode(pVM);
1637
1638 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1639 {
1640 PVMCPU pVCpu = &pVM->aCpus[i];
1641
1642 HWACCMR3ResetCpu(pVCpu);
1643 }
1644
1645 /* Clear all patch information. */
1646 pVM->hwaccm.s.pGuestPatchMem = 0;
1647 pVM->hwaccm.s.pFreeGuestPatchMem = 0;
1648 pVM->hwaccm.s.cbGuestPatchMem = 0;
1649 pVM->hwaccm.s.cPatches = 0;
1650 pVM->hwaccm.s.PatchTree = 0;
1651 pVM->hwaccm.s.fTPRPatchingActive = false;
1652 ASMMemZero32(pVM->hwaccm.s.aPatches, sizeof(pVM->hwaccm.s.aPatches));
1653}
1654
1655/**
1656  * Callback to remove all patched TPR instructions and restore the original code.
1657 *
1658 * @returns VBox strict status code.
1659 * @param pVM The VM handle.
1660 * @param pVCpu The VMCPU for the EMT we're being called on.
1661  * @param pvUser The VCPU id of the EMT that issued the original patch request.
1662 *
1663 */
1664DECLCALLBACK(VBOXSTRICTRC) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
1665{
1666 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1667
1668     /* Only execute the handler on the VCPU the original patch request was issued on. */
1669 if (pVCpu->idCpu != idCpu)
1670 return VINF_SUCCESS;
1671
1672 Log(("hwaccmR3RemovePatches\n"));
1673 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
1674 {
1675 uint8_t szInstr[15];
1676 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
1677 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
1678 int rc;
1679
1680#ifdef LOG_ENABLED
1681 char szOutput[256];
1682
1683 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1684 szOutput, sizeof(szOutput), NULL);
1685 if (RT_SUCCESS(rc))
1686 Log(("Patched instr: %s\n", szOutput));
1687#endif
1688
1689 /* Check if the instruction is still the same. */
1690 rc = PGMPhysSimpleReadGCPtr(pVCpu, szInstr, pInstrGC, pPatch->cbNewOp);
1691 if (rc != VINF_SUCCESS)
1692 {
1693             Log(("Patched code removed? (rc=%Rrc)\n", rc));
1694 continue; /* swapped out or otherwise removed; skip it. */
1695 }
1696
1697 if (memcmp(szInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
1698 {
1699             Log(("Patched instruction was changed! (rc=%Rrc)\n", rc));
1700 continue; /* skip it. */
1701 }
1702
1703 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
1704 AssertRC(rc);
1705
1706#ifdef LOG_ENABLED
1707 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1708 szOutput, sizeof(szOutput), NULL);
1709 if (RT_SUCCESS(rc))
1710 Log(("Original instr: %s\n", szOutput));
1711#endif
1712 }
1713 pVM->hwaccm.s.cPatches = 0;
1714 pVM->hwaccm.s.PatchTree = 0;
1715 pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;
1716 pVM->hwaccm.s.fTPRPatchingActive = false;
1717 return VINF_SUCCESS;
1718}
1719
1720/**
1721 * Enable patching in a VT-x/AMD-V guest
1722 *
1723 * @returns VBox status code.
1724 * @param pVM The VM to operate on.
1725 * @param idCpu VCPU to execute hwaccmR3RemovePatches on
1726 * @param pPatchMem Patch memory range
1727 * @param cbPatchMem Size of the memory range
1728 */
1729int hwaccmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
1730{
1731 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)(uintptr_t)idCpu);
1732 AssertRC(rc);
1733
1734 pVM->hwaccm.s.pGuestPatchMem = pPatchMem;
1735 pVM->hwaccm.s.pFreeGuestPatchMem = pPatchMem;
1736 pVM->hwaccm.s.cbGuestPatchMem = cbPatchMem;
1737 return VINF_SUCCESS;
1738}
1739
1740/**
1741 * Enable patching in a VT-x/AMD-V guest
1742 *
1743 * @returns VBox status code.
1744 * @param pVM The VM to operate on.
1745 * @param pPatchMem Patch memory range
1746 * @param cbPatchMem Size of the memory range
1747 */
1748VMMR3DECL(int) HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1749{
1750 VM_ASSERT_EMT(pVM);
1751 Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1752 if (pVM->cCpus > 1)
1753 {
1754 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
1755 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
1756 (PFNRT)hwaccmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1757 AssertRC(rc);
1758 return rc;
1759 }
1760 return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1761}
1762
1763/**
1764 * Disable patching in a VT-x/AMD-V guest
1765 *
1766 * @returns VBox status code.
1767 * @param pVM The VM to operate on.
1768 * @param pPatchMem Patch memory range
1769 * @param cbPatchMem Size of the memory range
1770 */
1771VMMR3DECL(int) HWACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1772{
1773 Log(("HWACMMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1774
1775 Assert(pVM->hwaccm.s.pGuestPatchMem == pPatchMem);
1776 Assert(pVM->hwaccm.s.cbGuestPatchMem == cbPatchMem);
1777
1778 /* @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
1779 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)(uintptr_t)VMMGetCpuId(pVM));
1780 AssertRC(rc);
1781
1782 pVM->hwaccm.s.pGuestPatchMem = 0;
1783 pVM->hwaccm.s.pFreeGuestPatchMem = 0;
1784 pVM->hwaccm.s.cbGuestPatchMem = 0;
1785 pVM->hwaccm.s.fTPRPatchingActive = false;
1786 return VINF_SUCCESS;
1787}
1788
1789
1790/**
1791 * Callback to patch a TPR instruction (vmmcall or mov cr8)
1792 *
1793 * @returns VBox strict status code.
1794 * @param pVM The VM handle.
1795 * @param pVCpu The VMCPU for the EMT we're being called on.
1796  * @param pvUser The VCPU id of the EMT that issued the original patch request.
1797 *
1798 */
1799DECLCALLBACK(VBOXSTRICTRC) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1800{
1801 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1802 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1803 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
1804 unsigned cbOp;
1805
1806     /* Only execute the handler on the VCPU the original patch request was issued on (the other CPU(s) might not yet have switched to protected mode). */
1807 if (pVCpu->idCpu != idCpu)
1808 return VINF_SUCCESS;
1809
1810 Log(("hwaccmR3ReplaceTprInstr: %RGv\n", pCtx->rip));
1811
1812 /* Two or more VCPUs were racing to patch this instruction. */
1813 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
1814 if (pPatch)
1815 return VINF_SUCCESS;
1816
1817 Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));
1818
1819 int rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1820 AssertRC(rc);
1821 if ( rc == VINF_SUCCESS
1822 && pDis->pCurInstr->opcode == OP_MOV
1823 && cbOp >= 3)
1824 {
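        /* 0F 01 D9 encodes VMMCALL; it replaces the guest's TPR access so ring-0 can service it. */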
1825 uint8_t aVMMCall[3] = { 0xf, 0x1, 0xd9};
1826 uint32_t idx = pVM->hwaccm.s.cPatches;
1827
1828 pPatch = &pVM->hwaccm.s.aPatches[idx];
1829
1830 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
1831 AssertRC(rc);
1832
1833 pPatch->cbOp = cbOp;
1834
1835 if (pDis->param1.flags == USE_DISPLACEMENT32)
1836 {
1837 /* write. */
1838 if (pDis->param2.flags == USE_REG_GEN32)
1839 {
1840 pPatch->enmType = HWACCMTPRINSTR_WRITE_REG;
1841 pPatch->uSrcOperand = pDis->param2.base.reg_gen;
1842 }
1843 else
1844 {
1845 Assert(pDis->param2.flags == USE_IMMEDIATE32);
1846 pPatch->enmType = HWACCMTPRINSTR_WRITE_IMM;
1847 pPatch->uSrcOperand = pDis->param2.parval;
1848 }
1849 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
1850 AssertRC(rc);
1851
1852 memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
1853 pPatch->cbNewOp = sizeof(aVMMCall);
1854 }
1855 else
1856 {
1857 RTGCPTR oldrip = pCtx->rip;
1858 uint32_t oldcbOp = cbOp;
1859 uint32_t uMmioReg = pDis->param1.base.reg_gen;
1860
1861 /* read */
1862 Assert(pDis->param1.flags == USE_REG_GEN32);
1863
1864 /* Found:
1865 * mov eax, dword [fffe0080] (5 bytes)
1866 * Check if next instruction is:
1867 * shr eax, 4
1868 */
1869 pCtx->rip += cbOp;
1870 rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1871 pCtx->rip = oldrip;
1872 if ( rc == VINF_SUCCESS
1873 && pDis->pCurInstr->opcode == OP_SHR
1874 && pDis->param1.flags == USE_REG_GEN32
1875 && pDis->param1.base.reg_gen == uMmioReg
1876 && pDis->param2.flags == USE_IMMEDIATE8
1877 && pDis->param2.parval == 4
1878 && oldcbOp + cbOp < sizeof(pVM->hwaccm.s.aPatches[idx].aOpcode))
1879 {
1880 uint8_t szInstr[15];
1881
1882 /* Replacing two instructions now. */
1883 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, oldcbOp + cbOp);
1884 AssertRC(rc);
1885
1886 pPatch->cbOp = oldcbOp + cbOp;
1887
1888 /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
1889 szInstr[0] = 0xF0;
1890 szInstr[1] = 0x0F;
1891 szInstr[2] = 0x20;
1892 szInstr[3] = 0xC0 | pDis->param1.base.reg_gen;
1893 for (unsigned i = 4; i < pPatch->cbOp; i++)
1894 szInstr[i] = 0x90; /* nop */
1895
1896 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, szInstr, pPatch->cbOp);
1897 AssertRC(rc);
1898
1899 memcpy(pPatch->aNewOpcode, szInstr, pPatch->cbOp);
1900 pPatch->cbNewOp = pPatch->cbOp;
1901
1902 Log(("Acceptable read/shr candidate!\n"));
1903 pPatch->enmType = HWACCMTPRINSTR_READ_SHR4;
1904 }
1905 else
1906 {
1907 pPatch->enmType = HWACCMTPRINSTR_READ;
1908 pPatch->uDstOperand = pDis->param1.base.reg_gen;
1909
1910 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
1911 AssertRC(rc);
1912
1913 memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
1914 pPatch->cbNewOp = sizeof(aVMMCall);
1915 }
1916 }
1917
1918 pPatch->Core.Key = pCtx->eip;
1919 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
1920 AssertRC(rc);
1921
1922 pVM->hwaccm.s.cPatches++;
1923 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess);
1924 return VINF_SUCCESS;
1925 }
1926
1927 /* Save invalid patch, so we will not try again. */
1928 uint32_t idx = pVM->hwaccm.s.cPatches;
1929
1930#ifdef LOG_ENABLED
1931 char szOutput[256];
1932 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1933 szOutput, sizeof(szOutput), NULL);
1934 if (RT_SUCCESS(rc))
1935 Log(("Failed to patch instr: %s\n", szOutput));
1936#endif
1937
1938 pPatch = &pVM->hwaccm.s.aPatches[idx];
1939 pPatch->Core.Key = pCtx->eip;
1940 pPatch->enmType = HWACCMTPRINSTR_INVALID;
1941 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
1942 AssertRC(rc);
1943 pVM->hwaccm.s.cPatches++;
1944 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure);
1945 return VINF_SUCCESS;
1946}
1947
1948/**
1949 * Callback to patch a TPR instruction (jump to generated code)
1950 *
1951 * @returns VBox strict status code.
1952 * @param pVM The VM handle.
1953 * @param pVCpu The VMCPU for the EMT we're being called on.
1954  * @param pvUser The VCPU id of the EMT that issued the original patch request.
1955 *
1956 */
1957DECLCALLBACK(VBOXSTRICTRC) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1958{
1959 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1960 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1961 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
1962 unsigned cbOp;
1963 int rc;
1964#ifdef LOG_ENABLED
1965 RTGCPTR pInstr;
1966 char szOutput[256];
1967#endif
1968
1969     /* Only execute the handler on the VCPU the original patch request was issued on (the other CPU(s) might not yet have switched to protected mode). */
1970 if (pVCpu->idCpu != idCpu)
1971 return VINF_SUCCESS;
1972
1973 Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));
1974
1975 /* Two or more VCPUs were racing to patch this instruction. */
1976 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
1977 if (pPatch)
1978 {
1979 Log(("hwaccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
1980 return VINF_SUCCESS;
1981 }
1982
1983 Log(("hwaccmR3PatchTprInstr %RGv\n", pCtx->rip));
1984
1985 rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1986 AssertRC(rc);
1987 if ( rc == VINF_SUCCESS
1988 && pDis->pCurInstr->opcode == OP_MOV
1989 && cbOp >= 5)
1990 {
1991 uint32_t idx = pVM->hwaccm.s.cPatches;
1992 uint8_t aPatch[64];
1993 uint32_t off = 0;
1994
1995 pPatch = &pVM->hwaccm.s.aPatches[idx];
1996
1997#ifdef LOG_ENABLED
1998 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1999 szOutput, sizeof(szOutput), NULL);
2000 if (RT_SUCCESS(rc))
2001 Log(("Original instr: %s\n", szOutput));
2002#endif
2003
2004 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2005 AssertRC(rc);
2006
2007 pPatch->cbOp = cbOp;
2008 pPatch->enmType = HWACCMTPRINSTR_JUMP_REPLACEMENT;
2009
2010 if (pDis->param1.flags == USE_DISPLACEMENT32)
2011 {
2012 /*
2013 * TPR write:
2014 *
2015 * push ECX [51]
2016 * push EDX [52]
2017 * push EAX [50]
2018 * xor EDX,EDX [31 D2]
2019 * mov EAX,EAX [89 C0]
2020 * or
2021 * mov EAX,0000000CCh [B8 CC 00 00 00]
2022 * mov ECX,0C0000082h [B9 82 00 00 C0]
2023 * wrmsr [0F 30]
2024 * pop EAX [58]
2025 * pop EDX [5A]
2026 * pop ECX [59]
2027 * jmp return_address [E9 return_address]
2028 *
2029 */
2030 bool fUsesEax = (pDis->param2.flags == USE_REG_GEN32 && pDis->param2.base.reg_gen == USE_REG_EAX);
2031
2032 aPatch[off++] = 0x51; /* push ecx */
2033 aPatch[off++] = 0x52; /* push edx */
2034 if (!fUsesEax)
2035 aPatch[off++] = 0x50; /* push eax */
2036 aPatch[off++] = 0x31; /* xor edx, edx */
2037 aPatch[off++] = 0xD2;
2038 if (pDis->param2.flags == USE_REG_GEN32)
2039 {
2040 if (!fUsesEax)
2041 {
2042 aPatch[off++] = 0x89; /* mov eax, src_reg */
2043 aPatch[off++] = MAKE_MODRM(3, pDis->param2.base.reg_gen, USE_REG_EAX);
2044 }
2045 }
2046 else
2047 {
2048 Assert(pDis->param2.flags == USE_IMMEDIATE32);
2049 aPatch[off++] = 0xB8; /* mov eax, immediate */
2050 *(uint32_t *)&aPatch[off] = pDis->param2.parval;
2051 off += sizeof(uint32_t);
2052 }
2053 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
2054 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2055 off += sizeof(uint32_t);
2056
2057 aPatch[off++] = 0x0F; /* wrmsr */
2058 aPatch[off++] = 0x30;
2059 if (!fUsesEax)
2060 aPatch[off++] = 0x58; /* pop eax */
2061 aPatch[off++] = 0x5A; /* pop edx */
2062 aPatch[off++] = 0x59; /* pop ecx */
2063 }
2064 else
2065 {
2066 /*
2067 * TPR read:
2068 *
2069 * push ECX [51]
2070 * push EDX [52]
2071              * push EAX                          [50]
                  * xor EDX,EDX                       [31 D2]
2072              * mov ECX,0C0000082h                [B9 82 00 00 C0]
2073 * rdmsr [0F 32]
2074 * mov EAX,EAX [89 C0]
2075 * pop EAX [58]
2076 * pop EDX [5A]
2077 * pop ECX [59]
2078 * jmp return_address [E9 return_address]
2079 *
2080 */
2081 Assert(pDis->param1.flags == USE_REG_GEN32);
2082
2083 if (pDis->param1.base.reg_gen != USE_REG_ECX)
2084 aPatch[off++] = 0x51; /* push ecx */
2085 if (pDis->param1.base.reg_gen != USE_REG_EDX)
2086 aPatch[off++] = 0x52; /* push edx */
2087 if (pDis->param1.base.reg_gen != USE_REG_EAX)
2088 aPatch[off++] = 0x50; /* push eax */
2089
2090 aPatch[off++] = 0x31; /* xor edx, edx */
2091 aPatch[off++] = 0xD2;
2092
2093 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
2094 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2095 off += sizeof(uint32_t);
2096
2097 aPatch[off++] = 0x0F; /* rdmsr */
2098 aPatch[off++] = 0x32;
2099
2100 if (pDis->param1.base.reg_gen != USE_REG_EAX)
2101 {
2102 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2103 aPatch[off++] = MAKE_MODRM(3, USE_REG_EAX, pDis->param1.base.reg_gen);
2104 }
2105
2106 if (pDis->param1.base.reg_gen != USE_REG_EAX)
2107 aPatch[off++] = 0x58; /* pop eax */
2108 if (pDis->param1.base.reg_gen != USE_REG_EDX)
2109 aPatch[off++] = 0x5A; /* pop edx */
2110 if (pDis->param1.base.reg_gen != USE_REG_ECX)
2111 aPatch[off++] = 0x59; /* pop ecx */
2112 }
2113 aPatch[off++] = 0xE9; /* jmp return_address */
2114 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem + off + 4);
2115 off += sizeof(RTRCUINTPTR);
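        /* The E9 rel32 displacement is relative to the byte following the jump, so the patch code
           jumps back to the instruction after the original (unpatched) TPR access. */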
2116
2117 if (pVM->hwaccm.s.pFreeGuestPatchMem + off <= pVM->hwaccm.s.pGuestPatchMem + pVM->hwaccm.s.cbGuestPatchMem)
2118 {
2119 /* Write new code to the patch buffer. */
2120 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hwaccm.s.pFreeGuestPatchMem, aPatch, off);
2121 AssertRC(rc);
2122
2123#ifdef LOG_ENABLED
2124 pInstr = pVM->hwaccm.s.pFreeGuestPatchMem;
2125 while (true)
2126 {
2127 uint32_t cb;
2128
2129 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2130 szOutput, sizeof(szOutput), &cb);
2131 if (RT_SUCCESS(rc))
2132 Log(("Patch instr %s\n", szOutput));
2133
2134 pInstr += cb;
2135
2136 if (pInstr >= pVM->hwaccm.s.pFreeGuestPatchMem + off)
2137 break;
2138 }
2139#endif
2140
2141 pPatch->aNewOpcode[0] = 0xE9;
2142 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2143
2144 /* Overwrite the TPR instruction with a jump. */
2145 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2146 AssertRC(rc);
2147
2148#ifdef LOG_ENABLED
2149 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2150 szOutput, sizeof(szOutput), NULL);
2151 if (RT_SUCCESS(rc))
2152 Log(("Jump: %s\n", szOutput));
2153#endif
2154 pVM->hwaccm.s.pFreeGuestPatchMem += off;
2155 pPatch->cbNewOp = 5;
2156
2157 pPatch->Core.Key = pCtx->eip;
2158 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
2159 AssertRC(rc);
2160
2161 pVM->hwaccm.s.cPatches++;
2162 pVM->hwaccm.s.fTPRPatchingActive = true;
2163 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess);
2164 return VINF_SUCCESS;
2165 }
2166 else
2167 Log(("Ran out of space in our patch buffer!\n"));
2168 }
2169
2170 /* Save invalid patch, so we will not try again. */
2171 uint32_t idx = pVM->hwaccm.s.cPatches;
2172
2173#ifdef LOG_ENABLED
2174 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2175 szOutput, sizeof(szOutput), NULL);
2176 if (RT_SUCCESS(rc))
2177 Log(("Failed to patch instr: %s\n", szOutput));
2178#endif
2179
2180 pPatch = &pVM->hwaccm.s.aPatches[idx];
2181 pPatch->Core.Key = pCtx->eip;
2182 pPatch->enmType = HWACCMTPRINSTR_INVALID;
2183 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
2184 AssertRC(rc);
2185 pVM->hwaccm.s.cPatches++;
2186 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure);
2187 return VINF_SUCCESS;
2188}
2189
2190/**
2191  * Attempts to patch TPR MMIO instructions.
2192 *
2193 * @returns VBox status code.
2194 * @param pVM The VM to operate on.
2195 * @param pVCpu The VM CPU to operate on.
2196 * @param pCtx CPU context
2197 */
2198VMMR3DECL(int) HWACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2199{
2200 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, (pVM->hwaccm.s.pGuestPatchMem) ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr, (void *)(uintptr_t)pVCpu->idCpu);
2201 AssertRC(rc);
2202 return rc;
2203}
2204
2205/**
2206 * Force execution of the current IO code in the recompiler
2207 *
2208 * @returns VBox status code.
2209 * @param pVM The VM to operate on.
2210 * @param pCtx Partial VM execution context
2211 */
2212VMMR3DECL(int) HWACCMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
2213{
2214 PVMCPU pVCpu = VMMGetCpu(pVM);
2215
2216 Assert(pVM->fHWACCMEnabled);
2217 Log(("HWACCMR3EmulateIoBlock\n"));
2218
2219 /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
2220 if (HWACCMCanEmulateIoBlockEx(pCtx))
2221 {
2222 Log(("HWACCMR3EmulateIoBlock -> enabled\n"));
2223 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = true;
2224 pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
2225 pVCpu->hwaccm.s.EmulateIoBlock.cr0 = pCtx->cr0;
2226 return VINF_EM_RESCHEDULE_REM;
2227 }
2228 return VINF_SUCCESS;
2229}
2230
2231/**
2232 * Checks if we can currently use hardware accelerated raw mode.
2233 *
2234 * @returns boolean
2235 * @param pVM The VM to operate on.
2236 * @param pCtx Partial VM execution context
2237 */
2238VMMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
2239{
2240 PVMCPU pVCpu = VMMGetCpu(pVM);
2241
2242 Assert(pVM->fHWACCMEnabled);
2243
2244 /* If we're still executing the IO code, then return false. */
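    /* The guest is treated as still being inside that I/O routine while RIP stays within +/- 512
       bytes of the instruction that triggered the emulation and CR0 is unchanged. */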
2245 if ( RT_UNLIKELY(pVCpu->hwaccm.s.EmulateIoBlock.fEnabled)
2246 && pCtx->rip < pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
2247 && pCtx->rip > pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
2248 && pCtx->cr0 == pVCpu->hwaccm.s.EmulateIoBlock.cr0)
2249 return false;
2250
2251 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = false;
2252
2253 /* AMD-V supports real & protected mode with or without paging. */
2254 if (pVM->hwaccm.s.svm.fEnabled)
2255 {
2256 pVCpu->hwaccm.s.fActive = true;
2257 return true;
2258 }
2259
2260 pVCpu->hwaccm.s.fActive = false;
2261
2262 /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
2263 Assert((pVM->hwaccm.s.vmx.fUnrestrictedGuest && !pVM->hwaccm.s.vmx.pRealModeTSS) || (!pVM->hwaccm.s.vmx.fUnrestrictedGuest && pVM->hwaccm.s.vmx.pRealModeTSS));
2264
2265 bool fSupportsRealMode = pVM->hwaccm.s.vmx.fUnrestrictedGuest || PDMVMMDevHeapIsEnabled(pVM);
2266 if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
2267 {
2268 /** The VMM device heap is a requirement for emulating real mode or protected mode without paging when the unrestricted guest execution feature is missing. */
2269 if (fSupportsRealMode)
2270 {
2271 if (CPUMIsGuestInRealModeEx(pCtx))
2272 {
2273 /* VT-x will not allow high selector bases in v86 mode; fall back to the recompiler in that case.
2274 * The base must also be equal to (sel << 4).
2275 */
2276 if ( ( pCtx->cs != (pCtx->csHid.u64Base >> 4)
2277 && pCtx->csHid.u64Base != 0xffff0000 /* we can deal with the BIOS code as it's also mapped into the lower region. */)
2278 || pCtx->ds != (pCtx->dsHid.u64Base >> 4)
2279 || pCtx->es != (pCtx->esHid.u64Base >> 4)
2280 || pCtx->fs != (pCtx->fsHid.u64Base >> 4)
2281 || pCtx->gs != (pCtx->gsHid.u64Base >> 4)
2282 || pCtx->ss != (pCtx->ssHid.u64Base >> 4))
2283 {
2284 return false;
2285 }
2286 }
2287 else
2288 {
2289 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
2290 /* Verify the requirements for executing code in protected mode. VT-x can't handle the CPU state right after a switch
2291 * from real to protected mode. (all sorts of RPL & DPL assumptions)
2292 */
2293 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
2294 && enmGuestMode >= PGMMODE_PROTECTED)
2295 {
2296 if ( (pCtx->cs & X86_SEL_RPL)
2297 || (pCtx->ds & X86_SEL_RPL)
2298 || (pCtx->es & X86_SEL_RPL)
2299 || (pCtx->fs & X86_SEL_RPL)
2300 || (pCtx->gs & X86_SEL_RPL)
2301 || (pCtx->ss & X86_SEL_RPL))
2302 {
2303 return false;
2304 }
2305 }
2306 /* VT-x also chokes on invalid tr or ldtr selectors (minix) */
2307 if ( pCtx->gdtr.cbGdt
2308 && ( pCtx->tr > pCtx->gdtr.cbGdt
2309 || pCtx->ldtr > pCtx->gdtr.cbGdt))
2310 {
2311 return false;
2312 }
2313 }
2314 }
2315 else
2316 {
2317 if ( !CPUMIsGuestInLongModeEx(pCtx)
2318 && !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
2319 {
2320 /** @todo This should (probably) be set on every excursion to the REM,
2321 * however it's too risky right now. So, only apply it when we go
2322 * back to REM for real mode execution. (The XP hack below doesn't
2323 * work reliably without this.)
2324 * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HWACCM. */
2325 pVM->aCpus[0].hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2326
2327 if ( !pVM->hwaccm.s.fNestedPaging /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap*/
2328 || CPUMIsGuestInRealModeEx(pCtx)) /* requires a fake TSS for real mode - stored in the VMM device heap */
2329 return false;
2330
2331 /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
2332 if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr == 0)
2333 return false;
2334
2335 /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
2336 /* Windows XP; switch to protected mode; all selectors are marked not present in the
2337 * hidden registers (possible recompiler bug; see load_seg_vm) */
2338 if (pCtx->csHid.Attr.n.u1Present == 0)
2339 return false;
2340 if (pCtx->ssHid.Attr.n.u1Present == 0)
2341 return false;
2342
2343             /* Windows XP: possibly the same as above; the new recompiler may require new heuristics.
2344                VT-x doesn't seem to like something about the guest state and this check avoids it. */
2345 /** @todo This check is actually wrong, it doesn't take the direction of the
2346 * stack segment into account. But, it does the job for now. */
2347 if (pCtx->rsp >= pCtx->ssHid.u32Limit)
2348 return false;
2349 #if 0
2350 if ( pCtx->cs >= pCtx->gdtr.cbGdt
2351 || pCtx->ss >= pCtx->gdtr.cbGdt
2352 || pCtx->ds >= pCtx->gdtr.cbGdt
2353 || pCtx->es >= pCtx->gdtr.cbGdt
2354 || pCtx->fs >= pCtx->gdtr.cbGdt
2355 || pCtx->gs >= pCtx->gdtr.cbGdt)
2356 return false;
2357 #endif
2358 }
2359 }
2360 }
2361
2362 if (pVM->hwaccm.s.vmx.fEnabled)
2363 {
2364 uint32_t mask;
2365
2366 /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
2367 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
2368 /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
2369 mask &= ~X86_CR0_NE;
2370
2371 if (fSupportsRealMode)
2372 {
2373 /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
2374 mask &= ~(X86_CR0_PG|X86_CR0_PE);
2375 }
2376 else
2377 {
2378 /* We support protected mode without paging using identity mapping. */
2379 mask &= ~X86_CR0_PG;
2380 }
2381 if ((pCtx->cr0 & mask) != mask)
2382 return false;
2383
2384 /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
2385 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
2386 if ((pCtx->cr0 & mask) != 0)
2387 return false;
2388
2389 /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
2390 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
2391 mask &= ~X86_CR4_VMXE;
2392 if ((pCtx->cr4 & mask) != mask)
2393 return false;
2394
2395 /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
2396 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
2397 if ((pCtx->cr4 & mask) != 0)
2398 return false;
2399
2400 pVCpu->hwaccm.s.fActive = true;
2401 return true;
2402 }
2403
2404 return false;
2405}
2406
2407/**
2408 * Checks if we need to reschedule due to VMM device heap changes
2409 *
2410 * @returns boolean
2411 * @param pVM The VM to operate on.
2412 * @param pCtx VM execution context
2413 */
2414VMMR3DECL(bool) HWACCMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
2415{
2416 /** The VMM device heap is a requirement for emulating real mode or protected mode without paging when the unrestricted guest execution feature is missing. (VT-x only) */
2417 if ( pVM->hwaccm.s.vmx.fEnabled
2418 && !pVM->hwaccm.s.vmx.fUnrestrictedGuest
2419 && !CPUMIsGuestInPagedProtectedModeEx(pCtx)
2420 && !PDMVMMDevHeapIsEnabled(pVM)
2421 && (pVM->hwaccm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
2422 return true;
2423
2424 return false;
2425}
2426
2427
2428/**
2429 * Notification from EM about a rescheduling into hardware assisted execution
2430 * mode.
2431 *
2432 * @param pVCpu Pointer to the current virtual cpu structure.
2433 */
2434VMMR3DECL(void) HWACCMR3NotifyScheduled(PVMCPU pVCpu)
2435{
2436 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2437}
2438
2439/**
2440 * Notification from EM about returning from instruction emulation (REM / EM).
2441 *
2442 * @param pVCpu Pointer to the current virtual cpu structure.
2443 */
2444VMMR3DECL(void) HWACCMR3NotifyEmulated(PVMCPU pVCpu)
2445{
2446 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2447}
2448
2449/**
2450 * Checks if we are currently using hardware accelerated raw mode.
2451 *
2452 * @returns boolean
2453 * @param pVCpu The VMCPU to operate on.
2454 */
2455VMMR3DECL(bool) HWACCMR3IsActive(PVMCPU pVCpu)
2456{
2457 return pVCpu->hwaccm.s.fActive;
2458}
2459
2460/**
2461 * Checks if we are currently using nested paging.
2462 *
2463 * @returns boolean
2464 * @param pVM The VM to operate on.
2465 */
2466VMMR3DECL(bool) HWACCMR3IsNestedPagingActive(PVM pVM)
2467{
2468 return pVM->hwaccm.s.fNestedPaging;
2469}
2470
2471/**
2472 * Checks if we are currently using VPID in VT-x mode.
2473 *
2474 * @returns boolean
2475 * @param pVM The VM to operate on.
2476 */
2477VMMR3DECL(bool) HWACCMR3IsVPIDActive(PVM pVM)
2478{
2479 return pVM->hwaccm.s.vmx.fVPID;
2480}
2481
2482
2483/**
2484 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
2485 *
2486 * @returns boolean
2487  * @param pVCpu The VMCPU to operate on.
2488 */
2489VMMR3DECL(bool) HWACCMR3IsEventPending(PVMCPU pVCpu)
2490{
2491 return HWACCMIsEnabled(pVCpu->pVMR3) && pVCpu->hwaccm.s.Event.fPending;
2492}
2493
2494/**
2495 * Checks if the VMX-preemption timer is being used.
2496 *
2497 * @returns true if it is, false if it isn't.
2498 * @param pVM The VM handle.
2499 */
2500VMMR3DECL(bool) HWACCMR3IsVmxPreemptionTimerUsed(PVM pVM)
2501{
2502 return HWACCMIsEnabled(pVM)
2503 && pVM->hwaccm.s.vmx.fEnabled
2504 && pVM->hwaccm.s.vmx.fUsePreemptTimer;
2505}
2506
2507/**
2508 * Restart an I/O instruction that was refused in ring-0
2509 *
2510 * @returns Strict VBox status code. Informational status codes other than the one documented
2511 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2512 * @retval VINF_SUCCESS Success.
2513 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2514 * status code must be passed on to EM.
2515 * @retval VERR_NOT_FOUND if no pending I/O instruction.
2516 *
2517 * @param pVM The VM to operate on.
2518 * @param pVCpu The VMCPU to operate on.
2519 * @param pCtx VCPU register context
2520 */
2521VMMR3DECL(VBOXSTRICTRC) HWACCMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2522{
2523 HWACCMPENDINGIO enmType = pVCpu->hwaccm.s.PendingIO.enmType;
2524
2525 pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_INVALID;
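    /* The pending I/O record is consumed up front; it must not be replayed a second time whatever
       the outcome below. */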
2526
2527 if ( pVCpu->hwaccm.s.PendingIO.GCPtrRip != pCtx->rip
2528 || enmType == HWACCMPENDINGIO_INVALID)
2529 return VERR_NOT_FOUND;
2530
2531 VBOXSTRICTRC rcStrict;
2532 switch (enmType)
2533 {
2534 case HWACCMPENDINGIO_PORT_READ:
2535 {
2536 uint32_t uAndVal = pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal;
2537 uint32_t u32Val = 0;
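            /* uAndVal masks the result to the operand size of the original IN instruction, so only
               that part of EAX is updated below. */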
2538
2539 rcStrict = IOMIOPortRead(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
2540 &u32Val,
2541 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
2542 if (IOM_SUCCESS(rcStrict))
2543 {
2544 /* Write back to the EAX register. */
2545 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
2546 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
2547 }
2548 break;
2549 }
2550
2551 case HWACCMPENDINGIO_PORT_WRITE:
2552 rcStrict = IOMIOPortWrite(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
2553 pCtx->eax & pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal,
2554 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
2555 if (IOM_SUCCESS(rcStrict))
2556 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
2557 break;
2558
2559 default:
2560 AssertFailed();
2561 return VERR_INTERNAL_ERROR;
2562 }
2563
2564 return rcStrict;
2565}
2566
2567/**
2568 * Injects an NMI into a running VM (only VCPU 0!).
2569 *
2570 * @returns VBox status code.
2571 * @param pVM The VM to operate on.
2572 */
2573VMMR3DECL(int) HWACCMR3InjectNMI(PVM pVM)
2574{
2575 VMCPU_FF_SET(&pVM->aCpus[0], VMCPU_FF_INTERRUPT_NMI);
2576 return VINF_SUCCESS;
2577}
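/* Illustrative note (not part of the original file): HWACCMR3InjectNMI merely
   raises VMCPU_FF_INTERRUPT_NMI on VCPU 0; the NMI itself is delivered the
   next time that VCPU passes through the hardware-assisted execution loop.
   A hypothetical caller would simply do: */
#if 0 /* example only */
    int rc = HWACCMR3InjectNMI(pVM);
    AssertRC(rc);
#endif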
2578
2579/**
2580 * Checks for a fatal VT-x/AMD-V error and produces a meaningful
2581 * release log message.
2582 *
2583 * @param pVM The VM to operate on.
2584 * @param iStatusCode VBox status code.
2585 */
2586VMMR3DECL(void) HWACCMR3CheckError(PVM pVM, int iStatusCode)
2587{
2588 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2589 {
2590 switch(iStatusCode)
2591 {
2592 case VERR_VMX_INVALID_VMCS_FIELD:
2593 break;
2594
2595 case VERR_VMX_INVALID_VMCS_PTR:
2596 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));
2597 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
2598 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idEnteredCpu));
2599 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idCurrentCpu));
2600 break;
2601
2602 case VERR_VMX_UNABLE_TO_START_VM:
2603 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
2604 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
2605#if 0 /* @todo dump the current control fields to the release log */
2606 if (pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
2607 {
2608
2609 }
2610#endif
2611 break;
2612
2613 case VERR_VMX_UNABLE_TO_RESUME_VM:
2614 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
2615 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
2616 break;
2617
2618 case VERR_VMX_INVALID_VMXON_PTR:
2619 break;
2620 }
2621 }
2622}
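/* Illustrative sketch (not part of the original file): a hypothetical caller
   that received a fatal status code back from the ring-0 world switch would
   log the per-CPU details before giving up, e.g.: */
#if 0 /* example only */
    if (   rc == VERR_VMX_UNABLE_TO_START_VM
        || rc == VERR_VMX_UNABLE_TO_RESUME_VM
        || rc == VERR_VMX_INVALID_VMCS_PTR)
        HWACCMR3CheckError(pVM, rc);
#endif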
2623
2624/**
2625 * Execute state save operation.
2626 *
2627 * @returns VBox status code.
2628 * @param pVM VM Handle.
2629 * @param pSSM SSM operation handle.
2630 */
2631static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
2632{
2633 int rc;
2634
2635 Log(("hwaccmR3Save:\n"));
2636
2637 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2638 {
2639 /*
2640 * Save the basic bits - fortunately all the other things can be resynced on load.
2641 */
2642 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
2643 AssertRCReturn(rc, rc);
2644 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
2645 AssertRCReturn(rc, rc);
2646 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
2647 AssertRCReturn(rc, rc);
2648
2649 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode);
2650 AssertRCReturn(rc, rc);
2651 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode);
2652 AssertRCReturn(rc, rc);
2653 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode);
2654 AssertRCReturn(rc, rc);
2655 }
2656#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
2657 rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pGuestPatchMem);
2658 AssertRCReturn(rc, rc);
2659 rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pFreeGuestPatchMem);
2660 AssertRCReturn(rc, rc);
2661 rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cbGuestPatchMem);
2662 AssertRCReturn(rc, rc);
2663
2664 /* Store all the guest patch records too. */
2665 rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cPatches);
2666 AssertRCReturn(rc, rc);
2667
2668 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
2669 {
2670 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
2671
2672 rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
2673 AssertRCReturn(rc, rc);
2674
2675 rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
2676 AssertRCReturn(rc, rc);
2677
2678 rc = SSMR3PutU32(pSSM, pPatch->cbOp);
2679 AssertRCReturn(rc, rc);
2680
2681 rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
2682 AssertRCReturn(rc, rc);
2683
2684 rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
2685 AssertRCReturn(rc, rc);
2686
2687 AssertCompileSize(HWACCMTPRINSTR, 4);
2688 rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
2689 AssertRCReturn(rc, rc);
2690
2691 rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
2692 AssertRCReturn(rc, rc);
2693
2694 rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
2695 AssertRCReturn(rc, rc);
2696
2697 rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
2698 AssertRCReturn(rc, rc);
2699
2700 rc = SSMR3PutU32(pSSM, pPatch->cFaults);
2701 AssertRCReturn(rc, rc);
2702 }
2703#endif
2704 return VINF_SUCCESS;
2705}
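/* Illustrative note (not part of the original file): per VCPU the unit written
   above consists of two 32-bit fields and one 64-bit field for the pending
   event (fPending, errCode, intInfo) followed by three 32-bit PGMMODE values;
   with VBOX_HWACCM_WITH_GUEST_PATCHING the guest patch memory pointers and the
   TPR patch records follow.  hwaccmR3Load below consumes the fields in exactly
   this order, so save and load must always be extended in lock-step (and the
   unit version bumped). */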
2706
2707/**
2708 * Execute state load operation.
2709 *
2710 * @returns VBox status code.
2711 * @param pVM VM Handle.
2712 * @param pSSM SSM operation handle.
2713 * @param uVersion Data layout version.
2714 * @param uPass The data pass.
2715 */
2716static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2717{
2718 int rc;
2719
2720 Log(("hwaccmR3Load:\n"));
2721 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
2722
2723 /*
2724 * Validate version.
2725 */
2726 if ( uVersion != HWACCM_SSM_VERSION
2727 && uVersion != HWACCM_SSM_VERSION_NO_PATCHING
2728 && uVersion != HWACCM_SSM_VERSION_2_0_X)
2729 {
2730 AssertMsgFailed(("hwaccmR3Load: Invalid version uVersion=%d!\n", uVersion));
2731 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
2732 }
2733 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2734 {
2735 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
2736 AssertRCReturn(rc, rc);
2737 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
2738 AssertRCReturn(rc, rc);
2739 rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
2740 AssertRCReturn(rc, rc);
2741
2742 if (uVersion >= HWACCM_SSM_VERSION_NO_PATCHING)
2743 {
2744 uint32_t val;
2745
2746 rc = SSMR3GetU32(pSSM, &val);
2747 AssertRCReturn(rc, rc);
2748 pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
2749
2750 rc = SSMR3GetU32(pSSM, &val);
2751 AssertRCReturn(rc, rc);
2752 pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
2753
2754 rc = SSMR3GetU32(pSSM, &val);
2755 AssertRCReturn(rc, rc);
2756 pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
2757 }
2758 }
2759#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
2760 if (uVersion > HWACCM_SSM_VERSION_NO_PATCHING)
2761 {
2762 rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pGuestPatchMem);
2763 AssertRCReturn(rc, rc);
2764 rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pFreeGuestPatchMem);
2765 AssertRCReturn(rc, rc);
2766 rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cbGuestPatchMem);
2767 AssertRCReturn(rc, rc);
2768
2769 /* Fetch all TPR patch records. */
2770 rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cPatches);
2771 AssertRCReturn(rc, rc);
2772
2773 for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
2774 {
2775 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
2776
2777 rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
2778 AssertRCReturn(rc, rc);
2779
2780 rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
2781 AssertRCReturn(rc, rc);
2782
2783 rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
2784 AssertRCReturn(rc, rc);
2785
2786 rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
2787 AssertRCReturn(rc, rc);
2788
2789 rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
2790 AssertRCReturn(rc, rc);
2791
2792 rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
2793 AssertRCReturn(rc, rc);
2794
2795 if (pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT)
2796 pVM->hwaccm.s.fTPRPatchingActive = true;
2797
2798 Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.fTPRPatchingActive == false);
2799
2800 rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
2801 AssertRCReturn(rc, rc);
2802
2803 rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
2804 AssertRCReturn(rc, rc);
2805
2806 rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
2807 AssertRCReturn(rc, rc);
2808
2809 rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
2810 AssertRCReturn(rc, rc);
2811
2812 Log(("hwaccmR3Load: patch %d\n", i));
2813 Log(("Key = %x\n", pPatch->Core.Key));
2814 Log(("cbOp = %d\n", pPatch->cbOp));
2815 Log(("cbNewOp = %d\n", pPatch->cbNewOp));
2816 Log(("type = %d\n", pPatch->enmType));
2817 Log(("srcop = %d\n", pPatch->uSrcOperand));
2818 Log(("dstop = %d\n", pPatch->uDstOperand));
2819 Log(("cFaults = %d\n", pPatch->cFaults));
2820 Log(("target = %x\n", pPatch->pJumpTarget));
2821 rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
2822 AssertRC(rc);
2823 }
2824 }
2825#endif
2826
2827 /* Recheck for each VCPU whether we can go straight into hwaccm execution mode. */
2828 if (HWACCMIsEnabled(pVM))
2829 {
2830 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2831 {
2832 PVMCPU pVCpu = &pVM->aCpus[i];
2833
2834 HWACCMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu));
2835 }
2836 }
2837 return VINF_SUCCESS;
2838}
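/* Illustrative sketch (not part of the original file): hwaccmR3Save and
   hwaccmR3Load together form the "HWACCM" saved-state unit.  The registration
   below is an assumption recalled from the init code and may differ in detail
   in this revision; it is shown only to illustrate how the two callbacks are
   wired up. */
#if 0 /* example only */
    rc = SSMR3RegisterInternal(pVM, "HWACCM", 0 /*uInstance*/, HWACCM_SSM_VERSION, sizeof(HWACCM),
                               NULL, NULL, NULL,
                               NULL, hwaccmR3Save, NULL,
                               NULL, hwaccmR3Load, NULL);
    AssertRCReturn(rc, rc);
#endif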
2839