VirtualBox

source: vbox/trunk/src/VBox/VMM/HWACCM.cpp@23487

Last change on this file since 23487 was 23476, checked in by vboxsync, 15 years ago

Use global (one-shot on all CPUs) or local (each time we want to execute guest code) initialization for VT-x and AMD-V.
Darwin and Windows hosts default to local; Linux and Solaris default to global. Configurable.
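
A minimal sketch of how a frontend could select the global method through CFGM; the key name matches the CFGMR3QueryBoolDef() call in HWACCMR3Init() below, while the surrounding setup code is hypothetical:

    PCFGMNODE pRoot      = CFGMR3GetRoot(pVM);
    PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
    if (!pHWVirtExt)
        CFGMR3InsertNode(pRoot, "HWVirtExt", &pHWVirtExt);   /* create the subtree if the frontend has not */
    CFGMR3InsertInteger(pHWVirtExt, "EnableGlobalInit", 1);  /* 1 = one-shot init on all host CPUs, 0 = init around each guest execution */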

1/* $Id: HWACCM.cpp 23476 2009-10-01 12:57:36Z vboxsync $ */
2/** @file
3 * HWACCM - Intel/AMD VM Hardware Support Manager
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_HWACCM
26#include <VBox/cpum.h>
27#include <VBox/stam.h>
28#include <VBox/mm.h>
29#include <VBox/pdm.h>
30#include <VBox/pgm.h>
31#include <VBox/trpm.h>
32#include <VBox/dbgf.h>
33#include <VBox/patm.h>
34#include <VBox/csam.h>
35#include <VBox/selm.h>
36#include <VBox/rem.h>
37#include <VBox/hwacc_vmx.h>
38#include <VBox/hwacc_svm.h>
39#include "HWACCMInternal.h"
40#include <VBox/vm.h>
41#include <VBox/err.h>
42#include <VBox/param.h>
43
44#include <iprt/assert.h>
45#include <VBox/log.h>
46#include <iprt/asm.h>
47#include <iprt/string.h>
48#include <iprt/thread.h>
49
50/*******************************************************************************
51* Global Variables *
52*******************************************************************************/
53#ifdef VBOX_WITH_STATISTICS
54# define EXIT_REASON(def, val, str) #def " - " #val " - " str
55# define EXIT_REASON_NIL() NULL
56/** Exit reason descriptions for VT-x, used to describe statistics. */
57static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
58{
59 EXIT_REASON(VMX_EXIT_EXCEPTION , 0, "Exception or non-maskable interrupt (NMI)."),
60 EXIT_REASON(VMX_EXIT_EXTERNAL_IRQ , 1, "External interrupt."),
61 EXIT_REASON(VMX_EXIT_TRIPLE_FAULT , 2, "Triple fault."),
62 EXIT_REASON(VMX_EXIT_INIT_SIGNAL , 3, "INIT signal."),
63 EXIT_REASON(VMX_EXIT_SIPI , 4, "Start-up IPI (SIPI)."),
64 EXIT_REASON(VMX_EXIT_IO_SMI_IRQ , 5, "I/O system-management interrupt (SMI)."),
65 EXIT_REASON(VMX_EXIT_SMI_IRQ , 6, "Other SMI."),
66 EXIT_REASON(VMX_EXIT_IRQ_WINDOW , 7, "Interrupt window."),
67 EXIT_REASON_NIL(),
68 EXIT_REASON(VMX_EXIT_TASK_SWITCH , 9, "Task switch."),
69 EXIT_REASON(VMX_EXIT_CPUID , 10, "Guest software attempted to execute CPUID."),
70 EXIT_REASON_NIL(),
71 EXIT_REASON(VMX_EXIT_HLT , 12, "Guest software attempted to execute HLT."),
72 EXIT_REASON(VMX_EXIT_INVD , 13, "Guest software attempted to execute INVD."),
73 EXIT_REASON(VMX_EXIT_INVPG , 14, "Guest software attempted to execute INVLPG."),
74 EXIT_REASON(VMX_EXIT_RDPMC , 15, "Guest software attempted to execute RDPMC."),
75 EXIT_REASON(VMX_EXIT_RDTSC , 16, "Guest software attempted to execute RDTSC."),
76 EXIT_REASON(VMX_EXIT_RSM , 17, "Guest software attempted to execute RSM in SMM."),
77 EXIT_REASON(VMX_EXIT_VMCALL , 18, "Guest software executed VMCALL."),
78 EXIT_REASON(VMX_EXIT_VMCLEAR , 19, "Guest software executed VMCLEAR."),
79 EXIT_REASON(VMX_EXIT_VMLAUNCH , 20, "Guest software executed VMLAUNCH."),
80 EXIT_REASON(VMX_EXIT_VMPTRLD , 21, "Guest software executed VMPTRLD."),
81 EXIT_REASON(VMX_EXIT_VMPTRST , 22, "Guest software executed VMPTRST."),
82 EXIT_REASON(VMX_EXIT_VMREAD , 23, "Guest software executed VMREAD."),
83 EXIT_REASON(VMX_EXIT_VMRESUME , 24, "Guest software executed VMRESUME."),
84 EXIT_REASON(VMX_EXIT_VMWRITE , 25, "Guest software executed VMWRITE."),
85 EXIT_REASON(VMX_EXIT_VMXOFF , 26, "Guest software executed VMXOFF."),
86 EXIT_REASON(VMX_EXIT_VMXON , 27, "Guest software executed VMXON."),
87 EXIT_REASON(VMX_EXIT_CRX_MOVE , 28, "Control-register accesses."),
88 EXIT_REASON(VMX_EXIT_DRX_MOVE , 29, "Debug-register accesses."),
89 EXIT_REASON(VMX_EXIT_PORT_IO , 30, "I/O instruction."),
90 EXIT_REASON(VMX_EXIT_RDMSR , 31, "RDMSR. Guest software attempted to execute RDMSR."),
91 EXIT_REASON(VMX_EXIT_WRMSR , 32, "WRMSR. Guest software attempted to execute WRMSR."),
92 EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
93 EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD , 34, "VM-entry failure due to MSR loading."),
94 EXIT_REASON_NIL(),
95 EXIT_REASON(VMX_EXIT_MWAIT , 36, "Guest software executed MWAIT."),
96 EXIT_REASON_NIL(),
97 EXIT_REASON_NIL(),
98 EXIT_REASON(VMX_EXIT_MONITOR , 39, "Guest software attempted to execute MONITOR."),
99 EXIT_REASON(VMX_EXIT_PAUSE , 40, "Guest software attempted to execute PAUSE."),
100 EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK , 41, "VM-entry failure due to machine-check."),
101 EXIT_REASON_NIL(),
102 EXIT_REASON(VMX_EXIT_TPR , 43, "TPR below threshold. Guest software executed MOV to CR8."),
103 EXIT_REASON(VMX_EXIT_APIC_ACCESS , 44, "APIC access. Guest software attempted to access memory at a physical address on the APIC-access page."),
104 EXIT_REASON_NIL(),
105 EXIT_REASON(VMX_EXIT_XDTR_ACCESS , 46, "Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT."),
106 EXIT_REASON(VMX_EXIT_TR_ACCESS , 47, "Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR."),
107 EXIT_REASON(VMX_EXIT_EPT_VIOLATION , 48, "EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures."),
108 EXIT_REASON(VMX_EXIT_EPT_MISCONFIG , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
109 EXIT_REASON(VMX_EXIT_INVEPT , 50, "INVEPT. Guest software attempted to execute INVEPT."),
110 EXIT_REASON_NIL(),
111 EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."),
112 EXIT_REASON(VMX_EXIT_INVVPID , 53, "INVVPID. Guest software attempted to execute INVVPID."),
113 EXIT_REASON(VMX_EXIT_WBINVD , 54, "WBINVD. Guest software attempted to execute WBINVD."),
114 EXIT_REASON(VMX_EXIT_XSETBV , 55, "XSETBV. Guest software attempted to execute XSETBV."),
115 EXIT_REASON_NIL()
116};
117/** Exit reason descriptions for AMD-V, used to describe statistics. */
118static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
119{
120 EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."),
121 EXIT_REASON(SVM_EXIT_READ_CR1 , 1, "Read CR1."),
122 EXIT_REASON(SVM_EXIT_READ_CR2 , 2, "Read CR2."),
123 EXIT_REASON(SVM_EXIT_READ_CR3 , 3, "Read CR3."),
124 EXIT_REASON(SVM_EXIT_READ_CR4 , 4, "Read CR4."),
125 EXIT_REASON(SVM_EXIT_READ_CR5 , 5, "Read CR5."),
126 EXIT_REASON(SVM_EXIT_READ_CR6 , 6, "Read CR6."),
127 EXIT_REASON(SVM_EXIT_READ_CR7 , 7, "Read CR7."),
128 EXIT_REASON(SVM_EXIT_READ_CR8 , 8, "Read CR8."),
129 EXIT_REASON(SVM_EXIT_READ_CR9 , 9, "Read CR9."),
130 EXIT_REASON(SVM_EXIT_READ_CR10 , 10, "Read CR10."),
131 EXIT_REASON(SVM_EXIT_READ_CR11 , 11, "Read CR11."),
132 EXIT_REASON(SVM_EXIT_READ_CR12 , 12, "Read CR12."),
133 EXIT_REASON(SVM_EXIT_READ_CR13 , 13, "Read CR13."),
134 EXIT_REASON(SVM_EXIT_READ_CR14 , 14, "Read CR14."),
135 EXIT_REASON(SVM_EXIT_READ_CR15 , 15, "Read CR15."),
136 EXIT_REASON(SVM_EXIT_WRITE_CR0 , 16, "Write CR0."),
137 EXIT_REASON(SVM_EXIT_WRITE_CR1 , 17, "Write CR1."),
138 EXIT_REASON(SVM_EXIT_WRITE_CR2 , 18, "Write CR2."),
139 EXIT_REASON(SVM_EXIT_WRITE_CR3 , 19, "Write CR3."),
140 EXIT_REASON(SVM_EXIT_WRITE_CR4 , 20, "Write CR4."),
141 EXIT_REASON(SVM_EXIT_WRITE_CR5 , 21, "Write CR5."),
142 EXIT_REASON(SVM_EXIT_WRITE_CR6 , 22, "Write CR6."),
143 EXIT_REASON(SVM_EXIT_WRITE_CR7 , 23, "Write CR7."),
144 EXIT_REASON(SVM_EXIT_WRITE_CR8 , 24, "Write CR8."),
145 EXIT_REASON(SVM_EXIT_WRITE_CR9 , 25, "Write CR9."),
146 EXIT_REASON(SVM_EXIT_WRITE_CR10 , 26, "Write CR10."),
147 EXIT_REASON(SVM_EXIT_WRITE_CR11 , 27, "Write CR11."),
148 EXIT_REASON(SVM_EXIT_WRITE_CR12 , 28, "Write CR12."),
149 EXIT_REASON(SVM_EXIT_WRITE_CR13 , 29, "Write CR13."),
150 EXIT_REASON(SVM_EXIT_WRITE_CR14 , 30, "Write CR14."),
151 EXIT_REASON(SVM_EXIT_WRITE_CR15 , 31, "Write CR15."),
152 EXIT_REASON(SVM_EXIT_READ_DR0 , 32, "Read DR0."),
153 EXIT_REASON(SVM_EXIT_READ_DR1 , 33, "Read DR1."),
154 EXIT_REASON(SVM_EXIT_READ_DR2 , 34, "Read DR2."),
155 EXIT_REASON(SVM_EXIT_READ_DR3 , 35, "Read DR3."),
156 EXIT_REASON(SVM_EXIT_READ_DR4 , 36, "Read DR4."),
157 EXIT_REASON(SVM_EXIT_READ_DR5 , 37, "Read DR5."),
158 EXIT_REASON(SVM_EXIT_READ_DR6 , 38, "Read DR6."),
159 EXIT_REASON(SVM_EXIT_READ_DR7 , 39, "Read DR7."),
160 EXIT_REASON(SVM_EXIT_READ_DR8 , 40, "Read DR8."),
161 EXIT_REASON(SVM_EXIT_READ_DR9 , 41, "Read DR9."),
162 EXIT_REASON(SVM_EXIT_READ_DR10 , 42, "Read DR10."),
163 EXIT_REASON(SVM_EXIT_READ_DR11 , 43, "Read DR11."),
164 EXIT_REASON(SVM_EXIT_READ_DR12 , 44, "Read DR12."),
165 EXIT_REASON(SVM_EXIT_READ_DR13 , 45, "Read DR13."),
166 EXIT_REASON(SVM_EXIT_READ_DR14 , 46, "Read DR14."),
167 EXIT_REASON(SVM_EXIT_READ_DR15 , 47, "Read DR15."),
168 EXIT_REASON(SVM_EXIT_WRITE_DR0 , 48, "Write DR0."),
169 EXIT_REASON(SVM_EXIT_WRITE_DR1 , 49, "Write DR1."),
170 EXIT_REASON(SVM_EXIT_WRITE_DR2 , 50, "Write DR2."),
171 EXIT_REASON(SVM_EXIT_WRITE_DR3 , 51, "Write DR3."),
172 EXIT_REASON(SVM_EXIT_WRITE_DR4 , 52, "Write DR4."),
173 EXIT_REASON(SVM_EXIT_WRITE_DR5 , 53, "Write DR5."),
174 EXIT_REASON(SVM_EXIT_WRITE_DR6 , 54, "Write DR6."),
175 EXIT_REASON(SVM_EXIT_WRITE_DR7 , 55, "Write DR7."),
176 EXIT_REASON(SVM_EXIT_WRITE_DR8 , 56, "Write DR8."),
177 EXIT_REASON(SVM_EXIT_WRITE_DR9 , 57, "Write DR9."),
178 EXIT_REASON(SVM_EXIT_WRITE_DR10 , 58, "Write DR10."),
179 EXIT_REASON(SVM_EXIT_WRITE_DR11 , 59, "Write DR11."),
180 EXIT_REASON(SVM_EXIT_WRITE_DR12 , 60, "Write DR12."),
181 EXIT_REASON(SVM_EXIT_WRITE_DR13 , 61, "Write DR13."),
182 EXIT_REASON(SVM_EXIT_WRITE_DR14 , 62, "Write DR14."),
183 EXIT_REASON(SVM_EXIT_WRITE_DR15 , 63, "Write DR15."),
184 EXIT_REASON(SVM_EXIT_EXCEPTION_0 , 64, "Exception Vector 0 (0x0)."),
185 EXIT_REASON(SVM_EXIT_EXCEPTION_1 , 65, "Exception Vector 1 (0x1)."),
186 EXIT_REASON(SVM_EXIT_EXCEPTION_2 , 66, "Exception Vector 2 (0x2)."),
187 EXIT_REASON(SVM_EXIT_EXCEPTION_3 , 67, "Exception Vector 3 (0x3)."),
188 EXIT_REASON(SVM_EXIT_EXCEPTION_4 , 68, "Exception Vector 4 (0x4)."),
189 EXIT_REASON(SVM_EXIT_EXCEPTION_5 , 69, "Exception Vector 5 (0x5)."),
190 EXIT_REASON(SVM_EXIT_EXCEPTION_6 , 70, "Exception Vector 6 (0x6)."),
191 EXIT_REASON(SVM_EXIT_EXCEPTION_7 , 71, "Exception Vector 7 (0x7)."),
192 EXIT_REASON(SVM_EXIT_EXCEPTION_8 , 72, "Exception Vector 8 (0x8)."),
193 EXIT_REASON(SVM_EXIT_EXCEPTION_9 , 73, "Exception Vector 9 (0x9)."),
194 EXIT_REASON(SVM_EXIT_EXCEPTION_A , 74, "Exception Vector 10 (0xA)."),
195 EXIT_REASON(SVM_EXIT_EXCEPTION_B , 75, "Exception Vector 11 (0xB)."),
196 EXIT_REASON(SVM_EXIT_EXCEPTION_C , 76, "Exception Vector 12 (0xC)."),
197 EXIT_REASON(SVM_EXIT_EXCEPTION_D , 77, "Exception Vector 13 (0xD)."),
198 EXIT_REASON(SVM_EXIT_EXCEPTION_E , 78, "Exception Vector 14 (0xE)."),
199 EXIT_REASON(SVM_EXIT_EXCEPTION_F , 79, "Exception Vector 15 (0xF)."),
200 EXIT_REASON(SVM_EXIT_EXCEPTION_10 , 80, "Exception Vector 16 (0x10)."),
201 EXIT_REASON(SVM_EXIT_EXCEPTION_11 , 81, "Exception Vector 17 (0x11)."),
202 EXIT_REASON(SVM_EXIT_EXCEPTION_12 , 82, "Exception Vector 18 (0x12)."),
203 EXIT_REASON(SVM_EXIT_EXCEPTION_13 , 83, "Exception Vector 19 (0x13)."),
204 EXIT_REASON(SVM_EXIT_EXCEPTION_14 , 84, "Exception Vector 20 (0x14)."),
205 EXIT_REASON(SVM_EXIT_EXCEPTION_15 , 85, "Exception Vector 21 (0x15)."),
206 EXIT_REASON(SVM_EXIT_EXCEPTION_16 , 86, "Exception Vector 22 (0x16)."),
207 EXIT_REASON(SVM_EXIT_EXCEPTION_17 , 87, "Exception Vector 23 (0x17)."),
208 EXIT_REASON(SVM_EXIT_EXCEPTION_18 , 88, "Exception Vector 24 (0x18)."),
209 EXIT_REASON(SVM_EXIT_EXCEPTION_19 , 89, "Exception Vector 25 (0x19)."),
210 EXIT_REASON(SVM_EXIT_EXCEPTION_1A , 90, "Exception Vector 26 (0x1A)."),
211 EXIT_REASON(SVM_EXIT_EXCEPTION_1B , 91, "Exception Vector 27 (0x1B)."),
212 EXIT_REASON(SVM_EXIT_EXCEPTION_1C , 92, "Exception Vector 28 (0x1C)."),
213 EXIT_REASON(SVM_EXIT_EXCEPTION_1D , 93, "Exception Vector 29 (0x1D)."),
214 EXIT_REASON(SVM_EXIT_EXCEPTION_1E , 94, "Exception Vector 30 (0x1E)."),
215 EXIT_REASON(SVM_EXIT_EXCEPTION_1F , 95, "Exception Vector 31 (0x1F)."),
216 EXIT_REASON(SVM_EXIT_EXCEPTION_INTR , 96, "Physical maskable interrupt."),
217 EXIT_REASON(SVM_EXIT_EXCEPTION_NMI , 97, "Physical non-maskable interrupt."),
218 EXIT_REASON(SVM_EXIT_EXCEPTION_SMI , 98, "System management interrupt."),
219 EXIT_REASON(SVM_EXIT_EXCEPTION_INIT , 99, "Physical INIT signal."),
220 EXIT_REASON(SVM_EXIT_EXCEPTION_VINTR ,100, "Virtual interrupt."),
221 EXIT_REASON(SVM_EXIT_EXCEPTION_CR0_SEL_WRITE ,101, "Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),
222 EXIT_REASON(SVM_EXIT_EXCEPTION_IDTR_READ ,102, "Read IDTR."),
223 EXIT_REASON(SVM_EXIT_EXCEPTION_GDTR_READ ,103, "Read GDTR."),
224 EXIT_REASON(SVM_EXIT_EXCEPTION_LDTR_READ ,104, "Read LDTR."),
225 EXIT_REASON(SVM_EXIT_EXCEPTION_TR_READ ,105, "Read TR."),
226 EXIT_REASON(SVM_EXIT_EXCEPTION_TR_READ ,106, "Write IDTR."),
227 EXIT_REASON(SVM_EXIT_EXCEPTION_TR_READ ,107, "Write GDTR."),
228 EXIT_REASON(SVM_EXIT_EXCEPTION_TR_READ ,108, "Write LDTR."),
229 EXIT_REASON(SVM_EXIT_EXCEPTION_TR_READ ,109, "Write TR."),
230 EXIT_REASON(SVM_EXIT_RDTSC ,110, "RDTSC instruction."),
231 EXIT_REASON(SVM_EXIT_RDPMC ,111, "RDPMC instruction."),
232 EXIT_REASON(SVM_EXIT_PUSHF ,112, "PUSHF instruction."),
233 EXIT_REASON(SVM_EXIT_POPF ,113, "POPF instruction."),
234 EXIT_REASON(SVM_EXIT_CPUID ,114, "CPUID instruction."),
235 EXIT_REASON(SVM_EXIT_RSM ,115, "RSM instruction."),
236 EXIT_REASON(SVM_EXIT_IRET ,116, "IRET instruction."),
237 EXIT_REASON(SVM_EXIT_SWINT ,117, "Software interrupt (INTn instructions)."),
238 EXIT_REASON(SVM_EXIT_INVD ,118, "INVD instruction."),
239 EXIT_REASON(SVM_EXIT_PAUSE ,119, "PAUSE instruction."),
240 EXIT_REASON(SVM_EXIT_HLT ,120, "HLT instruction."),
241 EXIT_REASON(SVM_EXIT_INVLPG ,121, "INVLPG instruction."),
242 EXIT_REASON(SVM_EXIT_INVLPGA ,122, "INVLPGA instruction."),
243 EXIT_REASON(SVM_EXIT_IOIO ,123, "IN/OUT accessing protected port (EXITINFO1 field provides more information)."),
244 EXIT_REASON(SVM_EXIT_MSR ,124, "RDMSR or WRMSR access to protected MSR."),
245 EXIT_REASON(SVM_EXIT_TASK_SWITCH ,125, "Task switch."),
246 EXIT_REASON(SVM_EXIT_FERR_FREEZE ,126, "FP legacy handling enabled; the processor is frozen in an x87/MMX instruction, waiting for an interrupt."),
247 EXIT_REASON(SVM_EXIT_TASK_SHUTDOWN ,127, "Shutdown."),
248 EXIT_REASON(SVM_EXIT_TASK_VMRUN ,128, "VMRUN instruction."),
249 EXIT_REASON(SVM_EXIT_TASK_VMCALL ,129, "VMCALL instruction."),
250 EXIT_REASON(SVM_EXIT_TASK_VMLOAD ,130, "VMLOAD instruction."),
251 EXIT_REASON(SVM_EXIT_TASK_VMSAVE ,131, "VMSAVE instruction."),
252 EXIT_REASON(SVM_EXIT_TASK_STGI ,132, "STGI instruction."),
253 EXIT_REASON(SVM_EXIT_TASK_CLGI ,133, "CLGI instruction."),
254 EXIT_REASON(SVM_EXIT_TASK_SKINIT ,134, "SKINIT instruction."),
255 EXIT_REASON(SVM_EXIT_TASK_RDTSCP ,135, "RDTSCP instruction."),
256 EXIT_REASON(SVM_EXIT_TASK_ICEBP ,136, "ICEBP instruction."),
257 EXIT_REASON(SVM_EXIT_TASK_WBINVD ,137, "WBINVD instruction."),
258 EXIT_REASON(SVM_EXIT_TASK_MONITOR ,138, "MONITOR instruction."),
259 EXIT_REASON(SVM_EXIT_MWAIT_UNCOND ,139, "MWAIT instruction unconditional."),
260 EXIT_REASON(SVM_EXIT_MWAIT_ARMED ,140, "MWAIT instruction when armed."),
261 EXIT_REASON(SVM_EXIT_NPF ,1024, "Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault)."),
262 EXIT_REASON_NIL()
263};
264# undef EXIT_REASON
265# undef EXIT_REASON_NIL
266#endif /* VBOX_WITH_STATISTICS */
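
/* Illustration (not from the file): the stringizing EXIT_REASON macro above turns
 *     EXIT_REASON(VMX_EXIT_CPUID, 10, "Guest software attempted to execute CPUID.")
 * into the single string literal
 *     "VMX_EXIT_CPUID - 10 - Guest software attempted to execute CPUID."
 * An entry's array index is the hardware exit-reason code, so the statistics registration
 * further down can look up a description directly:
 *     const char *pszDesc = g_apszVTxExitReasons[10];   // VT-x exit reason 10 (CPUID)
 */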
267
268/*******************************************************************************
269* Internal Functions *
270*******************************************************************************/
271static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
272static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
273
274
275/**
276 * Initializes the HWACCM.
277 *
278 * @returns VBox status code.
279 * @param pVM The VM to operate on.
280 */
281VMMR3DECL(int) HWACCMR3Init(PVM pVM)
282{
283 LogFlow(("HWACCMR3Init\n"));
284
285 /*
286 * Assert alignment and sizes.
287 */
288 AssertCompileMemberAlignment(VM, hwaccm.s, 32);
289 AssertCompile(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));
290
291 /* Some structure checks. */
292 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved3) == 0xC0, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved3)));
293 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
294 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
295 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));
296
297 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
298 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4) == 0x4A0, ("guest.u8Reserved4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4)));
299 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6) == 0x4D8, ("guest.u8Reserved6 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6)));
300 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7) == 0x580, ("guest.u8Reserved7 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7)));
301 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9) == 0x648, ("guest.u8Reserved9 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9)));
302 AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved10) == 0x698, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved10)));
303 AssertReleaseMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
304
305
306 /*
307 * Register the saved state data unit.
308 */
309 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
310 NULL, NULL, NULL,
311 NULL, hwaccmR3Save, NULL,
312 NULL, hwaccmR3Load, NULL);
313 if (RT_FAILURE(rc))
314 return rc;
315
316 /* Misc initialisation. */
317 pVM->hwaccm.s.vmx.fSupported = false;
318 pVM->hwaccm.s.svm.fSupported = false;
319 pVM->hwaccm.s.vmx.fEnabled = false;
320 pVM->hwaccm.s.svm.fEnabled = false;
321
322 pVM->hwaccm.s.fNestedPaging = false;
323
324 /* Disabled by default. */
325 pVM->fHWACCMEnabled = false;
326
327 /*
328 * Check CFGM options.
329 */
330 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
331 PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
332 /* Nested paging: disabled by default. */
333 rc = CFGMR3QueryBoolDef(pRoot, "EnableNestedPaging", &pVM->hwaccm.s.fAllowNestedPaging, false);
334 AssertRC(rc);
335
336 /* VT-x VPID: disabled by default. */
337 rc = CFGMR3QueryBoolDef(pRoot, "EnableVPID", &pVM->hwaccm.s.vmx.fAllowVPID, false);
338 AssertRC(rc);
339
340 /* HWACCM support must be explicitly enabled in the configuration file. */
341 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hwaccm.s.fAllowed, false);
342 AssertRC(rc);
343
344 /* TPR patching for 32-bit (Windows) guests with an IO-APIC: disabled by default. */
345 rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hwaccm.s.fTRPPatchingAllowed, false);
346 AssertRC(rc);
347
348#ifdef RT_OS_DARWIN
349 if (VMMIsHwVirtExtForced(pVM) != pVM->hwaccm.s.fAllowed)
350#else
351 if (VMMIsHwVirtExtForced(pVM) && !pVM->hwaccm.s.fAllowed)
352#endif
353 {
354 AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n",
355 VMMIsHwVirtExtForced(pVM), pVM->hwaccm.s.fAllowed));
356 return VERR_HWACCM_CONFIG_MISMATCH;
357 }
358
359 if (VMMIsHwVirtExtForced(pVM))
360 pVM->fHWACCMEnabled = true;
361
362#if HC_ARCH_BITS == 32
363 /* 64-bit mode is configurable and it depends on both the kernel mode and VT-x.
364 * (To use the default, don't set 64bitEnabled in CFGM.) */
365 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, false);
366 AssertLogRelRCReturn(rc, rc);
367 if (pVM->hwaccm.s.fAllow64BitGuests)
368 {
369# ifdef RT_OS_DARWIN
370 if (!VMMIsHwVirtExtForced(pVM))
371# else
372 if (!pVM->hwaccm.s.fAllowed)
373# endif
374 return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V).");
375 }
376#else
377 /* On 64-bit hosts 64-bit guest support is enabled by default, but allow this to be overridden
378 * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.) */
379 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, true);
380 AssertLogRelRCReturn(rc, rc);
381#endif
382
383
384 /** Determine the init method for AMD-V and VT-x; either one global init for each host CPU
385 * or local init each time we wish to execute guest code.
386 *
387 * Defaults to false (local init) on Mac OS X and Windows due to the higher risk of conflicts with other hypervisors.
388 */
389 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableGlobalInit", &pVM->hwaccm.s.fGlobalInit,
390#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
391 false
392#else
393 true
394#endif
395 );
396
397 /* Max number of resume loops. */
398 rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
399 AssertRC(rc);
400
401 return VINF_SUCCESS;
402}
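
/* Overview (derived from the code above) of the CFGM keys HWACCMR3Init() consumes and their defaults:
 *     EnableNestedPaging            (root node)  false
 *     EnableVPID                    (root node)  false
 *     HWVirtExt/Enabled                          false  (must be set to use VT-x/AMD-V at all)
 *     HWVirtExt/TPRPatchingEnabled               false
 *     HWVirtExt/64bitEnabled                     false on 32-bit hosts, true on 64-bit hosts
 *     HWVirtExt/EnableGlobalInit                 false on Darwin/Windows, true elsewhere
 *     HWVirtExt/MaxResumeLoops                   0  (chosen by ring-0 later)
 */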
403
404/**
405 * Initializes the per-VCPU HWACCM.
406 *
407 * @returns VBox status code.
408 * @param pVM The VM to operate on.
409 */
410VMMR3DECL(int) HWACCMR3InitCPU(PVM pVM)
411{
412 LogFlow(("HWACCMR3InitCPU\n"));
413
414 for (VMCPUID i = 0; i < pVM->cCpus; i++)
415 {
416 PVMCPU pVCpu = &pVM->aCpus[i];
417
418 pVCpu->hwaccm.s.fActive = false;
419 }
420
421#ifdef VBOX_WITH_STATISTICS
422 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
423 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
424 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully replaced.");
425 STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replace attempts.");
426
427 /*
428 * Statistics.
429 */
430 for (VMCPUID i = 0; i < pVM->cCpus; i++)
431 {
432 PVMCPU pVCpu = &pVM->aCpus[i];
433 int rc;
434
435 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of CPU poke and wait",
436 "/PROF/HWACCM/CPU%d/PokeWait", i);
437 AssertRC(rc);
438 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of failed CPU poke and wait",
439 "/PROF/HWACCM/CPU%d/PokeWaitFailed", i);
440 AssertRC(rc);
441 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",
442 "/PROF/HWACCM/CPU%d/SwitchToGC", i);
443 AssertRC(rc);
444 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",
445 "/PROF/HWACCM/CPU%d/SwitchFromGC_1", i);
446 AssertRC(rc);
447 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",
448 "/PROF/HWACCM/CPU%d/SwitchFromGC_2", i);
449 AssertRC(rc);
450# if 1 /* temporary for tracking down darwin holdup. */
451 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",
452 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub1", i);
453 AssertRC(rc);
454 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",
455 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub2", i);
456 AssertRC(rc);
457 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",
458 "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub3", i);
459 AssertRC(rc);
460# endif
461 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",
462 "/PROF/HWACCM/CPU%d/InGC", i);
463 AssertRC(rc);
464
465# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
466 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
467 "/PROF/HWACCM/CPU%d/Switcher3264", i);
468 AssertRC(rc);
469# endif
470
471# define HWACCM_REG_COUNTER(a, b) \
472 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Occurrence counter.", b, i); \
473 AssertRC(rc);
474
475 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowNM, "/HWACCM/CPU%d/Exit/Trap/Shw/#NM");
476 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNM, "/HWACCM/CPU%d/Exit/Trap/Gst/#NM");
477 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPF, "/HWACCM/CPU%d/Exit/Trap/Shw/#PF");
478 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestPF, "/HWACCM/CPU%d/Exit/Trap/Gst/#PF");
479 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestUD, "/HWACCM/CPU%d/Exit/Trap/Gst/#UD");
480 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestSS, "/HWACCM/CPU%d/Exit/Trap/Gst/#SS");
481 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNP, "/HWACCM/CPU%d/Exit/Trap/Gst/#NP");
482 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestGP, "/HWACCM/CPU%d/Exit/Trap/Gst/#GP");
483 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestMF, "/HWACCM/CPU%d/Exit/Trap/Gst/#MF");
484 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDE, "/HWACCM/CPU%d/Exit/Trap/Gst/#DE");
485 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDB, "/HWACCM/CPU%d/Exit/Trap/Gst/#DB");
486 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvpg, "/HWACCM/CPU%d/Exit/Instr/Invlpg");
487 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvd, "/HWACCM/CPU%d/Exit/Instr/Invd");
488 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid, "/HWACCM/CPU%d/Exit/Instr/Cpuid");
489 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc, "/HWACCM/CPU%d/Exit/Instr/Rdtsc");
490 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc, "/HWACCM/CPU%d/Exit/Instr/Rdpmc");
491 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr, "/HWACCM/CPU%d/Exit/Instr/Rdmsr");
492 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitWrmsr, "/HWACCM/CPU%d/Exit/Instr/Wrmsr");
493 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMwait, "/HWACCM/CPU%d/Exit/Instr/Mwait");
494 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxWrite, "/HWACCM/CPU%d/Exit/Instr/DR/Write");
495 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxRead, "/HWACCM/CPU%d/Exit/Instr/DR/Read");
496 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCLTS, "/HWACCM/CPU%d/Exit/Instr/CLTS");
497 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitLMSW, "/HWACCM/CPU%d/Exit/Instr/LMSW");
498 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCli, "/HWACCM/CPU%d/Exit/Instr/Cli");
499 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitSti, "/HWACCM/CPU%d/Exit/Instr/Sti");
500 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPushf, "/HWACCM/CPU%d/Exit/Instr/Pushf");
501 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPopf, "/HWACCM/CPU%d/Exit/Instr/Popf");
502 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIret, "/HWACCM/CPU%d/Exit/Instr/Iret");
503 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInt, "/HWACCM/CPU%d/Exit/Instr/Int");
504 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt, "/HWACCM/CPU%d/Exit/Instr/Hlt");
505 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOWrite, "/HWACCM/CPU%d/Exit/IO/Write");
506 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIORead, "/HWACCM/CPU%d/Exit/IO/Read");
507 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringWrite, "/HWACCM/CPU%d/Exit/IO/WriteString");
508 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringRead, "/HWACCM/CPU%d/Exit/IO/ReadString");
509 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow, "/HWACCM/CPU%d/Exit/IrqWindow");
510 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume, "/HWACCM/CPU%d/Exit/MaxResume");
511 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending, "/HWACCM/CPU%d/Exit/PreemptPending");
512
513 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq, "/HWACCM/CPU%d/Switch/IrqPending");
514 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchToR3, "/HWACCM/CPU%d/Switch/ToR3");
515
516 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntInject, "/HWACCM/CPU%d/Irq/Inject");
517 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntReinject, "/HWACCM/CPU%d/Irq/Reinject");
518 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatPendingHostIrq, "/HWACCM/CPU%d/Irq/PendingOnHost");
519
520 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageManual, "/HWACCM/CPU%d/Flush/Page/Virt");
521 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPhysPageManual, "/HWACCM/CPU%d/Flush/Page/Phys");
522 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBManual, "/HWACCM/CPU%d/Flush/TLB/Manual");
523 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBCRxChange, "/HWACCM/CPU%d/Flush/TLB/CRx");
524 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageInvlpg, "/HWACCM/CPU%d/Flush/Page/Invlpg");
525 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Switch");
526 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Skipped");
527 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID, "/HWACCM/CPU%d/Flush/TLB/ASID");
528 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga, "/HWACCM/CPU%d/Flush/TLB/PhysInvl");
529 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown, "/HWACCM/CPU%d/Flush/Shootdown/Page");
530 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush, "/HWACCM/CPU%d/Flush/Shootdown/TLB");
531
532 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset, "/HWACCM/CPU%d/TSC/Offset");
533 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept, "/HWACCM/CPU%d/TSC/Intercept");
534 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow, "/HWACCM/CPU%d/TSC/InterceptOverflow");
535
536 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed, "/HWACCM/CPU%d/Debug/Armed");
537 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch, "/HWACCM/CPU%d/Debug/ContextSwitch");
538 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck, "/HWACCM/CPU%d/Debug/IOCheck");
539
540 for (unsigned j=0;j<RT_ELEMENTS(pVCpu->hwaccm.s.StatExitCRxWrite);j++)
541 {
542 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes",
543 "/HWACCM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
544 AssertRC(rc);
545 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads",
546 "/HWACCM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
547 AssertRC(rc);
548 }
549
550#undef HWACCM_REG_COUNTER
551
552 pVCpu->hwaccm.s.paStatExitReason = NULL;
553
554 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hwaccm.s.paStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatExitReason);
555 AssertRC(rc);
556 if (RT_SUCCESS(rc))
557 {
558 const char * const *papszDesc = ASMIsIntelCpu() ? &g_apszVTxExitReasons[0] : &g_apszAmdVExitReasons[0];
559 for (int j=0;j<MAX_EXITREASON_STAT;j++)
560 {
561 if (papszDesc[j])
562 {
563 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
564 papszDesc[j], "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
565 AssertRC(rc);
566 }
567 }
568 rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);
569 AssertRC(rc);
570 }
571 pVCpu->hwaccm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatExitReason);
572# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
573 Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
574# else
575 Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR);
576# endif
577
578 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatInjectedIrqs);
579 AssertRCReturn(rc, rc);
580 pVCpu->hwaccm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
581# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
582 Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
583# else
584 Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
585# endif
586 for (unsigned j = 0; j < 255; j++)
587 STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",
588 (j < 0x20) ? "/HWACCM/CPU%d/Interrupt/Trap/%02X" : "/HWACCM/CPU%d/Interrupt/IRQ/%02X", i, j);
589
590 }
591#endif /* VBOX_WITH_STATISTICS */
592
593#ifdef VBOX_WITH_CRASHDUMP_MAGIC
594 /* Magic marker for searching in crash dumps. */
595 for (VMCPUID i = 0; i < pVM->cCpus; i++)
596 {
597 PVMCPU pVCpu = &pVM->aCpus[i];
598
599 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
600 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
601 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
602 }
603#endif
604 return VINF_SUCCESS;
605}
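
/* Illustration (not from the file): HWACCM_REG_COUNTER() hands its path string to STAMR3RegisterF()
 * as a format string together with the VCPU index, so for i == 1
 *     HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid, "/HWACCM/CPU%d/Exit/Instr/Cpuid");
 * registers a STAMTYPE_COUNTER under the path "/HWACCM/CPU1/Exit/Instr/Cpuid". */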
606
607/**
608 * Turns off normal raw-mode features.
609 *
610 * @param pVM The VM to operate on.
611 */
612static void hwaccmR3DisableRawMode(PVM pVM)
613{
614 /* Disable PATM & CSAM. */
615 PATMR3AllowPatching(pVM, false);
616 CSAMDisableScanning(pVM);
617
618 /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
619 SELMR3DisableMonitoring(pVM);
620 TRPMR3DisableMonitoring(pVM);
621
622 /* Disable the switcher code (safety precaution). */
623 VMMR3DisableSwitcher(pVM);
624
625 /* Disable mapping of the hypervisor into the shadow page table. */
626 PGMR3MappingsDisable(pVM);
627
628 /* Disable the switcher */
629 VMMR3DisableSwitcher(pVM);
630
631 /* Reinit the paging mode to force the new shadow mode. */
632 for (VMCPUID i = 0; i < pVM->cCpus; i++)
633 {
634 PVMCPU pVCpu = &pVM->aCpus[i];
635
636 PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
637 }
638}
639
640/**
641 * Initialize VT-x or AMD-V.
642 *
643 * @returns VBox status code.
644 * @param pVM The VM handle.
645 */
646VMMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM)
647{
648 int rc;
649
650 if ( !pVM->hwaccm.s.vmx.fSupported
651 && !pVM->hwaccm.s.svm.fSupported)
652 {
653 LogRel(("HWACCM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hwaccm.s.lLastError));
654 LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
655 if (VMMIsHwVirtExtForced(pVM))
656 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
657 return VINF_SUCCESS;
658 }
659
660 if (pVM->hwaccm.s.vmx.fSupported)
661 {
662 rc = SUPR3QueryVTxSupported();
663 if (RT_FAILURE(rc))
664 {
665#ifdef RT_OS_LINUX
666 LogRel(("HWACCM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
667#else
668 LogRel(("HWACCM: The host kernel does not support VT-x!\n"));
669#endif
670 if ( pVM->cCpus > 1
671 || VMMIsHwVirtExtForced(pVM))
672 return rc;
673
674 /* silently fall back to raw mode */
675 return VINF_SUCCESS;
676 }
677 }
678
679 if (!pVM->hwaccm.s.fAllowed)
680 return VINF_SUCCESS; /* nothing to do */
681
682 /* Enable VT-x or AMD-V on all host CPUs. */
683 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_ENABLE, 0, NULL);
684 if (RT_FAILURE(rc))
685 {
686 LogRel(("HWACCMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));
687 return rc;
688 }
689 Assert(!pVM->fHWACCMEnabled || VMMIsHwVirtExtForced(pVM));
690
691 pVM->hwaccm.s.fHasIoApic = PDMHasIoApic(pVM);
692 /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already) */
693 if (!pVM->hwaccm.s.fHasIoApic)
694 {
695 Assert(!pVM->hwaccm.s.fTRPPatchingAllowed); /* paranoia */
696 pVM->hwaccm.s.fTRPPatchingAllowed = false;
697 }
698
699 if (pVM->hwaccm.s.vmx.fSupported)
700 {
701 Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));
702
703 if ( pVM->hwaccm.s.fInitialized == false
704 && pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
705 {
706 uint64_t val;
707 RTGCPHYS GCPhys = 0;
708
709 LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
710 LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
711 LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
712 LogRel(("HWACCM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
713 LogRel(("HWACCM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
714 LogRel(("HWACCM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
715 LogRel(("HWACCM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
716 LogRel(("HWACCM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
717
718 LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.u));
719 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
720 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
721 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
722 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
723 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
724 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
725 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
726 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
727 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
728 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
729 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
730 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
731 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
732 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
733 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
734 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
735 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
736 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));
737
738 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.u));
739 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
740 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
741 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
742 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
743 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
744 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
745 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
746 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
747 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
748 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
749 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
750 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
751 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
752 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
753 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
754 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
755 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
756 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
757 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
758 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
759 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
760 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
761 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
762 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
763 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
764 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
765 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
766 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
767 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
768 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
769 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
770 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
771 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
772 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
773 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
774 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
775 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
776 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
777 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
778 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
779 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
780 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
781 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));
782
783 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
784 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
785 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
786 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
787 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
788 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
789 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
790 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
791 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
792 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
793 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
794 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
795 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
796 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
797 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
798 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
799 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
800 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
801 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
802 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
803 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
804 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
805 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
806 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
807 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
808 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
809 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
810 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
811 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
812 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
813 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
814 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
815 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
816 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
817 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
818 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
819 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
820 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
821 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
822 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
823 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
824 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
825 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));
826
827 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
828 {
829 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.u));
830 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
831 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
832 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
833 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
834 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
835 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
836 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
837 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
838 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
839 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
840 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
841 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
842 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));
843
844 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
845 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
846 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
847 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
848 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
849 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
850 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
851 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
852 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
853 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
854 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
855 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
856 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
857 }
858
859 LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry.u));
860 val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
861 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
862 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
863 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
864 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
865 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
866 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
867 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
868 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
869 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
870 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
871 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
872 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
873 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
874 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
875 val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
876 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
877 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
878 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
879 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
880 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
881 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
882 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
883 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
884 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
885 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
886 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
887 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
888 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
889 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));
890
891 LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit.u));
892 val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
893 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
894 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
895 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
896 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
897 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
898 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
899 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
900 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
901 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
902 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
903 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
904 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
905 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
906 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
907 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
908 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
909 val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
910 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
911 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
912 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
913 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
914 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
915 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
916 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
917 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
918 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
919 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
920 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
921 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
922 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
923 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
924 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
925 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));
926
927 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps)
928 {
929 LogRel(("HWACCM: MSR_IA32_VMX_EPT_VPID_CAPS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_eptcaps));
930
931 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)
932 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));
933 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)
934 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));
935 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)
936 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));
937 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)
938 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));
939 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)
940 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));
941 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)
942 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));
943 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)
944 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));
945 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)
946 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));
947 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)
948 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));
949 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)
950 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));
951 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)
952 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));
953 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)
954 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));
955 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)
956 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));
957 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)
958 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));
959 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)
960 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));
961 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)
962 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));
963 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)
964 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));
965 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
966 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));
967 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV)
968 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_INDIV\n"));
969 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT)
970 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_CONTEXT\n"));
971 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL)
972 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL\n"));
973 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
974 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));
975 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV)
976 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV\n"));
977 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT)
978 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT\n"));
979 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL)
980 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL\n"));
981 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL)
982 LogRel(("HWACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_CONTEXT_GLOBAL\n"));
983 }
984
985 LogRel(("HWACCM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
986 LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));
987 LogRel(("HWACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
988 LogRel(("HWACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
989 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
990 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));
991
992 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
993 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
994 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
995 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
996 LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));
997
998 LogRel(("HWACCM: TPR shadow physaddr = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
999
1000 /* Paranoia */
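    /* Note: IA32_VMX_MISC encodes the recommended maximum number of entries in the VM-entry/VM-exit
       MSR lists as 512 * (N + 1), so the smallest value this macro can report is 512. */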
1001 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
1002
1003 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1004 {
1005 LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
1006 LogRel(("HWACCM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
1007 }
1008
1009#ifdef HWACCM_VTX_WITH_EPT
1010 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
1011 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
1012#endif /* HWACCM_VTX_WITH_EPT */
1013#ifdef HWACCM_VTX_WITH_VPID
1014 if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
1015 && !pVM->hwaccm.s.fNestedPaging) /* VPID and EPT are mutually exclusive. */
1016 pVM->hwaccm.s.vmx.fVPID = pVM->hwaccm.s.vmx.fAllowVPID;
1017#endif /* HWACCM_VTX_WITH_VPID */
1018
1019 /* Only try once. */
1020 pVM->hwaccm.s.fInitialized = true;
1021
1022    /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
1023#if 1
1024 rc = PDMR3VMMDevHeapAlloc(pVM, HWACCM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hwaccm.s.vmx.pRealModeTSS);
1025#else
1026 rc = VERR_NO_MEMORY; /* simulation of no VMMDev Heap. */
1027#endif
1028 if (RT_SUCCESS(rc))
1029 {
1030 /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */
1031 ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
1032 pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
1033        /* A bit set to 0 means redirection is enabled. */
1034 memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
1035 /* Allow all port IO, so the VT-x IO intercepts do their job. */
1036 memset(pVM->hwaccm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2);
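        /* The CPU reads the I/O permission bitmap two bytes at a time, so the bitmap has to be
           terminated with a byte in which all bits are set. */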
1037 *((unsigned char *)pVM->hwaccm.s.vmx.pRealModeTSS + HWACCM_VTX_TSS_SIZE - 2) = 0xff;
1038
1039        /* Construct a 1024-entry page directory with 4 MB pages for the identity-mapped page table used in
1040         * real mode and in protected mode without paging, with EPT.
1041 */
1042 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hwaccm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1043 for (unsigned i=0;i<X86_PG_ENTRIES;i++)
1044 {
1045 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1046 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
1047 }
1048
1049        /* We convert it here every time as PCI regions could be reconfigured. */
1050 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
1051 AssertRC(rc);
1052 LogRel(("HWACCM: Real Mode TSS guest physaddr = %RGp\n", GCPhys));
1053
1054 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1055 AssertRC(rc);
1056 LogRel(("HWACCM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys));
1057 }
1058 else
1059 {
1060 LogRel(("HWACCM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
1061 pVM->hwaccm.s.vmx.pRealModeTSS = NULL;
1062 pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1063 }
1064
1065 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
1066 AssertRC(rc);
1067 if (rc == VINF_SUCCESS)
1068 {
1069 pVM->fHWACCMEnabled = true;
1070 pVM->hwaccm.s.vmx.fEnabled = true;
1071 hwaccmR3DisableRawMode(pVM);
1072
1073 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1074#ifdef VBOX_ENABLE_64_BITS_GUESTS
1075 if (pVM->hwaccm.s.fAllow64BitGuests)
1076 {
1077 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1078 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1079 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
1080 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1081 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1082 }
1083 LogRel((pVM->hwaccm.s.fAllow64BitGuests
1084 ? "HWACCM: 32-bit and 64-bit guests supported.\n"
1085 : "HWACCM: 32-bit guests supported.\n"));
1086#else
1087 LogRel(("HWACCM: 32-bit guests supported.\n"));
1088#endif
1089 LogRel(("HWACCM: VMX enabled!\n"));
1090 if (pVM->hwaccm.s.fNestedPaging)
1091 {
1092 LogRel(("HWACCM: Enabled nested paging\n"));
1093 LogRel(("HWACCM: EPT root page = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
1094 }
1095 if (pVM->hwaccm.s.vmx.fVPID)
1096 LogRel(("HWACCM: Enabled VPID\n"));
1097
1098 if ( pVM->hwaccm.s.fNestedPaging
1099 || pVM->hwaccm.s.vmx.fVPID)
1100 {
1101 LogRel(("HWACCM: enmFlushPage %d\n", pVM->hwaccm.s.vmx.enmFlushPage));
1102 LogRel(("HWACCM: enmFlushContext %d\n", pVM->hwaccm.s.vmx.enmFlushContext));
1103 }
1104 }
1105 else
1106 {
1107 LogRel(("HWACCM: VMX setup failed with rc=%Rrc!\n", rc));
1108 LogRel(("HWACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));
1109 pVM->fHWACCMEnabled = false;
1110 }
1111 }
1112 }
1113 else
1114 if (pVM->hwaccm.s.svm.fSupported)
1115 {
1116 Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));
1117
1118 if (pVM->hwaccm.s.fInitialized == false)
1119 {
1120 /* Erratum 170 which requires a forced TLB flush for each world switch:
1121 * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
1122 *
1123 * All BH-G1/2 and DH-G1/2 models include a fix:
1124 * Athlon X2: 0x6b 1/2
1125 * 0x68 1/2
1126 * Athlon 64: 0x7f 1
1127 * 0x6f 2
1128 * Sempron: 0x7f 1/2
1129 * 0x6f 2
1130 * 0x6c 2
1131 * 0x7c 2
1132 * Turion 64: 0x68 2
1133 *
1134 */
1135 uint32_t u32Dummy;
1136 uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
1137 ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
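        /* CPUID leaf 1 EAX layout: stepping in bits 3:0, model in bits 7:4, family in bits 11:8,
           extended model in bits 19:16 and extended family in bits 27:20; the extended fields only
           apply when the base family is 0xf. */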
1138 u32BaseFamily= (u32Version >> 8) & 0xf;
1139 u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
1140 u32Model = ((u32Version >> 4) & 0xf);
1141 u32Model = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
1142 u32Stepping = u32Version & 0xf;
1143 if ( u32Family == 0xf
1144 && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
1145 && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
1146 {
1147 LogRel(("HWACMM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
1148 }
1149
1150 LogRel(("HWACMM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
1151 LogRel(("HWACMM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
1152 LogRel(("HWACCM: AMD-V revision = %X\n", pVM->hwaccm.s.svm.u32Rev));
1153 LogRel(("HWACCM: AMD-V max ASID = %d\n", pVM->hwaccm.s.uMaxASID));
1154 LogRel(("HWACCM: AMD-V features = %X\n", pVM->hwaccm.s.svm.u32Features));
1155
1156 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
1157 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING\n"));
1158 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT)
1159 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT\n"));
1160 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK)
1161 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK\n"));
1162 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
1163 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE\n"));
1164 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE)
1165 LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE\n"));
1166
1167 /* Only try once. */
1168 pVM->hwaccm.s.fInitialized = true;
1169
1170 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
1171 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
1172
1173 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
1174 AssertRC(rc);
1175 if (rc == VINF_SUCCESS)
1176 {
1177 pVM->fHWACCMEnabled = true;
1178 pVM->hwaccm.s.svm.fEnabled = true;
1179
1180 if (pVM->hwaccm.s.fNestedPaging)
1181 LogRel(("HWACCM: Enabled nested paging\n"));
1182
1183 hwaccmR3DisableRawMode(pVM);
1184 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1185 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1186 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1187#ifdef VBOX_ENABLE_64_BITS_GUESTS
1188 if (pVM->hwaccm.s.fAllow64BitGuests)
1189 {
1190 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1191 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1192 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
1193 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1194 }
1195#endif
1196 LogRel((pVM->hwaccm.s.fAllow64BitGuests
1197 ? "HWACCM: 32-bit and 64-bit guest supported.\n"
1198 : "HWACCM: 32-bit guest supported.\n"));
1199
1200 LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
1201 }
1202 else
1203 {
1204 pVM->fHWACCMEnabled = false;
1205 }
1206 }
1207 }
1208 if (pVM->fHWACCMEnabled)
1209 LogRel(("HWACCM: VT-x/AMD-V init method: %s\n", (pVM->hwaccm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
1210 return VINF_SUCCESS;
1211}
1212
1213/**
1214 * Applies relocations to data and code managed by this
1215 * component. This function will be called at init and
1216 * whenever the VMM needs to relocate itself inside the GC.
1217 *
1218 * @param pVM The VM.
1219 */
1220VMMR3DECL(void) HWACCMR3Relocate(PVM pVM)
1221{
1222 Log(("HWACCMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
1223
1224    /* Fetch the current paging mode when the relocate callback is invoked during state loading. */
1225 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1226 {
1227 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1228 {
1229 PVMCPU pVCpu = &pVM->aCpus[i];
1230
1231 pVCpu->hwaccm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1232 Assert(pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
1233 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu);
1234 }
1235 }
1236#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1237 if (pVM->fHWACCMEnabled)
1238 {
1239 int rc;
1240
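        /* Running 64-bit guests on a 32-bit host requires a world switcher to enter the 64-bit context;
           pick it according to the host paging mode and resolve the 64-bit helper entry points below. */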
1241 switch(PGMGetHostMode(pVM))
1242 {
1243 case PGMMODE_32_BIT:
1244 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
1245 break;
1246
1247 case PGMMODE_PAE:
1248 case PGMMODE_PAE_NX:
1249 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
1250 break;
1251
1252 default:
1253 AssertFailed();
1254 break;
1255 }
1256 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hwaccm.s.pfnVMXGCStartVM64);
1257 AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));
1258
1259 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->hwaccm.s.pfnSVMGCVMRun64);
1260 AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));
1261
1262 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestFPU64", &pVM->hwaccm.s.pfnSaveGuestFPU64);
1263 AssertReleaseMsgRC(rc, ("HWACCMSetupFPU64 -> rc=%Rrc\n", rc));
1264
1265 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMSaveGuestDebug64", &pVM->hwaccm.s.pfnSaveGuestDebug64);
1266 AssertReleaseMsgRC(rc, ("HWACCMSetupDebug64 -> rc=%Rrc\n", rc));
1267
1268# ifdef DEBUG
1269 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HWACCMTestSwitcher64", &pVM->hwaccm.s.pfnTest64);
1270 AssertReleaseMsgRC(rc, ("HWACCMTestSwitcher64 -> rc=%Rrc\n", rc));
1271# endif
1272 }
1273#endif
1274 return;
1275}
1276
1277/**
1278 * Checks whether hardware accelerated raw mode is allowed.
1279 *
1280 * @returns boolean
1281 * @param pVM The VM to operate on.
1282 */
1283VMMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
1284{
1285 return pVM->hwaccm.s.fAllowed;
1286}
1287
1288/**
1289 * Notification callback which is called whenever there is a chance that a CR3
1290 * value might have changed.
1291 *
1292 * This is called by PGM.
1293 *
1294 * @param pVM The VM to operate on.
1295 * @param pVCpu The VMCPU to operate on.
1296 * @param enmShadowMode New shadow paging mode.
1297 * @param enmGuestMode New guest paging mode.
1298 */
1299VMMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
1300{
1301    /* Ignore paging mode changes during state loading. */
1302 if (VMR3GetState(pVCpu->pVMR3) == VMSTATE_LOADING)
1303 return;
1304
1305 pVCpu->hwaccm.s.enmShadowMode = enmShadowMode;
1306
1307 if ( pVM->hwaccm.s.vmx.fEnabled
1308 && pVM->fHWACCMEnabled)
1309 {
1310 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
1311 && enmGuestMode >= PGMMODE_PROTECTED)
1312 {
1313 PCPUMCTX pCtx;
1314
1315 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1316
1317 /* After a real mode switch to protected mode we must force
1318 * CPL to 0. Our real mode emulation had to set it to 3.
1319 */
1320 pCtx->ssHid.Attr.n.u2Dpl = 0;
1321 }
1322 }
1323
1324 if (pVCpu->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
1325 {
1326 /* Keep track of paging mode changes. */
1327 pVCpu->hwaccm.s.vmx.enmPrevGuestMode = pVCpu->hwaccm.s.vmx.enmCurrGuestMode;
1328 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;
1329
1330 /* Did we miss a change, because all code was executed in the recompiler? */
1331 if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
1332 {
1333 Log(("HWACCMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hwaccm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode)));
1334 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = pVCpu->hwaccm.s.vmx.enmPrevGuestMode;
1335 }
1336 }
1337
1338 /* Reset the contents of the read cache. */
1339 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
1340 for (unsigned j=0;j<pCache->Read.cValidEntries;j++)
1341 pCache->Read.aFieldVal[j] = 0;
1342}
1343
1344/**
1345 * Terminates the HWACCM.
1346 *
1347 * Termination means cleaning up and freeing all resources;
1348 * the VM itself is at this point powered off or suspended.
1349 *
1350 * @returns VBox status code.
1351 * @param pVM The VM to operate on.
1352 */
1353VMMR3DECL(int) HWACCMR3Term(PVM pVM)
1354{
1355 if (pVM->hwaccm.s.vmx.pRealModeTSS)
1356 {
1357 PDMR3VMMDevHeapFree(pVM, pVM->hwaccm.s.vmx.pRealModeTSS);
1358 pVM->hwaccm.s.vmx.pRealModeTSS = 0;
1359 }
1360 HWACCMR3TermCPU(pVM);
1361 return 0;
1362}
1363
1364/**
1365 * Terminates the per-VCPU HWACCM.
1366 *
1367 * Termination means cleaning up and freeing all resources;
1368 * the VM itself is at this point powered off or suspended.
1369 *
1370 * @returns VBox status code.
1371 * @param pVM The VM to operate on.
1372 */
1373VMMR3DECL(int) HWACCMR3TermCPU(PVM pVM)
1374{
1375 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1376 {
1377 PVMCPU pVCpu = &pVM->aCpus[i];
1378
1379#ifdef VBOX_WITH_STATISTICS
1380 if (pVCpu->hwaccm.s.paStatExitReason)
1381 {
1382 MMHyperFree(pVM, pVCpu->hwaccm.s.paStatExitReason);
1383 pVCpu->hwaccm.s.paStatExitReason = NULL;
1384 pVCpu->hwaccm.s.paStatExitReasonR0 = NIL_RTR0PTR;
1385 }
1386 if (pVCpu->hwaccm.s.paStatInjectedIrqs)
1387 {
1388 MMHyperFree(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
1389 pVCpu->hwaccm.s.paStatInjectedIrqs = NULL;
1390 pVCpu->hwaccm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
1391 }
1392#endif
1393
1394#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1395 memset(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic));
1396 pVCpu->hwaccm.s.vmx.VMCSCache.uMagic = 0;
1397 pVCpu->hwaccm.s.vmx.VMCSCache.uPos = 0xffffffff;
1398#endif
1399 }
1400 return 0;
1401}
1402
1403/**
1404 * The VM is being reset.
1405 *
1406 * For the HWACCM component this means that any GDT/LDT/TSS monitors
1407 * need to be removed.
1408 *
1409 * @param pVM VM handle.
1410 */
1411VMMR3DECL(void) HWACCMR3Reset(PVM pVM)
1412{
1413 LogFlow(("HWACCMR3Reset:\n"));
1414
1415 if (pVM->fHWACCMEnabled)
1416 hwaccmR3DisableRawMode(pVM);
1417
1418 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1419 {
1420 PVMCPU pVCpu = &pVM->aCpus[i];
1421
1422 /* On first entry we'll sync everything. */
1423 pVCpu->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
1424
1425 pVCpu->hwaccm.s.vmx.cr0_mask = 0;
1426 pVCpu->hwaccm.s.vmx.cr4_mask = 0;
1427
1428 pVCpu->hwaccm.s.fActive = false;
1429 pVCpu->hwaccm.s.Event.fPending = false;
1430
1431 /* Reset state information for real-mode emulation in VT-x. */
1432 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
1433 pVCpu->hwaccm.s.vmx.enmPrevGuestMode = PGMMODE_REAL;
1434 pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
1435
1436 /* Reset the contents of the read cache. */
1437 PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
1438 for (unsigned j=0;j<pCache->Read.cValidEntries;j++)
1439 pCache->Read.aFieldVal[j] = 0;
1440
1441#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1442 /* Magic marker for searching in crash dumps. */
1443 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
1444 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
1445#endif
1446 }
1447
1448 /* Clear all patch information. */
1449 pVM->hwaccm.s.pGuestPatchMem = 0;
1450 pVM->hwaccm.s.pFreeGuestPatchMem = 0;
1451 pVM->hwaccm.s.cbGuestPatchMem = 0;
1452 pVM->hwaccm.s.svm.cPatches = 0;
1453 pVM->hwaccm.s.svm.PatchTree = 0;
1454 pVM->hwaccm.s.svm.fTPRPatchingActive = false;
1455 ASMMemZero32(pVM->hwaccm.s.svm.aPatches, sizeof(pVM->hwaccm.s.svm.aPatches));
1456}
1457
1458/**
1459 * Callback to remove all active TPR patches, restoring the original instructions.
1460 *
1461 * @returns VBox strict status code.
1462 * @param pVM The VM handle.
1463 * @param pVCpu The VMCPU for the EMT we're being called on.
1464 * @param   pvUser  The VCPU id (cast to a pointer) of the EMT that issued the original patch request.
1465 *
1466 */
1467DECLCALLBACK(VBOXSTRICTRC) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
1468{
1469 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1470
1471    /* Only execute the handler on the VCPU on which the original patch request was issued. */
1472 if (pVCpu->idCpu != idCpu)
1473 return VINF_SUCCESS;
1474
1475 Log(("hwaccmR3RemovePatches\n"));
1476 for (unsigned i = 0; i < pVM->hwaccm.s.svm.cPatches; i++)
1477 {
1478 uint8_t szInstr[15];
1479 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[i];
1480 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
1481 int rc;
1482
1483#ifdef LOG_ENABLED
1484 char szOutput[256];
1485
1486 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, 0, szOutput, sizeof(szOutput), 0);
1487 if (VBOX_SUCCESS(rc))
1488 Log(("Patched instr: %s\n", szOutput));
1489#endif
1490
1491 /* Check if the instruction is still the same. */
1492 rc = PGMPhysSimpleReadGCPtr(pVCpu, szInstr, pInstrGC, pPatch->cbNewOp);
1493 if (rc != VINF_SUCCESS)
1494 {
1495 Log(("Patched code removed? (rc=%Rrc0\n", rc));
1496 continue; /* swapped out or otherwise removed; skip it. */
1497 }
1498
1499 if (memcmp(szInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
1500 {
1501 Log(("Patched instruction was changed! (rc=%Rrc0\n", rc));
1502 continue; /* skip it. */
1503 }
1504
1505 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
1506 AssertRC(rc);
1507
1508#ifdef LOG_ENABLED
1509 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, 0, szOutput, sizeof(szOutput), 0);
1510 if (VBOX_SUCCESS(rc))
1511 Log(("Original instr: %s\n", szOutput));
1512#endif
1513 }
1514 pVM->hwaccm.s.svm.cPatches = 0;
1515 pVM->hwaccm.s.svm.PatchTree = 0;
1516 pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;
1517 pVM->hwaccm.s.svm.fTPRPatchingActive = false;
1518 return VINF_SUCCESS;
1519}
1520
1521/**
1522 * Enable patching in a VT-x/AMD-V guest
1523 *
1524 * @returns VBox status code.
1525 * @param pVM The VM to operate on.
1526 * @param idCpu VCPU to execute hwaccmR3RemovePatches on
1527 * @param pPatchMem Patch memory range
1528 * @param cbPatchMem Size of the memory range
1529 */
1530int hwaccmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
1531{
1532 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)idCpu);
1533 AssertRC(rc);
1534
1535 pVM->hwaccm.s.pGuestPatchMem = pPatchMem;
1536 pVM->hwaccm.s.pFreeGuestPatchMem = pPatchMem;
1537 pVM->hwaccm.s.cbGuestPatchMem = cbPatchMem;
1538 return VINF_SUCCESS;
1539}
1540
1541/**
1542 * Enable patching in a VT-x/AMD-V guest
1543 *
1544 * @returns VBox status code.
1545 * @param pVM The VM to operate on.
1546 * @param pPatchMem Patch memory range
1547 * @param cbPatchMem Size of the memory range
1548 */
1549VMMR3DECL(int) HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1550{
1551 Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1552
1553    /* Current TPR patching only applies to AMD CPUs.
1554 * Needs to be extended to Intel CPUs without the APIC TPR hardware optimization.
1555 */
1556 if (CPUMGetCPUVendor(pVM) != CPUMCPUVENDOR_AMD)
1557 return VERR_NOT_SUPPORTED;
1558
1559 if (pVM->cCpus > 1)
1560 {
1561 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
1562 int rc = VMR3ReqCallNoWaitU(pVM->pUVM, VMCPUID_ANY_QUEUE,
1563 (PFNRT)hwaccmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1564 AssertRC(rc);
1565 return rc;
1566 }
1567 return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1568}
1569
1570/**
1571 * Disable patching in a VT-x/AMD-V guest
1572 *
1573 * @returns VBox status code.
1574 * @param pVM The VM to operate on.
1575 * @param pPatchMem Patch memory range
1576 * @param cbPatchMem Size of the memory range
1577 */
1578VMMR3DECL(int) HWACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1579{
1580 Log(("HWACMMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
1581
1582 Assert(pVM->hwaccm.s.pGuestPatchMem == pPatchMem);
1583 Assert(pVM->hwaccm.s.cbGuestPatchMem == cbPatchMem);
1584
1585 /* @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
1586 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)VMMGetCpuId(pVM));
1587 AssertRC(rc);
1588
1589 pVM->hwaccm.s.pGuestPatchMem = 0;
1590 pVM->hwaccm.s.pFreeGuestPatchMem = 0;
1591 pVM->hwaccm.s.cbGuestPatchMem = 0;
1592 pVM->hwaccm.s.svm.fTPRPatchingActive = false;
1593 return VINF_SUCCESS;
1594}
1595
1596
1597/**
1598 * Callback to patch a TPR instruction (vmmcall or mov cr8)
1599 *
1600 * @returns VBox strict status code.
1601 * @param pVM The VM handle.
1602 * @param pVCpu The VMCPU for the EMT we're being called on.
1603 * @param   pvUser  The VCPU id (cast to a pointer) of the EMT that issued the patch request.
1604 *
1605 */
1606DECLCALLBACK(VBOXSTRICTRC) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1607{
1608 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1609 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1610 RTGCPTR oldrip = pCtx->rip;
1611 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
1612 unsigned cbOp;
1613
1614    /* Only execute the handler on the VCPU on which the original patch request was issued (the other CPU(s) might not have switched to protected mode yet). */
1615 if (pVCpu->idCpu != idCpu)
1616 return VINF_SUCCESS;
1617
1618 Log(("hwaccmR3ReplaceTprInstr: %RGv\n", pCtx->rip));
1619
1620 /* Two or more VCPUs were racing to patch this instruction. */
1621 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.svm.PatchTree, (AVLOU32KEY)pCtx->eip);
1622 if (pPatch)
1623 return VINF_SUCCESS;
1624
1625 Assert(pVM->hwaccm.s.svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches));
1626
1627 int rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1628 AssertRC(rc);
1629 if ( rc == VINF_SUCCESS
1630 && pDis->pCurInstr->opcode == OP_MOV
1631 && cbOp >= 3)
1632 {
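        /* 0F 01 D9 is the VMMCALL instruction, used to replace the guest's TPR access so that it
           traps into the hypervisor. */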
1633 uint8_t aVMMCall[3] = { 0xf, 0x1, 0xd9};
1634 uint32_t idx = pVM->hwaccm.s.svm.cPatches;
1635 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
1636
1637 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
1638 AssertRC(rc);
1639
1640 pPatch->cbOp = cbOp;
1641
1642 if (pDis->param1.flags == USE_DISPLACEMENT32)
1643 {
1644 /* write. */
1645 if (pDis->param2.flags == USE_REG_GEN32)
1646 {
1647 pPatch->enmType = HWACCMTPRINSTR_WRITE_REG;
1648 pPatch->uSrcOperand = pDis->param2.base.reg_gen;
1649 }
1650 else
1651 {
1652 Assert(pDis->param2.flags == USE_IMMEDIATE32);
1653 pPatch->enmType = HWACCMTPRINSTR_WRITE_IMM;
1654 pPatch->uSrcOperand = pDis->param2.parval;
1655 }
1656 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
1657 AssertRC(rc);
1658
1659 memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
1660 pPatch->cbNewOp = sizeof(aVMMCall);
1661 }
1662 else
1663 {
1664 RTGCPTR oldrip = pCtx->rip;
1665 uint32_t oldcbOp = cbOp;
1666 uint32_t uMmioReg = pDis->param1.base.reg_gen;
1667
1668 /* read */
1669 Assert(pDis->param1.flags == USE_REG_GEN32);
1670
1671 /* Found:
1672 * mov eax, dword [fffe0080] (5 bytes)
1673 * Check if next instruction is:
1674 * shr eax, 4
1675 */
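            /* CR8 holds the task priority in its low 4 bits, i.e. bits 7:4 of the APIC TPR register at
               FFFE0080h, so a "mov reg, [FFFE0080h]" followed by "shr reg, 4" is equivalent to a single
               "mov reg, cr8". */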
1676 pCtx->rip += cbOp;
1677 rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1678 pCtx->rip = oldrip;
1679 if ( rc == VINF_SUCCESS
1680 && pDis->pCurInstr->opcode == OP_SHR
1681 && pDis->param1.flags == USE_REG_GEN32
1682 && pDis->param1.base.reg_gen == uMmioReg
1683 && pDis->param2.flags == USE_IMMEDIATE8
1684 && pDis->param2.parval == 4
1685 && oldcbOp + cbOp < sizeof(pVM->hwaccm.s.svm.aPatches[idx].aOpcode))
1686 {
1687 uint8_t szInstr[15];
1688
1689 /* Replacing two instructions now. */
1690 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, oldcbOp + cbOp);
1691 AssertRC(rc);
1692
1693 pPatch->cbOp = oldcbOp + cbOp;
1694
1695 /* 0xF0, 0x0F, 0x20, 0xC0 = mov eax, cr8 */
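            /* On AMD CPUs the LOCK prefix (F0h) turns "mov reg, cr0" into a CR8 access, which is how
               CR8 can be reached from 32-bit code here. */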
1696 szInstr[0] = 0xF0;
1697 szInstr[1] = 0x0F;
1698 szInstr[2] = 0x20;
1699 szInstr[3] = 0xC0 | pDis->param1.base.reg_gen;
1700 for (unsigned i = 4; i < pPatch->cbOp; i++)
1701 szInstr[i] = 0x90; /* nop */
1702
1703 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, szInstr, pPatch->cbOp);
1704 AssertRC(rc);
1705
1706 memcpy(pPatch->aNewOpcode, szInstr, pPatch->cbOp);
1707 pPatch->cbNewOp = pPatch->cbOp;
1708
1709 Log(("Acceptable read/shr candidate!\n"));
1710 pPatch->enmType = HWACCMTPRINSTR_READ_SHR4;
1711 }
1712 else
1713 {
1714 pPatch->enmType = HWACCMTPRINSTR_READ;
1715 pPatch->uDstOperand = pDis->param1.base.reg_gen;
1716
1717 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, aVMMCall, sizeof(aVMMCall));
1718 AssertRC(rc);
1719
1720 memcpy(pPatch->aNewOpcode, aVMMCall, sizeof(aVMMCall));
1721 pPatch->cbNewOp = sizeof(aVMMCall);
1722 }
1723 }
1724
1725 pPatch->Core.Key = pCtx->eip;
1726 rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
1727 AssertRC(rc);
1728
1729 pVM->hwaccm.s.svm.cPatches++;
1730 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess);
1731 return VINF_SUCCESS;
1732 }
1733
1734 /* Save invalid patch, so we will not try again. */
1735 uint32_t idx = pVM->hwaccm.s.svm.cPatches;
1736
1737#ifdef LOG_ENABLED
1738 char szOutput[256];
1739 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
1740 if (VBOX_SUCCESS(rc))
1741 Log(("Failed to patch instr: %s\n", szOutput));
1742#endif
1743
1744 pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
1745 pPatch->Core.Key = pCtx->eip;
1746 pPatch->enmType = HWACCMTPRINSTR_INVALID;
1747 rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
1748 AssertRC(rc);
1749 pVM->hwaccm.s.svm.cPatches++;
1750 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure);
1751 return VINF_SUCCESS;
1752}
1753
1754/**
1755 * Callback to patch a TPR instruction (jump to generated code)
1756 *
1757 * @returns VBox strict status code.
1758 * @param pVM The VM handle.
1759 * @param pVCpu The VMCPU for the EMT we're being called on.
1760 * @param   pvUser  The VCPU id (cast to a pointer) of the EMT that issued the patch request.
1761 *
1762 */
1763DECLCALLBACK(VBOXSTRICTRC) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1764{
1765 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
1766 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1767 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
1768 unsigned cbOp;
1769 int rc;
1770#ifdef LOG_ENABLED
1771 RTGCPTR pInstr;
1772 char szOutput[256];
1773#endif
1774
1775    /* Only execute the handler on the VCPU on which the original patch request was issued (the other CPU(s) might not have switched to protected mode yet). */
1776 if (pVCpu->idCpu != idCpu)
1777 return VINF_SUCCESS;
1778
1779 Assert(pVM->hwaccm.s.svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches));
1780
1781 /* Two or more VCPUs were racing to patch this instruction. */
1782 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.svm.PatchTree, (AVLOU32KEY)pCtx->eip);
1783 if (pPatch)
1784 {
1785 Log(("hwaccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
1786 return VINF_SUCCESS;
1787 }
1788
1789 Log(("hwaccmR3PatchTprInstr %RGv\n", pCtx->rip));
1790
1791 rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
1792 AssertRC(rc);
1793 if ( rc == VINF_SUCCESS
1794 && pDis->pCurInstr->opcode == OP_MOV
1795 && cbOp >= 5)
1796 {
1797 uint32_t idx = pVM->hwaccm.s.svm.cPatches;
1798 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
1799 uint8_t aPatch[64];
1800 uint32_t off = 0;
1801
1802#ifdef LOG_ENABLED
1803 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
1804 if (VBOX_SUCCESS(rc))
1805 Log(("Original instr: %s\n", szOutput));
1806#endif
1807
1808 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
1809 AssertRC(rc);
1810
1811 pPatch->cbOp = cbOp;
1812 pPatch->enmType = HWACCMTPRINSTR_JUMP_REPLACEMENT;
1813
1814 if (pDis->param1.flags == USE_DISPLACEMENT32)
1815 {
1816 /*
1817 * TPR write:
1818 *
1819 * push ECX [51]
1820 * push EDX [52]
1821 * push EAX [50]
1822 * xor EDX,EDX [31 D2]
1823 * mov EAX,EAX [89 C0]
1824 * or
1825 * mov EAX,0000000CCh [B8 CC 00 00 00]
1826 * mov ECX,0C0000082h [B9 82 00 00 C0]
1827 * wrmsr [0F 30]
1828 * pop EAX [58]
1829 * pop EDX [5A]
1830 * pop ECX [59]
1831 * jmp return_address [E9 return_address]
1832 *
1833 */
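            /* Note: the generated code funnels the TPR value through MSR C0000082h (LSTAR); ring-0
               presumably intercepts these MSR accesses and treats them as TPR reads/writes, since
               LSTAR is otherwise unused by 32-bit guests. */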
1834 bool fUsesEax = (pDis->param2.flags == USE_REG_GEN32 && pDis->param2.base.reg_gen == USE_REG_EAX);
1835
1836 aPatch[off++] = 0x51; /* push ecx */
1837 aPatch[off++] = 0x52; /* push edx */
1838 if (!fUsesEax)
1839 aPatch[off++] = 0x50; /* push eax */
1840 aPatch[off++] = 0x31; /* xor edx, edx */
1841 aPatch[off++] = 0xD2;
1842 if (pDis->param2.flags == USE_REG_GEN32)
1843 {
1844 if (!fUsesEax)
1845 {
1846 aPatch[off++] = 0x89; /* mov eax, src_reg */
1847 aPatch[off++] = MAKE_MODRM(3, pDis->param2.base.reg_gen, USE_REG_EAX);
1848 }
1849 }
1850 else
1851 {
1852 Assert(pDis->param2.flags == USE_IMMEDIATE32);
1853 aPatch[off++] = 0xB8; /* mov eax, immediate */
1854 *(uint32_t *)&aPatch[off] = pDis->param2.parval;
1855 off += sizeof(uint32_t);
1856 }
1857 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
1858 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
1859 off += sizeof(uint32_t);
1860
1861 aPatch[off++] = 0x0F; /* wrmsr */
1862 aPatch[off++] = 0x30;
1863 if (!fUsesEax)
1864 aPatch[off++] = 0x58; /* pop eax */
1865 aPatch[off++] = 0x5A; /* pop edx */
1866 aPatch[off++] = 0x59; /* pop ecx */
1867 }
1868 else
1869 {
1870 /*
1871 * TPR read:
1872 *
1873 * push ECX [51]
1874 * push EDX [52]
1875 * push EAX [50]
1876 * mov ECX,0C0000082h [B9 82 00 00 C0]
1877 * rdmsr [0F 32]
1878 * mov EAX,EAX [89 C0]
1879 * pop EAX [58]
1880 * pop EDX [5A]
1881 * pop ECX [59]
1882 * jmp return_address [E9 return_address]
1883 *
1884 */
1885 Assert(pDis->param1.flags == USE_REG_GEN32);
1886
1887 if (pDis->param1.base.reg_gen != USE_REG_ECX)
1888 aPatch[off++] = 0x51; /* push ecx */
1889 if (pDis->param1.base.reg_gen != USE_REG_EDX)
1890 aPatch[off++] = 0x52; /* push edx */
1891 if (pDis->param1.base.reg_gen != USE_REG_EAX)
1892 aPatch[off++] = 0x50; /* push eax */
1893
1894 aPatch[off++] = 0x31; /* xor edx, edx */
1895 aPatch[off++] = 0xD2;
1896
1897 aPatch[off++] = 0xB9; /* mov ecx, 0xc0000082 */
1898 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
1899 off += sizeof(uint32_t);
1900
1901 aPatch[off++] = 0x0F; /* rdmsr */
1902 aPatch[off++] = 0x32;
1903
1904 if (pDis->param1.base.reg_gen != USE_REG_EAX)
1905 {
1906 aPatch[off++] = 0x89; /* mov dst_reg, eax */
1907 aPatch[off++] = MAKE_MODRM(3, USE_REG_EAX, pDis->param1.base.reg_gen);
1908 }
1909
1910 if (pDis->param1.base.reg_gen != USE_REG_EAX)
1911 aPatch[off++] = 0x58; /* pop eax */
1912 if (pDis->param1.base.reg_gen != USE_REG_EDX)
1913 aPatch[off++] = 0x5A; /* pop edx */
1914 if (pDis->param1.base.reg_gen != USE_REG_ECX)
1915 aPatch[off++] = 0x59; /* pop ecx */
1916 }
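        /* Append the return jump; the rel32 displacement is relative to the byte following this 5-byte
           jmp and targets the instruction after the original (patched) TPR access. */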
1917 aPatch[off++] = 0xE9; /* jmp return_address */
1918 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem + off + 4);
1919 off += sizeof(RTRCUINTPTR);
1920
1921 if (pVM->hwaccm.s.pFreeGuestPatchMem + off <= pVM->hwaccm.s.pGuestPatchMem + pVM->hwaccm.s.cbGuestPatchMem)
1922 {
1923 /* Write new code to the patch buffer. */
1924 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hwaccm.s.pFreeGuestPatchMem, aPatch, off);
1925 AssertRC(rc);
1926
1927#ifdef LOG_ENABLED
1928 pInstr = pVM->hwaccm.s.pFreeGuestPatchMem;
1929 while (true)
1930 {
1931 uint32_t cb;
1932
1933 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pInstr, 0, szOutput, sizeof(szOutput), &cb);
1934 if (VBOX_SUCCESS(rc))
1935 Log(("Patch instr %s\n", szOutput));
1936
1937 pInstr += cb;
1938
1939 if (pInstr >= pVM->hwaccm.s.pFreeGuestPatchMem + off)
1940 break;
1941 }
1942#endif
1943
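            /* Build the 5-byte relative jmp that will overwrite the original TPR instruction and
               transfer control to the freshly written patch code. */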
1944 pPatch->aNewOpcode[0] = 0xE9;
1945 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
1946
1947 /* Overwrite the TPR instruction with a jump. */
1948 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
1949 AssertRC(rc);
1950
1951#ifdef LOG_ENABLED
1952 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
1953 if (VBOX_SUCCESS(rc))
1954 Log(("Jump: %s\n", szOutput));
1955#endif
1956 pVM->hwaccm.s.pFreeGuestPatchMem += off;
1957 pPatch->cbNewOp = 5;
1958
1959 pPatch->Core.Key = pCtx->eip;
1960 rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
1961 AssertRC(rc);
1962
1963 pVM->hwaccm.s.svm.cPatches++;
1964 pVM->hwaccm.s.svm.fTPRPatchingActive = true;
1965 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess);
1966 return VINF_SUCCESS;
1967 }
1968 else
1969 Log(("Ran out of space in our patch buffer!\n"));
1970 }
1971
1972 /* Save invalid patch, so we will not try again. */
1973 uint32_t idx = pVM->hwaccm.s.svm.cPatches;
1974
1975#ifdef LOG_ENABLED
1976 rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
1977 if (VBOX_SUCCESS(rc))
1978 Log(("Failed to patch instr: %s\n", szOutput));
1979#endif
1980
1981 pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
1982 pPatch->Core.Key = pCtx->eip;
1983 pPatch->enmType = HWACCMTPRINSTR_INVALID;
1984 rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
1985 AssertRC(rc);
1986 pVM->hwaccm.s.svm.cPatches++;
1987 STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure);
1988 return VINF_SUCCESS;
1989}
1990
1991/**
1992 * Attempt to patch TPR MMIO instructions
1993 *
1994 * @returns VBox status code.
1995 * @param pVM The VM to operate on.
1996 * @param pVCpu The VM CPU to operate on.
1997 * @param pCtx CPU context
1998 */
1999VMMR3DECL(int) HWACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2000{
2001 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, (pVM->hwaccm.s.pGuestPatchMem) ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr, (void *)pVCpu->idCpu);
2002 AssertRC(rc);
2003 return rc;
2004}
2005
2006/**
2007 * Force execution of the current IO code in the recompiler
2008 *
2009 * @returns VBox status code.
2010 * @param pVM The VM to operate on.
2011 * @param pCtx Partial VM execution context
2012 */
2013VMMR3DECL(int) HWACCMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
2014{
2015 PVMCPU pVCpu = VMMGetCpu(pVM);
2016
2017 Assert(pVM->fHWACCMEnabled);
2018 Log(("HWACCMR3EmulateIoBlock\n"));
2019
2020    /* This is primarily intended to speed up GRUB, so we don't care about paged protected mode. */
2021 if (HWACCMCanEmulateIoBlockEx(pCtx))
2022 {
2023 Log(("HWACCMR3EmulateIoBlock -> enabled\n"));
2024 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = true;
2025 pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
2026 pVCpu->hwaccm.s.EmulateIoBlock.cr0 = pCtx->cr0;
2027 return VINF_EM_RESCHEDULE_REM;
2028 }
2029 return VINF_SUCCESS;
2030}
2031
2032/**
2033 * Checks if we can currently use hardware accelerated raw mode.
2034 *
2035 * @returns boolean
2036 * @param pVM The VM to operate on.
2037 * @param pCtx Partial VM execution context
2038 */
2039VMMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
2040{
2041 PVMCPU pVCpu = VMMGetCpu(pVM);
2042
2043 Assert(pVM->fHWACCMEnabled);
2044
2045 /* If we're still executing the IO code, then return false. */
2046 if ( RT_UNLIKELY(pVCpu->hwaccm.s.EmulateIoBlock.fEnabled)
2047 && pCtx->rip < pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
2048 && pCtx->rip > pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
2049 && pCtx->cr0 == pVCpu->hwaccm.s.EmulateIoBlock.cr0)
2050 return false;
2051
2052 pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = false;
2053
2054 /* AMD-V supports real & protected mode with or without paging. */
2055 if (pVM->hwaccm.s.svm.fEnabled)
2056 {
2057 pVCpu->hwaccm.s.fActive = true;
2058 return true;
2059 }
2060
2061 pVCpu->hwaccm.s.fActive = false;
2062
2063 /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
2064#ifdef HWACCM_VMX_EMULATE_REALMODE
2065 if (pVM->hwaccm.s.vmx.pRealModeTSS)
2066 {
2067 if (CPUMIsGuestInRealModeEx(pCtx))
2068 {
2069 /* VT-x will not allow high selector bases in v86 mode; fall back to the recompiler in that case.
2070 * The base must also be equal to (sel << 4).
2071 */
2072 if ( ( pCtx->cs != (pCtx->csHid.u64Base >> 4)
2073 && pCtx->csHid.u64Base != 0xffff0000 /* we can deal with the BIOS code as it's also mapped into the lower region. */)
2074 || pCtx->ds != (pCtx->dsHid.u64Base >> 4)
2075 || pCtx->es != (pCtx->esHid.u64Base >> 4)
2076 || pCtx->fs != (pCtx->fsHid.u64Base >> 4)
2077 || pCtx->gs != (pCtx->gsHid.u64Base >> 4)
2078 || pCtx->ss != (pCtx->ssHid.u64Base >> 4))
2079 {
2080 return false;
2081 }
2082 }
2083 else
2084 {
2085 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
2086 /* Verify the requirements for executing code in protected mode. VT-x can't handle the CPU state right after a switch
2087 * from real to protected mode. (all sorts of RPL & DPL assumptions)
2088 */
2089 if ( pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
2090 && enmGuestMode >= PGMMODE_PROTECTED)
2091 {
2092 if ( (pCtx->cs & X86_SEL_RPL)
2093 || (pCtx->ds & X86_SEL_RPL)
2094 || (pCtx->es & X86_SEL_RPL)
2095 || (pCtx->fs & X86_SEL_RPL)
2096 || (pCtx->gs & X86_SEL_RPL)
2097 || (pCtx->ss & X86_SEL_RPL))
2098 {
2099 return false;
2100 }
2101 }
2102 }
2103 }
2104 else
2105#endif /* HWACCM_VMX_EMULATE_REALMODE */
2106 {
2107 if (!CPUMIsGuestInLongModeEx(pCtx))
2108 {
2109 /** @todo This should (probably) be set on every excursion to the REM,
2110 * however it's too risky right now. So, only apply it when we go
2111 * back to REM for real mode execution. (The XP hack below doesn't
2112 * work reliably without this.)
2113 * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HWACCM. */
2114 pVM->aCpus[0].hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2115
2116 /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
2117 if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr == 0)
2118 return false;
2119
2120 /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
2121 /* Windows XP; switch to protected mode; all selectors are marked not present in the
2122 * hidden registers (possible recompiler bug; see load_seg_vm) */
2123 if (pCtx->csHid.Attr.n.u1Present == 0)
2124 return false;
2125 if (pCtx->ssHid.Attr.n.u1Present == 0)
2126 return false;
2127
2128 /* Windows XP: possible same as above, but new recompiler requires new heuristics?
2129 VT-x doesn't seem to like something about the guest state and this stuff avoids it. */
2130 /** @todo This check is actually wrong, it doesn't take the direction of the
2131 * stack segment into account. But, it does the job for now. */
2132 if (pCtx->rsp >= pCtx->ssHid.u32Limit)
2133 return false;
2134#if 0
2135 if ( pCtx->cs >= pCtx->gdtr.cbGdt
2136 || pCtx->ss >= pCtx->gdtr.cbGdt
2137 || pCtx->ds >= pCtx->gdtr.cbGdt
2138 || pCtx->es >= pCtx->gdtr.cbGdt
2139 || pCtx->fs >= pCtx->gdtr.cbGdt
2140 || pCtx->gs >= pCtx->gdtr.cbGdt)
2141 return false;
2142#endif
2143 }
2144 }
2145
2146 if (pVM->hwaccm.s.vmx.fEnabled)
2147 {
2148 uint32_t mask;
2149
2150 /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
2151 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
2152 /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
2153 mask &= ~X86_CR0_NE;
2154
2155#ifdef HWACCM_VMX_EMULATE_REALMODE
2156 if (pVM->hwaccm.s.vmx.pRealModeTSS)
2157 {
2158 /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
2159 mask &= ~(X86_CR0_PG|X86_CR0_PE);
2160 }
2161 else
2162#endif
2163 {
2164 /* We support protected mode without paging using identity mapping. */
2165 mask &= ~X86_CR0_PG;
2166 }
2167 if ((pCtx->cr0 & mask) != mask)
2168 return false;
2169
2170 /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
2171 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
2172 if ((pCtx->cr0 & mask) != 0)
2173 return false;
2174
2175 /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
2176 mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
2177 mask &= ~X86_CR4_VMXE;
2178 if ((pCtx->cr4 & mask) != mask)
2179 return false;
2180
2181 /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
2182 mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
2183 if ((pCtx->cr4 & mask) != 0)
2184 return false;
2185
2186 pVCpu->hwaccm.s.fActive = true;
2187 return true;
2188 }
2189
2190 return false;
2191}
2192
2193/**
2194 * Notification from EM about a rescheduling into hardware assisted execution
2195 * mode.
2196 *
2197 * @param pVCpu Pointer to the current virtual cpu structure.
2198 */
2199VMMR3DECL(void) HWACCMR3NotifyScheduled(PVMCPU pVCpu)
2200{
2201 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2202}
2203
2204/**
2205 * Notification from EM about returning from instruction emulation (REM / EM).
2206 *
2207 * @param pVCpu Pointer to the current virtual cpu structure.
2208 */
2209VMMR3DECL(void) HWACCMR3NotifyEmulated(PVMCPU pVCpu)
2210{
2211 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
2212}
2213
2214/**
2215 * Checks if we are currently using hardware accelerated raw mode.
2216 *
2217 * @returns boolean
2218 * @param pVCpu The VMCPU to operate on.
2219 */
2220VMMR3DECL(bool) HWACCMR3IsActive(PVMCPU pVCpu)
2221{
2222 return pVCpu->hwaccm.s.fActive;
2223}
2224
2225/**
2226 * Checks if we are currently using nested paging.
2227 *
2228 * @returns boolean
2229 * @param pVM The VM to operate on.
2230 */
2231VMMR3DECL(bool) HWACCMR3IsNestedPagingActive(PVM pVM)
2232{
2233 return pVM->hwaccm.s.fNestedPaging;
2234}
2235
2236/**
2237 * Checks if we are currently using VPID in VT-x mode.
2238 *
2239 * @returns boolean
2240 * @param pVM The VM to operate on.
2241 */
2242VMMR3DECL(bool) HWACCMR3IsVPIDActive(PVM pVM)
2243{
2244 return pVM->hwaccm.s.vmx.fVPID;
2245}
2246
2247
2248/**
2249 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
2250 *
2251 * @returns boolean
2252 * @param   pVCpu   The VMCPU to operate on.
2253 */
2254VMMR3DECL(bool) HWACCMR3IsEventPending(PVMCPU pVCpu)
2255{
2256 return HWACCMIsEnabled(pVCpu->pVMR3) && pVCpu->hwaccm.s.Event.fPending;
2257}
2258
2259/**
2260 * Restart an I/O instruction that was refused in ring-0
2261 *
2262 * @returns Strict VBox status code. Informational status codes other than the one documented
2263 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2264 * @retval VINF_SUCCESS Success.
2265 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2266 * status code must be passed on to EM.
2267 * @retval VERR_NOT_FOUND if no pending I/O instruction.
2268 *
2269 * @param pVM The VM to operate on.
2270 * @param pVCpu The VMCPU to operate on.
2271 * @param pCtx VCPU register context
2272 */
2273VMMR3DECL(VBOXSTRICTRC) HWACCMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2274{
2275 HWACCMPENDINGIO enmType = pVCpu->hwaccm.s.PendingIO.enmType;
2276
2277 pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_INVALID;
2278
2279 if ( pVCpu->hwaccm.s.PendingIO.GCPtrRip != pCtx->rip
2280 || enmType == HWACCMPENDINGIO_INVALID)
2281 return VERR_NOT_FOUND;
2282
2283 VBOXSTRICTRC rcStrict;
2284 switch (enmType)
2285 {
2286 case HWACCMPENDINGIO_PORT_READ:
2287 {
2288 uint32_t uAndVal = pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal;
2289 uint32_t u32Val = 0;
2290
2291 rcStrict = IOMIOPortRead(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
2292 &u32Val,
2293 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
2294 if (IOM_SUCCESS(rcStrict))
2295 {
2296 /* Write back to the EAX register. */
2297 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
2298 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
2299 }
2300 break;
2301 }
2302
2303 case HWACCMPENDINGIO_PORT_WRITE:
2304 rcStrict = IOMIOPortWrite(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
2305 pCtx->eax & pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal,
2306 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
2307 if (IOM_SUCCESS(rcStrict))
2308 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
2309 break;
2310
2311 default:
2312 AssertFailed();
2313 return VERR_INTERNAL_ERROR;
2314 }
2315
2316 return rcStrict;
2317}
2318
2319/**
2320 * Inject an NMI into a running VM (only VCPU 0!)
2321 *
2322 * @returns VBox status code.
2323 * @param pVM The VM to operate on.
2324 */
2325VMMR3DECL(int) HWACCMR3InjectNMI(PVM pVM)
2326{
2327 VMCPU_FF_SET(&pVM->aCpus[0], VMCPU_FF_INTERRUPT_NMI);
2328 return VINF_SUCCESS;
2329}
2330
2331/**
2332 * Checks for a fatal VT-x/AMD-V error and produces a meaningful
2333 * release log message.
2334 *
2335 * @param pVM The VM to operate on.
2336 * @param iStatusCode VBox status code
2337 */
2338VMMR3DECL(void) HWACCMR3CheckError(PVM pVM, int iStatusCode)
2339{
2340 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2341 {
2342 switch(iStatusCode)
2343 {
2344 case VERR_VMX_INVALID_VMCS_FIELD:
2345 break;
2346
2347 case VERR_VMX_INVALID_VMCS_PTR:
2348 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
2349 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
2350 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idEnteredCpu));
2351 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idCurrentCpu));
2352 break;
2353
2354 case VERR_VMX_UNABLE_TO_START_VM:
2355 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
2356 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
2357#if 0 /* @todo dump the current control fields to the release log */
2358 if (pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
2359 {
2360
2361 }
2362#endif
2363 break;
2364
2365 case VERR_VMX_UNABLE_TO_RESUME_VM:
2366 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
2367 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
2368 break;
2369
2370 case VERR_VMX_INVALID_VMXON_PTR:
2371 break;
2372 }
2373 }
2374}
2375
2376/**
2377 * Execute state save operation.
2378 *
2379 * @returns VBox status code.
2380 * @param pVM VM Handle.
2381 * @param pSSM SSM operation handle.
2382 */
2383static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
2384{
2385 int rc;
2386
2387 Log(("hwaccmR3Save:\n"));
2388
2389 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2390 {
2391 /*
2392 * Save the basic bits - fortunately all the other things can be resynced on load.
2393 */
2394 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
2395 AssertRCReturn(rc, rc);
2396 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
2397 AssertRCReturn(rc, rc);
2398 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
2399 AssertRCReturn(rc, rc);
2400
2401 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode);
2402 AssertRCReturn(rc, rc);
2403 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode);
2404 AssertRCReturn(rc, rc);
2405 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode);
2406 AssertRCReturn(rc, rc);
2407 }
2408#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
2409 rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pGuestPatchMem);
2410 AssertRCReturn(rc, rc);
2411 rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pFreeGuestPatchMem);
2412 AssertRCReturn(rc, rc);
2413 rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cbGuestPatchMem);
2414 AssertRCReturn(rc, rc);
2415
2416 /* Store all the guest patch records too. */
2417 rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.svm.cPatches);
2418 AssertRCReturn(rc, rc);
2419
2420 for (unsigned i = 0; i < pVM->hwaccm.s.svm.cPatches; i++)
2421 {
2422 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[i];
2423
2424 rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
2425 AssertRCReturn(rc, rc);
2426
2427 rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
2428 AssertRCReturn(rc, rc);
2429
2430 rc = SSMR3PutU32(pSSM, pPatch->cbOp);
2431 AssertRCReturn(rc, rc);
2432
2433 rc = SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
2434 AssertRCReturn(rc, rc);
2435
2436 rc = SSMR3PutU32(pSSM, pPatch->cbNewOp);
2437 AssertRCReturn(rc, rc);
2438
2439 AssertCompileSize(HWACCMTPRINSTR, 4);
2440 rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
2441 AssertRCReturn(rc, rc);
2442
2443 rc = SSMR3PutU32(pSSM, pPatch->uSrcOperand);
2444 AssertRCReturn(rc, rc);
2445
2446 rc = SSMR3PutU32(pSSM, pPatch->uDstOperand);
2447 AssertRCReturn(rc, rc);
2448
2449 rc = SSMR3PutU32(pSSM, pPatch->pJumpTarget);
2450 AssertRCReturn(rc, rc);
2451
2452 rc = SSMR3PutU32(pSSM, pPatch->cFaults);
2453 AssertRCReturn(rc, rc);
2454 }
2455#endif
2456 return VINF_SUCCESS;
2457}
2458
2459/**
2460 * Execute state load operation.
2461 *
2462 * @returns VBox status code.
2463 * @param pVM VM Handle.
2464 * @param pSSM SSM operation handle.
2465 * @param uVersion Data layout version.
2466 * @param uPass The data pass.
2467 */
2468static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2469{
2470 int rc;
2471
2472 Log(("hwaccmR3Load:\n"));
2473 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
2474
2475 /*
2476 * Validate version.
2477 */
2478 if ( uVersion != HWACCM_SSM_VERSION
2479 && uVersion != HWACCM_SSM_VERSION_NO_PATCHING
2480 && uVersion != HWACCM_SSM_VERSION_2_0_X)
2481 {
2482 AssertMsgFailed(("hwaccmR3Load: Invalid version uVersion=%d!\n", uVersion));
2483 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
2484 }
2485 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2486 {
2487 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
2488 AssertRCReturn(rc, rc);
2489 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
2490 AssertRCReturn(rc, rc);
2491 rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
2492 AssertRCReturn(rc, rc);
2493
2494 if (uVersion >= HWACCM_SSM_VERSION_NO_PATCHING)
2495 {
2496 uint32_t val;
2497
2498 rc = SSMR3GetU32(pSSM, &val);
2499 AssertRCReturn(rc, rc);
2500 pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
2501
2502 rc = SSMR3GetU32(pSSM, &val);
2503 AssertRCReturn(rc, rc);
2504 pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
2505
2506 rc = SSMR3GetU32(pSSM, &val);
2507 AssertRCReturn(rc, rc);
2508 pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
2509 }
2510 }
2511#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
2512 if (uVersion > HWACCM_SSM_VERSION_NO_PATCHING)
2513 {
2514 rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pGuestPatchMem);
2515 AssertRCReturn(rc, rc);
2516 rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pFreeGuestPatchMem);
2517 AssertRCReturn(rc, rc);
2518 rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cbGuestPatchMem);
2519 AssertRCReturn(rc, rc);
2520
2521 /* Fetch all TPR patch records. */
2522 rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.svm.cPatches);
2523 AssertRCReturn(rc, rc);
2524
2525 for (unsigned i = 0; i < pVM->hwaccm.s.svm.cPatches; i++)
2526 {
2527 PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[i];
2528
2529 rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
2530 AssertRCReturn(rc, rc);
2531
2532 rc = SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
2533 AssertRCReturn(rc, rc);
2534
2535 rc = SSMR3GetU32(pSSM, &pPatch->cbOp);
2536 AssertRCReturn(rc, rc);
2537
2538 rc = SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
2539 AssertRCReturn(rc, rc);
2540
2541 rc = SSMR3GetU32(pSSM, &pPatch->cbNewOp);
2542 AssertRCReturn(rc, rc);
2543
2544 rc = SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
2545 AssertRCReturn(rc, rc);
2546
2547 if (pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT)
2548 pVM->hwaccm.s.svm.fTPRPatchingActive = true;
2549
2550 Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.svm.fTPRPatchingActive == false);
2551
2552 rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
2553 AssertRCReturn(rc, rc);
2554
2555 rc = SSMR3GetU32(pSSM, &pPatch->uDstOperand);
2556 AssertRCReturn(rc, rc);
2557
2558 rc = SSMR3GetU32(pSSM, &pPatch->cFaults);
2559 AssertRCReturn(rc, rc);
2560
2561 rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
2562 AssertRCReturn(rc, rc);
2563
2564 Log(("hwaccmR3Load: patch %d\n", i));
2565 Log(("Key = %x\n", pPatch->Core.Key));
2566 Log(("cbOp = %d\n", pPatch->cbOp));
2567 Log(("cbNewOp = %d\n", pPatch->cbNewOp));
2568 Log(("type = %d\n", pPatch->enmType));
2569 Log(("srcop = %d\n", pPatch->uSrcOperand));
2570 Log(("dstop = %d\n", pPatch->uDstOperand));
2571 Log(("cFaults = %d\n", pPatch->cFaults));
2572 Log(("target = %x\n", pPatch->pJumpTarget));
2573 rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
2574 AssertRC(rc);
2575 }
2576 }
2577#endif
2578 return VINF_SUCCESS;
2579}
2580