VirtualBox

source: vbox/trunk/src/VBox/VMM/HWACCM.cpp@9375

Last change on this file since 9375 was 9375, checked in by vboxsync, 17 years ago

Use another define to actually enable 64-bit guest support.
Long mode implies PAE.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 38.2 KB
/* $Id: HWACCM.cpp 9375 2008-06-04 08:42:37Z vboxsync $ */
/** @file
 * HWACCM - Intel/AMD VM Hardware Support Manager
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/mm.h>
#include <VBox/pdm.h>
#include <VBox/pgm.h>
#include <VBox/trpm.h>
#include <VBox/dbgf.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/patm.h>
#include <VBox/csam.h>
#include <VBox/selm.h>

#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>

/* Uncomment to enable experimental nested paging. */
//#define VBOX_WITH_NESTED_PAGING
/* Uncomment to enable 64-bit guest support. */
//#define VBOX_ENABLE_64_BITS_GUESTS
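/* Note: per the r9375 change log above, defining VBOX_ENABLE_64_BITS_GUESTS also
 * exposes PAE to the guest (see HWACCMR3InitFinalizeR0 below), since long mode
 * implies PAE. */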

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);


/**
 * Initializes the HWACCM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(int) HWACCMR3Init(PVM pVM)
{
    LogFlow(("HWACCMR3Init\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertRelease(!(RT_OFFSETOF(VM, hwaccm.s) & 31));
    AssertRelease(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));

    /* Some structure checks. */
    AssertMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved3) == 0xC0, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved3)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));

    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4) == 0x4A0, ("guest.u8Reserved4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6) == 0x4D8, ("guest.u8Reserved6 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7) == 0x580, ("guest.u8Reserved7 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9) == 0x648, ("guest.u8Reserved9 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9)));
    AssertMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved10) == 0x698, ("u8Reserved10 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved10)));
    AssertMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
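    /* The offsets checked above mirror the VMCB layout in AMD's SVM specification;
     * if one of these assertions fires, the SVM_VMCB definition in HWACCMInternal.h
     * has presumably drifted out of sync with the hardware-defined layout. */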


    /*
     * Register the saved state data unit.
     */
    int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
                                   NULL, hwaccmR3Save, NULL,
                                   NULL, hwaccmR3Load, NULL);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Misc initialisation. */
    pVM->hwaccm.s.vmx.fSupported = false;
    pVM->hwaccm.s.svm.fSupported = false;
    pVM->hwaccm.s.vmx.fEnabled   = false;
    pVM->hwaccm.s.svm.fEnabled   = false;

    pVM->hwaccm.s.fActive        = false;
    pVM->hwaccm.s.fNestedPaging  = false;

    /* On first entry we'll sync everything. */
    pVM->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
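    /* HWACCM_CHANGED_ALL flags the entire guest context as dirty so that the
     * first world switch loads the full state into the VMCS/VMCB; subsequent
     * entries presumably only sync what these flags mark as changed. */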

    pVM->hwaccm.s.vmx.cr0_mask = 0;
    pVM->hwaccm.s.vmx.cr4_mask = 0;

    /*
     * Statistics.
     */
    STAM_REG(pVM, &pVM->hwaccm.s.StatEntry, STAMTYPE_PROFILE, "/PROF/HWACCM/SwitchToGC",   STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExit,  STAMTYPE_PROFILE, "/PROF/HWACCM/SwitchFromGC", STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit");
    STAM_REG(pVM, &pVM->hwaccm.s.StatInGC,  STAMTYPE_PROFILE, "/PROF/HWACCM/InGC",         STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch");

    STAM_REG(pVM, &pVM->hwaccm.s.StatExitShadowNM,      STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Shadow/#NM",  STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestNM,       STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#NM",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitShadowPF,      STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Shadow/#PF",  STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestPF,       STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#PF",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestUD,       STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#UD",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestSS,       STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#SS",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestNP,       STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#NP",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestGP,       STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#GP",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestMF,       STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#MF",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitGuestDE,       STAMTYPE_COUNTER, "/HWACCM/Exit/Trap/Guest/#DE",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitInvpg,         STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/Invlpg",     STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitInvd,          STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/Invd",       STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitCpuid,         STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/Cpuid",      STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitRdtsc,         STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/Rdtsc",      STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitCRxWrite,      STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/CRx/Write",  STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitCRxRead,       STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/CRx/Read",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitDRxWrite,      STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/DRx/Write",  STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitDRxRead,       STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/DRx/Read",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitCLTS,          STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/CLTS",       STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitLMSW,          STAMTYPE_COUNTER, "/HWACCM/Exit/Instr/LMSW",       STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIOWrite,       STAMTYPE_COUNTER, "/HWACCM/Exit/IO/Write",         STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIORead,        STAMTYPE_COUNTER, "/HWACCM/Exit/IO/Read",          STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIOStringWrite, STAMTYPE_COUNTER, "/HWACCM/Exit/IO/WriteString",   STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIOStringRead,  STAMTYPE_COUNTER, "/HWACCM/Exit/IO/ReadString",    STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitIrqWindow,     STAMTYPE_COUNTER, "/HWACCM/Exit/GuestIrq/Pending", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatExitMaxResume,     STAMTYPE_COUNTER, "/HWACCM/Exit/Safety/MaxResume", STAMUNIT_OCCURENCES, "Nr of occurrences");

    STAM_REG(pVM, &pVM->hwaccm.s.StatSwitchGuestIrq,    STAMTYPE_COUNTER, "/HWACCM/Switch/IrqPending",     STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatSwitchToR3,        STAMTYPE_COUNTER, "/HWACCM/Switch/ToR3",           STAMUNIT_OCCURENCES, "Nr of occurrences");

    STAM_REG(pVM, &pVM->hwaccm.s.StatIntInject,         STAMTYPE_COUNTER, "/HWACCM/Irq/Inject",            STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatIntReinject,       STAMTYPE_COUNTER, "/HWACCM/Irq/Reinject",          STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatPendingHostIrq,    STAMTYPE_COUNTER, "/HWACCM/Irq/PendingOnHost",     STAMUNIT_OCCURENCES, "Nr of occurrences");

    STAM_REG(pVM, &pVM->hwaccm.s.StatFlushPageManual,       STAMTYPE_COUNTER, "/HWACCM/Flush/Page/Virt/Manual", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatFlushPhysPageManual,   STAMTYPE_COUNTER, "/HWACCM/Flush/Page/Phys/Manual", STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatFlushTLBManual,        STAMTYPE_COUNTER, "/HWACCM/Flush/TLB/Manual",       STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatFlushTLBCRxChange,     STAMTYPE_COUNTER, "/HWACCM/Flush/TLB/CRx",          STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatFlushPageInvlpg,       STAMTYPE_COUNTER, "/HWACCM/Flush/Page/Invlpg",      STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatFlushTLBWorldSwitch,   STAMTYPE_COUNTER, "/HWACCM/Flush/TLB/Switch",       STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatNoFlushTLBWorldSwitch, STAMTYPE_COUNTER, "/HWACCM/Flush/TLB/Skipped",      STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatFlushASID,             STAMTYPE_COUNTER, "/HWACCM/Flush/TLB/ASID",         STAMUNIT_OCCURENCES, "Nr of occurrences");

    STAM_REG(pVM, &pVM->hwaccm.s.StatTSCOffset,    STAMTYPE_COUNTER, "/HWACCM/TSC/Offset",    STAMUNIT_OCCURENCES, "Nr of occurrences");
    STAM_REG(pVM, &pVM->hwaccm.s.StatTSCIntercept, STAMTYPE_COUNTER, "/HWACCM/TSC/Intercept", STAMUNIT_OCCURENCES, "Nr of occurrences");

    pVM->hwaccm.s.pStatExitReason = 0;

#ifdef VBOX_WITH_STATISTICS
    rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVM->hwaccm.s.pStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVM->hwaccm.s.pStatExitReason);
    AssertRC(rc);
    if (VBOX_SUCCESS(rc))
    {
        for (int i=0;i<MAX_EXITREASON_STAT;i++)
        {
            char szName[64];
            RTStrPrintf(szName, sizeof(szName), "/HWACCM/Exit/Reason/%02x", i);
            int rc = STAMR3Register(pVM, &pVM->hwaccm.s.pStatExitReason[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "Exit reason");
            AssertRC(rc);
        }
        int rc = STAMR3Register(pVM, &pVM->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, "/HWACCM/Exit/Reason/#NPF", STAMUNIT_OCCURENCES, "Exit reason");
        AssertRC(rc);
    }
    pVM->hwaccm.s.pStatExitReasonR0 = MMHyperR3ToR0(pVM, pVM->hwaccm.s.pStatExitReason);
    Assert(pVM->hwaccm.s.pStatExitReasonR0);
#endif

    /* Disabled by default. */
    pVM->fHWACCMEnabled = false;

    /* HWACCM support must be explicitly enabled in the configuration file. */
    pVM->hwaccm.s.fAllowed = false;
    CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "HWVirtExt/"), "Enabled", &pVM->hwaccm.s.fAllowed);
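    /* Note: the return code is ignored here; if the HWVirtExt/Enabled key is
     * missing, fAllowed simply keeps its default value of false. */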

    return VINF_SUCCESS;
}


/**
 * Turns off normal raw mode features.
 *
 * @param   pVM         The VM to operate on.
 */
static void hwaccmr3DisableRawMode(PVM pVM)
{
    /* Disable PATM & CSAM. */
    PATMR3AllowPatching(pVM, false);
    CSAMDisableScanning(pVM);

    /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
    SELMR3DisableMonitoring(pVM);
    TRPMR3DisableMonitoring(pVM);

    /* The hidden selector registers are now valid. */
    CPUMSetHiddenSelRegsValid(pVM, true);

    /* Disable the switcher code (safety precaution). */
    VMMR3DisableSwitcher(pVM);

    /* Disable mapping of the hypervisor into the shadow page table. */
    PGMR3ChangeShwPDMappings(pVM, false);

    /* Disable the switcher. */
    VMMR3DisableSwitcher(pVM);

    if (pVM->hwaccm.s.fNestedPaging)
    {
        /* Reinit the paging mode to force the new shadow mode. */
        PGMR3ChangeMode(pVM, PGMMODE_REAL);
    }
}

/**
 * Initialize VT-x or AMD-V.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 */
HWACCMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM)
{
    int rc;

    if (    !pVM->hwaccm.s.vmx.fSupported
        &&  !pVM->hwaccm.s.svm.fSupported)
    {
        LogRel(("HWACCM: No VMX or SVM CPU extension found. Reason %Vrc\n", pVM->hwaccm.s.lLastError));
        LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%VX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
        return VINF_SUCCESS;
    }

    /*
     * Note that we have a global setting for VT-x/AMD-V usage: VMX root mode changes the way the CPU operates, and our 64-bit
     * switcher will trap because it turns off paging, which is not allowed in VMX root mode.
     *
     * To simplify matters we'll just force all running VMs to either use raw or hwaccm mode. No mixing allowed.
     */
    /* If we enabled or disabled hwaccm mode, then it can't be changed until all the VMs are shut down. */
    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_ENABLE, (pVM->hwaccm.s.fAllowed) ? HWACCMSTATE_ENABLED : HWACCMSTATE_DISABLED, NULL);
    if (VBOX_FAILURE(rc))
    {
        LogRel(("HWACCMR3InitFinalize: SUPCallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Vrc\n", rc));
        LogRel(("HWACCMR3InitFinalize: disallowed %s of HWACCM\n", pVM->hwaccm.s.fAllowed ? "enabling" : "disabling"));
        /* Invert the selection. */
        pVM->hwaccm.s.fAllowed ^= 1;
        LogRel(("HWACCMR3InitFinalize: new HWACCM status = %s\n", pVM->hwaccm.s.fAllowed ? "enabled" : "disabled"));

        if (pVM->hwaccm.s.fAllowed)
        {
            if (pVM->hwaccm.s.vmx.fSupported)
                VMSetRuntimeError(pVM, false, "HwAccmModeChangeDisallowed", "An active VM already uses Intel VT-x hardware acceleration. It is not allowed to simultaneously use software virtualization, therefore this VM will be run using VT-x as well.\n");
            else
                VMSetRuntimeError(pVM, false, "HwAccmModeChangeDisallowed", "An active VM already uses AMD-V hardware acceleration. It is not allowed to simultaneously use software virtualization, therefore this VM will be run using AMD-V as well.\n");
        }
        else
            VMSetRuntimeError(pVM, false, "HwAccmModeChangeDisallowed", "An active VM already uses software virtualization. It is not allowed to simultaneously use VT-x or AMD-V, therefore this VM will be run using software virtualization as well.\n");
    }

    if (pVM->hwaccm.s.fAllowed == false)
        return VINF_SUCCESS;    /* disabled */

    Assert(!pVM->fHWACCMEnabled);

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));

        if (    pVM->hwaccm.s.fInitialized == false
            &&  pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
        {
            uint64_t val;

            LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
            LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL = %VX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
            LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
            LogRel(("HWACCM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
            LogRel(("HWACCM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
            LogRel(("HWACCM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));

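            /* Convention for the VMX capability MSRs dumped below: the high dword
             * holds the allowed-1 settings (a set bit means the control may be
             * enabled), while the low dword holds the allowed-0 settings (a set
             * bit there means the control *must* be enabled). */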
304 LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls));
305 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls >> 32ULL;
306 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
307 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
308 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
309 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
310 val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls;
311 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
312 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
313 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
314 LogRel(("HWACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
315
316 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls));
317 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls >> 32ULL;
318 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
319 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
320 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
321 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
322 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
323 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
324 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
325 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
326 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
327 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
328 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
329 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
330 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
331 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
332 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
333 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
334 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
335 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
336 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
337 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
338 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
339 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
340 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
341 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
342 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
343 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
344 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
345 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
346 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
347 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
348 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
349 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
350 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls;
351 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
352 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
353 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
354 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
355 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
356 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
357 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
358 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
359 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
360 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
361 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
362 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
363 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
364 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
365 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
366 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
367 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
368 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
369 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
370 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
371 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
372 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
373 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
374 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
375 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
376 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
377 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
378 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
379 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
380 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
381 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
382 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
383
384 LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry));
385 val = pVM->hwaccm.s.vmx.msr.vmx_entry >> 32ULL;
386 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
387 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
388 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
389 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
390 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
391 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
392 val = pVM->hwaccm.s.vmx.msr.vmx_entry;
393 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
394 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
395 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
396 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
397 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
398 LogRel(("HWACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
399
400 LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit));
401 val = pVM->hwaccm.s.vmx.msr.vmx_exit >> 32ULL;
402 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
403 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
404 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
405 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
406 val = pVM->hwaccm.s.vmx.msr.vmx_exit;
407 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
408 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
409 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
410 LogRel(("HWACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
411
412 LogRel(("HWACCM: MSR_IA32_VMX_MISC = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
413 LogRel(("HWACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
414 LogRel(("HWACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
415 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
416 LogRel(("HWACCM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));
417
418 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0 = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
419 LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1 = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
420 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0 = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
421 LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1 = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
422 LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM = %VX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));
423
424 /* Only try once. */
425 pVM->hwaccm.s.fInitialized = true;
426
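            /* VMMR0_DO_HWACC_SETUP_VM dispatches to ring-0, which presumably
             * allocates and initializes the VMCS for this VM before the first
             * VM entry. */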
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
            AssertRC(rc);
            if (rc == VINF_SUCCESS)
            {
                pVM->fHWACCMEnabled = true;
                pVM->hwaccm.s.vmx.fEnabled = true;
                hwaccmr3DisableRawMode(pVM);

                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
#endif
                LogRel(("HWACCM: VMX enabled!\n"));
            }
            else
            {
                LogRel(("HWACCM: VMX setup failed with rc=%Vrc!\n", rc));
                LogRel(("HWACCM: Last instruction error %x\n", pVM->hwaccm.s.vmx.ulLastInstrError));
                pVM->fHWACCMEnabled = false;
            }
        }
    }
    else
    if (pVM->hwaccm.s.svm.fSupported)
    {
        Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));

        if (pVM->hwaccm.s.fInitialized == false)
        {
            /* Erratum 170, which requires a forced TLB flush for each world switch:
             * see http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
             *
             * All BH-G1/2 and DH-G1/2 models include a fix:
             *   Athlon X2:   0x6b  1/2
             *                0x68  1/2
             *   Athlon 64:   0x7f  1
             *                0x6f  2
             *   Sempron:     0x7f  1/2
             *                0x6f  2
             *                0x6c  2
             *                0x7c  2
             *   Turion 64:   0x68  2
             */
            uint32_t u32Dummy;
            uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
            ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
            u32BaseFamily = (u32Version >> 8) & 0xf;
            u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
            u32Model      = ((u32Version >> 4) & 0xf);
            u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
            u32Stepping   = u32Version & 0xf;
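            /* CPUID leaf 1 EAX encoding: stepping in bits 3:0, base model in 7:4,
             * base family in 11:8; for family 0xf the extended model (bits 19:16)
             * and extended family (bits 27:20) are folded in above. E.g. an Athlon
             * X2 reporting EAX=0x00060FB1 decodes to family 0xf, model 0x6b,
             * stepping 1, one of the fixed models in the table above. */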
            if (    u32Family == 0xf
                &&  !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
                &&  !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
            {
                LogRel(("HWACCM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
            }

            LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureECX = %VX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
            LogRel(("HWACCM: cpuid 0x80000001.u32AMDFeatureEDX = %VX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
            LogRel(("HWACCM: SVM revision = %X\n", pVM->hwaccm.s.svm.u32Rev));
            LogRel(("HWACCM: SVM max ASID = %d\n", pVM->hwaccm.s.svm.u32MaxASID));
            LogRel(("HWACCM: SVM features = %X\n", pVM->hwaccm.s.svm.u32Features));

            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_LBR_VIRT\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SVM_LOCK\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE\n"));
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE)
                LogRel(("HWACCM: AMD_CPUID_SVM_FEATURE_EDX_SSE_3_5_DISABLE\n"));

            /* Only try once. */
            pVM->hwaccm.s.fInitialized = true;

#ifdef VBOX_WITH_NESTED_PAGING
            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                pVM->hwaccm.s.fNestedPaging = true;
#endif

            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
            AssertRC(rc);
            if (rc == VINF_SUCCESS)
            {
                pVM->fHWACCMEnabled = true;
                pVM->hwaccm.s.svm.fEnabled = true;

                hwaccmr3DisableRawMode(pVM);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
#endif
            }
            else
            {
                pVM->fHWACCMEnabled = false;
            }
        }
    }
    return VINF_SUCCESS;
}

/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM         The VM.
 */
HWACCMR3DECL(void) HWACCMR3Relocate(PVM pVM)
{
    Log(("HWACCMR3Relocate to %VGv\n", MMHyperGetArea(pVM, 0)));
    return;
}


/**
 * Checks whether hardware accelerated raw mode is allowed.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
{
    return pVM->hwaccm.s.fAllowed;
}


/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 * This is called by PGM.
 *
 * @param   pVM            The VM to operate on.
 * @param   enmShadowMode  New paging mode.
 */
HWACCMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PGMMODE enmShadowMode)
{
    pVM->hwaccm.s.enmShadowMode = enmShadowMode;
}

/**
 * Terminates the HWACCM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(int) HWACCMR3Term(PVM pVM)
{
    if (pVM->hwaccm.s.pStatExitReason)
    {
        MMHyperFree(pVM, pVM->hwaccm.s.pStatExitReason);
        pVM->hwaccm.s.pStatExitReason = 0;
    }
    return 0;
}


/**
 * The VM is being reset.
 *
 * For the HWACCM component this means that any GDT/LDT/TSS monitors
 * need to be removed.
 *
 * @param   pVM         VM handle.
 */
HWACCMR3DECL(void) HWACCMR3Reset(PVM pVM)
{
    LogFlow(("HWACCMR3Reset:\n"));

    if (pVM->fHWACCMEnabled)
        hwaccmr3DisableRawMode(pVM);

    /* On first entry we'll sync everything. */
    pVM->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;

    pVM->hwaccm.s.vmx.cr0_mask = 0;
    pVM->hwaccm.s.vmx.cr4_mask = 0;

    pVM->hwaccm.s.Event.fPending = false;
}

/**
 * Checks if we can currently use hardware accelerated raw mode.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Partial VM execution context.
 */
HWACCMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
{
    Assert(pVM->fHWACCMEnabled);

    /* AMD SVM supports real & protected mode with or without paging. */
    if (pVM->hwaccm.s.svm.fEnabled)
    {
        pVM->hwaccm.s.fActive = true;
        return true;
    }

    /* @todo we can support real-mode by using v86 and protected mode without paging with identity mapped pages.
     * (but do we really care?)
     */

    pVM->hwaccm.s.fActive = false;

    /** @note The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */

#ifndef HWACCM_VMX_EMULATE_ALL
    /* Too early for VMX. */
    if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr == 0)
        return false;

    /* The guest is about to complete the switch to protected mode. Wait a bit longer. */
    if (pCtx->csHid.Attr.n.u1Present == 0)
        return false;
    if (pCtx->ssHid.Attr.n.u1Present == 0)
        return false;
#endif

    if (pVM->hwaccm.s.vmx.fEnabled)
    {
        uint32_t mask;

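        /* The fixed MSRs pin the guest control registers under VT-x: a 1 in
         * cr0_fixed0/cr4_fixed0 forces that bit to 1 in the guest CR0/CR4, while
         * a 0 in cr0_fixed1/cr4_fixed1 forces it to 0. On typical hardware
         * cr0_fixed0 includes PE and PG, which is why real mode can't be run
         * under VT-x here without HWACCM_VMX_EMULATE_ALL. */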
        /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
        /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
        mask &= ~X86_CR0_NE;
#ifdef HWACCM_VMX_EMULATE_ALL
        /* Note: We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
        mask &= ~(X86_CR0_PG|X86_CR0_PE);
#endif
        if ((pCtx->cr0 & mask) != mask)
            return false;

        /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
        if ((pCtx->cr0 & mask) != 0)
            return false;

        /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
        mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & mask) != mask)
            return false;

        /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
        if ((pCtx->cr4 & mask) != 0)
            return false;

        pVM->hwaccm.s.fActive = true;
        return true;
    }

    return false;
}

/**
 * Checks if we are currently using hardware accelerated raw mode.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(bool) HWACCMR3IsActive(PVM pVM)
{
    return pVM->hwaccm.s.fActive;
}

/**
 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
 *
 * @returns boolean
 * @param   pVM         The VM to operate on.
 */
HWACCMR3DECL(bool) HWACCMR3IsEventPending(PVM pVM)
{
    return HWACCMIsEnabled(pVM) && pVM->hwaccm.s.Event.fPending;
}

/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 */
static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    int rc;

    Log(("hwaccmR3Save:\n"));

    /*
     * Save the basic bits - fortunately all the other things can be resynced on load.
     */
    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.Event.fPending);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.Event.errCode);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU64(pSSM, pVM->hwaccm.s.Event.intInfo);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    int rc;

    Log(("hwaccmR3Load:\n"));

    /*
     * Validate version.
     */
    if (u32Version != HWACCM_SSM_VERSION)
    {
        Log(("hwaccmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.Event.fPending);
    AssertRCReturn(rc, rc);
    rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.Event.errCode);
    AssertRCReturn(rc, rc);
    rc = SSMR3GetU64(pSSM, &pVM->hwaccm.s.Event.intInfo);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}