VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@10687

Last change on this file was r10687, checked in by vboxsync, 17 years ago:

Save the FPU control word and MXCSR on entry and restore them afterwards (VT-x & AMD-V).
This is a security measure so the guest can't cause FPU/SSE exceptions, as we no longer
restore the entire host FPU state.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 43.2 KB
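
The change this revision describes (saving the x87 control word and MXCSR before running guest code and restoring them afterwards) lives in the VT-x/AMD-V world-switch code rather than in this dispatcher file. Below is a minimal sketch of the idea; hwaccmR0FpuGuardedRun is a hypothetical wrapper name, and the GCC inline assembly stands in for what the real world switchers do:

/*
 * Sketch only: protect the host's FPU/SSE control state across a guest run.
 * The guest may unmask x87/SSE exceptions; since we no longer restore the
 * full host FPU state, we save and restore just FCW and MXCSR ourselves.
 */
static int hwaccmR0FpuGuardedRun(PVM pVM, CPUMCTX *pCtx)
{
    uint16_t u16HostFCW;
    uint32_t u32HostMXCSR;
    int      rc;

    __asm__ __volatile__("fnstcw %0"  : "=m" (u16HostFCW));    /* save x87 control word */
    __asm__ __volatile__("stmxcsr %0" : "=m" (u32HostMXCSR));  /* save SSE control/status */

    rc = HWACCMR0Globals.pfnRunGuestCode(pVM, pCtx);           /* VT-x or AMD-V world switch */

    __asm__ __volatile__("fldcw %0"   : : "m" (u16HostFCW));   /* restore x87 control word */
    __asm__ __volatile__("ldmxcsr %0" : : "m" (u32HostMXCSR)); /* restore SSE control/status */
    return rc;
}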
/* $Id: HWACCMR0.cpp 10687 2008-07-16 09:22:28Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);

/*******************************************************************************
*   Local Variables                                                            *
*******************************************************************************/

static struct
{
    HWACCM_CPUINFO aCpuInfo[RTCPUSET_MAX_CPUS];

    /** Ring 0 handlers for VT-x and AMD-V. */
    DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PHWACCM_CPUINFO pCpu));
    DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM));
    DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM));
    DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, CPUMCTX *pCtx));
    DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, CPUMCTX *pCtx));
    DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
    DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
    DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM));
    DECLR0CALLBACKMEMBER(int, pfnTermVM,(PVM pVM));
    DECLR0CALLBACKMEMBER(int, pfnSetupVM,(PVM pVM));

    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool                        fSupported;

        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t                    hostCR4;

        /** VMX MSR values */
        struct
        {
            uint64_t                feature_ctrl;
            uint64_t                vmx_basic_info;
            VMX_CAPABILITY          vmx_pin_ctls;
            VMX_CAPABILITY          vmx_proc_ctls;
            VMX_CAPABILITY          vmx_exit;
            VMX_CAPABILITY          vmx_entry;
            uint64_t                vmx_misc;
            uint64_t                vmx_cr0_fixed0;
            uint64_t                vmx_cr0_fixed1;
            uint64_t                vmx_cr4_fixed0;
            uint64_t                vmx_cr4_fixed1;
            uint64_t                vmx_vmcs_enum;
        } msr;
        /* Last instruction error */
        uint32_t                    ulLastInstrError;
    } vmx;
    struct
    {
        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool                        fSupported;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** Maximum ASID allowed. */
        uint32_t                    u32MaxASID;

        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t                    u32Features;
    } svm;
    /** Saved error from detection */
    int32_t                         lLastError;

    struct
    {
        uint32_t                    u32AMDFeatureECX;
        uint32_t                    u32AMDFeatureEDX;
    } cpuid;

    HWACCMSTATE                     enmHwAccmState;
} HWACCMR0Globals;



/**
 * Does global Ring-0 HWACCM initialization.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Init()
{
    int rc;

    memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
    HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;
    for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
        HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;

    /* Fill in all callbacks with placeholders. */
    HWACCMR0Globals.pfnEnterSession     = HWACCMR0DummyEnter;
    HWACCMR0Globals.pfnLeaveSession     = HWACCMR0DummyLeave;
    HWACCMR0Globals.pfnSaveHostState    = HWACCMR0DummySaveHostState;
    HWACCMR0Globals.pfnLoadGuestState   = HWACCMR0DummyLoadGuestState;
    HWACCMR0Globals.pfnRunGuestCode     = HWACCMR0DummyRunGuestCode;
    HWACCMR0Globals.pfnEnableCpu        = HWACCMR0DummyEnableCpu;
    HWACCMR0Globals.pfnDisableCpu       = HWACCMR0DummyDisableCpu;
    HWACCMR0Globals.pfnInitVM           = HWACCMR0DummyInitVM;
    HWACCMR0Globals.pfnTermVM           = HWACCMR0DummyTermVM;
    HWACCMR0Globals.pfnSetupVM          = HWACCMR0DummySetupVM;

#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    /*
     * Check for VT-x and AMD-V capabilities
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;

        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);

        if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
           )
        {
            /*
             * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
                memset(aRc, 0, sizeof(aRc));
                HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                    HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                {
                    /* Reread in case we've changed it. */
                    HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                    if (    (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                         == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    {
                        RTR0MEMOBJ pScatchMemObj;
                        void      *pvScatchPage;
                        RTHCPHYS   pScatchPagePhys;

                        HWACCMR0Globals.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                        HWACCMR0Globals.vmx.msr.vmx_pin_ctls.u  = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_proc_ctls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_exit.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_entry.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
                        HWACCMR0Globals.vmx.hostCR4             = ASMGetCR4();

                        rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                        if (RT_FAILURE(rc))
                            return rc;

                        pvScatchPage    = RTR0MemObjAddress(pScatchMemObj);
                        pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
                        memset(pvScatchPage, 0, PAGE_SIZE);

                        /* Set revision dword at the beginning of the structure. */
                        *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
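                        /* VMXON requires the VMCS revision identifier (from IA32_VMX_BASIC) in the
                           first dword of the VMXON region, or the VMXEnable probe below would fail. */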

                        /* Make sure we don't get rescheduled to another cpu during this probe. */
                        RTCCUINTREG fFlags = ASMIntDisableFlags();

                        /*
                         * Check CR4.VMXE
                         */
                        if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
                        {
                            /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
                             * try to execute the VMX instructions...
                             */
                            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
                        }

                        /* Enter VMX Root Mode */
                        rc = VMXEnable(pScatchPagePhys);
                        if (VBOX_FAILURE(rc))
                        {
                            /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
                             * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
                             * (b) turning off paging causes a #GP (unavoidable when switching from long to 32 bits mode or 32 bits to PAE)
                             *
                             * They should fix their code, but until they do we simply refuse to run.
                             */
                            HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
                        }
                        else
                        {
                            HWACCMR0Globals.vmx.fSupported = true;
                            VMXDisable();
                        }

                        /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
                        ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
                        ASMSetFlags(fFlags);

                        RTR0MemObjFree(pScatchMemObj, false);
                        if (VBOX_FAILURE(HWACCMR0Globals.lLastError))
                            return HWACCMR0Globals.lLastError;
                    }
                    else
                    {
                        AssertFailed(); /* can't hit this case anymore */
                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
                    }
                }
#ifdef LOG_ENABLED
                else
                    SUPR0Printf("HWACCMR0InitCPU failed with rc=%d\n", HWACCMR0Globals.lLastError);
#endif
            }
            else
                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
        }
        else
        if (    u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
           )
        {
            /*
             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (   (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
                memset(aRc, 0, sizeof(aRc));
                rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
                AssertRC(rc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(rc))
                    rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));

                if (VBOX_SUCCESS(rc))
                {
                    /* Query AMD features. */
                    ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);

                    HWACCMR0Globals.svm.fSupported = true;
                }
                else
                    HWACCMR0Globals.lLastError = rc;
            }
            else
                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
        }
        else
            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;

#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */

    if (HWACCMR0Globals.vmx.fSupported)
    {
        HWACCMR0Globals.pfnEnterSession     = VMXR0Enter;
        HWACCMR0Globals.pfnLeaveSession     = VMXR0Leave;
        HWACCMR0Globals.pfnSaveHostState    = VMXR0SaveHostState;
        HWACCMR0Globals.pfnLoadGuestState   = VMXR0LoadGuestState;
        HWACCMR0Globals.pfnRunGuestCode     = VMXR0RunGuestCode;
        HWACCMR0Globals.pfnEnableCpu        = VMXR0EnableCpu;
        HWACCMR0Globals.pfnDisableCpu       = VMXR0DisableCpu;
        HWACCMR0Globals.pfnInitVM           = VMXR0InitVM;
        HWACCMR0Globals.pfnTermVM           = VMXR0TermVM;
        HWACCMR0Globals.pfnSetupVM          = VMXR0SetupVM;
    }
    else
    if (HWACCMR0Globals.svm.fSupported)
    {
        HWACCMR0Globals.pfnEnterSession     = SVMR0Enter;
        HWACCMR0Globals.pfnLeaveSession     = SVMR0Leave;
        HWACCMR0Globals.pfnSaveHostState    = SVMR0SaveHostState;
        HWACCMR0Globals.pfnLoadGuestState   = SVMR0LoadGuestState;
        HWACCMR0Globals.pfnRunGuestCode     = SVMR0RunGuestCode;
        HWACCMR0Globals.pfnEnableCpu        = SVMR0EnableCpu;
        HWACCMR0Globals.pfnDisableCpu       = SVMR0DisableCpu;
        HWACCMR0Globals.pfnInitVM           = SVMR0InitVM;
        HWACCMR0Globals.pfnTermVM           = SVMR0TermVM;
        HWACCMR0Globals.pfnSetupVM          = SVMR0SetupVM;
    }

    return VINF_SUCCESS;
}


/**
 * Checks the error code array filled in for each cpu in the system.
 *
 * @returns VBox status code.
 * @param   paRc        Error code array
 * @param   cErrorCodes Array size
 * @param   pidCpu      Value of the first cpu that set an error (out)
 */
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
{
    int rc = VINF_SUCCESS;

    Assert(cErrorCodes == RTCPUSET_MAX_CPUS);

    for (unsigned i = 0; i < cErrorCodes; i++)
    {
        if (RTMpIsCpuOnline(i))
        {
            if (VBOX_FAILURE(paRc[i]))
            {
                rc = paRc[i];
                *pidCpu = i;
                break;
            }
        }
    }
    return rc;
}

/**
 * Does global Ring-0 HWACCM termination.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Term()
{
    int aRc[RTCPUSET_MAX_CPUS];

    memset(aRc, 0, sizeof(aRc));
    int rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
    Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);

    /* Free the per-cpu pages used for VT-x and AMD-V */
    for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
    {
        AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
        if (HWACCMR0Globals.aCpuInfo[i].pMemObj != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
            HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
        }
    }
    return rc;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned u32VendorEBX = (uintptr_t)pvUser1;
    int     *paRc = (int *)pvUser2;
    uint64_t val;

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
#endif
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)

    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
    {
        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
         * Once the lock bit is set, this MSR can no longer be modified.
         */
        if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
        {
            /* MSR is not yet locked; we can change it ourselves here */
            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        }
        if (    (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
             == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            paRc[idCpu] = VINF_SUCCESS;
        else
            paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
    }
    else
    if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
    {
        /* Check if SVM is disabled */
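        /* VM_CR.SVMDIS is typically set and locked by the BIOS; while it is set,
           attempts to set EFER.SVME fault, so all we can do is report the condition. */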
        val = ASMRdMsr(MSR_K8_VM_CR);
        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
        {
            /* Turn on SVM in the EFER MSR. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (!(val & MSR_K6_EFER_SVME))
                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

            /* Paranoia. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (val & MSR_K6_EFER_SVME)
                paRc[idCpu] = VINF_SUCCESS;
            else
                paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
        }
        else
            paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
    }
    else
        AssertFailed(); /* can't happen */
    return;
}


/**
 * Sets up HWACCM on all cpus.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   enmNewHwAccmState   New hwaccm state
 */
HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
{
    Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
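    /* Only the very first caller gets to move the state from UNINITIALIZED to the
       requested mode; later callers must request the same mode or fail below. */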
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
    {
        int     aRc[RTCPUSET_MAX_CPUS];
        RTCPUID idCpu = 0;

        /* Don't setup hwaccm as that might not work (vt-x & 64 bits raw mode) */
        if (enmNewHwAccmState == HWACCMSTATE_DISABLED)
            return VINF_SUCCESS;

        memset(aRc, 0, sizeof(aRc));

        /* Allocate one page per cpu for the global vt-x and amd-v pages */
        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
        {
            Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);

            /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
            if (RTMpIsCpuOnline(i))
            {
                int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                    return rc;

                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
                Assert(pvR0);
                ASMMemZeroPage(pvR0);

#ifdef LOG_ENABLED
                SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
#endif
            }
        }
        /* First time, so initialize each cpu/core */
        int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);

        /* Check the return code of all invocations. */
        if (VBOX_SUCCESS(rc))
            rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

        AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
        return rc;
    }

    if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
        return VINF_SUCCESS;

    /* Request to change the mode is not allowed */
    return VERR_ACCESS_DENIED;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM             pVM = (PVM)pvUser1;
    int            *paRc = (int *)pvUser2;
    void           *pvPageCpu;
    RTHCPHYS        pPageCpuPhys;
    PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];

    Assert(pVM);
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    pCpu->idCpu = idCpu;

    /* Make sure we start with a clean TLB. */
    pCpu->fFlushTLB = true;

    /* Should never happen */
    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
    {
        AssertFailed();
        paRc[idCpu] = VERR_INTERNAL_ERROR;
        return;
    }

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    paRc[idCpu] = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
    AssertRC(paRc[idCpu]);
    if (VBOX_SUCCESS(paRc[idCpu]))
        HWACCMR0Globals.aCpuInfo[idCpu].fConfigured = true;

    return;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    void     *pvPageCpu;
    RTHCPHYS  pPageCpuPhys;
    int      *paRc = (int *)pvUser1;

    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
        return;

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    paRc[idCpu] = HWACCMR0Globals.pfnDisableCpu(&HWACCMR0Globals.aCpuInfo[idCpu], pvPageCpu, pPageCpuPhys);
    AssertRC(paRc[idCpu]);
    HWACCMR0Globals.aCpuInfo[idCpu].fConfigured = false;
    return;
}


/**
 * Does Ring-0 per VM HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
#endif

    pVM->hwaccm.s.vmx.fSupported            = HWACCMR0Globals.vmx.fSupported;
    pVM->hwaccm.s.svm.fSupported            = HWACCMR0Globals.svm.fSupported;

    pVM->hwaccm.s.vmx.msr.feature_ctrl      = HWACCMR0Globals.vmx.msr.feature_ctrl;
    pVM->hwaccm.s.vmx.hostCR4               = HWACCMR0Globals.vmx.hostCR4;
    pVM->hwaccm.s.vmx.msr.vmx_basic_info    = HWACCMR0Globals.vmx.msr.vmx_basic_info;
    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_exit          = HWACCMR0Globals.vmx.msr.vmx_exit;
    pVM->hwaccm.s.vmx.msr.vmx_entry         = HWACCMR0Globals.vmx.msr.vmx_entry;
    pVM->hwaccm.s.vmx.msr.vmx_misc          = HWACCMR0Globals.vmx.msr.vmx_misc;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
    pVM->hwaccm.s.svm.u32Rev                = HWACCMR0Globals.svm.u32Rev;
    pVM->hwaccm.s.svm.u32MaxASID            = HWACCMR0Globals.svm.u32MaxASID;
    pVM->hwaccm.s.svm.u32Features           = HWACCMR0Globals.svm.u32Features;
    pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
    pVM->hwaccm.s.lLastError                = HWACCMR0Globals.lLastError;

    /* Init a VT-x or AMD-V VM. */
    return HWACCMR0Globals.pfnInitVM(pVM);
}


/**
 * Does Ring-0 per VM HWACCM termination.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0TermVM(PVM pVM)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
#endif

    /* Terminate a VT-x or AMD-V VM. */
    return HWACCMR0Globals.pfnTermVM(pVM);
}


/**
 * Sets up a VT-x or AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
{
    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
#endif

    /* Setup VT-x or AMD-V. */
    return HWACCMR0Globals.pfnSetupVM(pVM);
}


/**
 * Enters the VT-x or AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;
    RTCPUID  idCpu = RTMpCpuId();

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

    /* Setup the register and mask according to the current execution mode. */
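    /* EFER.LMA is set only while long mode is active, so it tells us whether the
       guest uses the full 64-bit register width or just the low 32 bits. */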
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
    else
        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);

    rc  = HWACCMR0Globals.pfnEnterSession(pVM, &HWACCMR0Globals.aCpuInfo[idCpu]);
    AssertRC(rc);
    /* We must save the host context here (VT-x) as we might be rescheduled on a different cpu after a long jump back to ring 3. */
    rc |= HWACCMR0Globals.pfnSaveHostState(pVM);
    AssertRC(rc);
    rc |= HWACCMR0Globals.pfnLoadGuestState(pVM, pCtx);
    AssertRC(rc);
    return rc;
}


/**
 * Leaves the VT-x or AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */
    /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     * or trash somebody else's FPU state.
     */
    /* Save the guest FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMR0SaveGuestFPU\n"));
        CPUMR0SaveGuestFPU(pVM, pCtx);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    return HWACCMR0Globals.pfnLeaveSession(pVM);
}

/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;
    RTCPUID  idCpu = RTMpCpuId();

    Assert(!VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
    Assert(HWACCMR0Globals.aCpuInfo[idCpu].fConfigured);

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    return HWACCMR0Globals.pfnRunGuestCode(pVM, pCtx);
}

/**
 * Returns the cpu structure for the current cpu.
 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
 *
 * @returns cpu structure pointer
 */
HWACCMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu()
{
    RTCPUID idCpu = RTMpCpuId();

    return &HWACCMR0Globals.aCpuInfo[idCpu];
}

#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   pDesc       Descriptor to dump.
 * @param   Sel         Selector number.
 * @param   pszMsg      Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
#define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
#if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("Reserved1 "),                  /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("Reserved3 "),                  /* 0x03 */
        STRENTRY("Reserved4 "),                  /* 0x04 */
        STRENTRY("Reserved5 "),                  /* 0x05 */
        STRENTRY("Reserved6 "),                  /* 0x06 */
        STRENTRY("Reserved7 "),                  /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS64Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS64Busy "),                  /* 0x0b */
        STRENTRY("Call64 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int64 "),                      /* 0x0e */
        STRENTRY("Trap64 "),                     /* 0x0f */
#else
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("TSS16Avail "),                 /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("TSS16Busy "),                  /* 0x03 */
        STRENTRY("Call16 "),                     /* 0x04 */
        STRENTRY("Task "),                       /* 0x05 */
        STRENTRY("Int16 "),                      /* 0x06 */
        STRENTRY("Trap16 "),                     /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS32Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS32Busy "),                  /* 0x0b */
        STRENTRY("Call32 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int32 "),                      /* 0x0e */
        STRENTRY("Trap32 "),                     /* 0x0f */
#endif
        /* non system */
        STRENTRY("DataRO "),                     /* 0x10 */
        STRENTRY("DataRO Accessed "),            /* 0x11 */
        STRENTRY("DataRW "),                     /* 0x12 */
        STRENTRY("DataRW Accessed "),            /* 0x13 */
        STRENTRY("DataDownRO "),                 /* 0x14 */
        STRENTRY("DataDownRO Accessed "),        /* 0x15 */
        STRENTRY("DataDownRW "),                 /* 0x16 */
        STRENTRY("DataDownRW Accessed "),        /* 0x17 */
        STRENTRY("CodeEO "),                     /* 0x18 */
        STRENTRY("CodeEO Accessed "),            /* 0x19 */
        STRENTRY("CodeER "),                     /* 0x1a */
        STRENTRY("CodeER Accessed "),            /* 0x1b */
        STRENTRY("CodeConfEO "),                 /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),        /* 0x1d */
        STRENTRY("CodeConfER "),                 /* 0x1e */
        STRENTRY("CodeConfER Accessed ")         /* 0x1f */
#undef STRENTRY
    };
#define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char szMsg[128];
    char *psz = &szMsg[0];
    unsigned i = pDesc->Gen.u1DescType << 4 | pDesc->Gen.u4Type;
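    /* i indexes aTypes: bit 4 is the descriptor type (0 = system, 1 = code/data),
       bits 0-3 the raw type field. */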
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (pDesc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
#if HC_ARCH_BITS == 64
    if (pDesc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
#else
    if (pDesc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (pDesc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
#endif
#undef ADD_STR
    *psz = '\0';

    /*
     * Limit and Base and format the output.
     */
    uint32_t u32Limit = X86DESC_LIMIT(*pDesc);
    if (pDesc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
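    /* Granularity set means the limit is in 4 KB pages: scale it up and fill the low 12 bits. */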

#if HC_ARCH_BITS == 64
    uint64_t u32Base = X86DESC64_BASE(*pDesc);

    Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, pDesc->au64[0], pDesc->au64[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
#else
    uint32_t u32Base = X86DESC_BASE(*pDesc);

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, pDesc->au32[0], pDesc->au32[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
#endif
}

/**
 * Formats a full register dump.
 *
 * @param   pVM         The VM to operate on.
 * @param   pCtx        The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PVM pVM, PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } aFlags[] =
    {
        { "vip",NULL, X86_EFL_VIP },
        { "vif",NULL, X86_EFL_VIF },
        { "ac", NULL, X86_EFL_AC },
        { "vm", NULL, X86_EFL_VM },
        { "rf", NULL, X86_EFL_RF },
        { "nt", NULL, X86_EFL_NT },
        { "ov", "nv", X86_EFL_OF },
        { "dn", "up", X86_EFL_DF },
        { "ei", "di", X86_EFL_IF },
        { "tf", NULL, X86_EFL_TF },
971 { "nt", "pl", X86_EFL_SF },
972 { "nz", "zr", X86_EFL_ZF },
973 { "ac", "na", X86_EFL_AF },
974 { "po", "pe", X86_EFL_PF },
975 { "cy", "nc", X86_EFL_CF },
976 };
977 char szEFlags[80];
978 char *psz = szEFlags;
979 uint32_t efl = pCtx->eflags.u32;
980 for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
981 {
982 const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
983 if (pszAdd)
984 {
985 strcpy(psz, pszAdd);
986 psz += strlen(pszAdd);
987 *psz++ = ' ';
988 }
989 }
990 psz[-1] = '\0';
991
992
993 /*
994 * Format the registers.
995 */
996 if (CPUMIsGuestIn64BitCode(pVM, CPUMCTX2CORE(pCtx)))
997 {
998 Log(("rax=%016RX64 rbx=%016RX64 rcx=%016RX64 rdx=%016RX64\n"
999 "rsi=%016RX64 rdi=%016RX64 r8 =%016RX64 r9 =%016RX64\n"
1000 "r10=%016RX64 r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1001 "r14=%016RX64 r15=%016RX64\n"
1002 "rip=%016RX64 rsp=%016RX64 rbp=%016RX64 iopl=%d %*s\n"
1003 "cs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1004 "ds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1005 "es={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1006 "fs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1007 "gs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1008 "ss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1009 "cr0=%016RX64 cr2=%016RX64 cr3=%016RX64 cr4=%016RX64\n"
1010 "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64 dr3=%016RX64\n"
1011 "dr4=%016RX64 dr5=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
1012 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1013 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1014 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1015 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1016 ,
1017 pCtx->rax, pCtx->rbx, pCtx->rcx, pCtx->rdx, pCtx->rsi, pCtx->rdi,
1018 pCtx->r8, pCtx->r9, pCtx->r10, pCtx->r11, pCtx->r12, pCtx->r13,
1019 pCtx->r14, pCtx->r15,
1020 pCtx->rip, pCtx->rsp, pCtx->rbp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
1021 (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
1022 (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
1023 (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
1024 (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
1025 (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
1026 (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
1027 pCtx->cr0, pCtx->cr2, pCtx->cr3, pCtx->cr4,
1028 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3,
1029 pCtx->dr4, pCtx->dr5, pCtx->dr6, pCtx->dr7,
1030 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
1031 (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1032 (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1033 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1034 }
1035 else
1036 Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
1037 "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
1038 "cs={%04x base=%016RX64 limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
1039 "ds={%04x base=%016RX64 limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
1040 "es={%04x base=%016RX64 limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
1041 "fs={%04x base=%016RX64 limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
1042 "gs={%04x base=%016RX64 limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
1043 "ss={%04x base=%016RX64 limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
1044 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1045 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1046 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1047 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1048 ,
1049 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
1050 pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
1051 (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
1052 (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
1053 (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
1054 (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7,
1055 (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
1056 (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
1057 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
1058 (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1059 (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1060 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1061
1062 Log(("FPU:\n"
1063 "FCW=%04x FSW=%04x FTW=%02x\n"
1064 "res1=%02x FOP=%04x FPUIP=%08x CS=%04x Rsvrd1=%04x\n"
1065 "FPUDP=%04x DS=%04x Rsvrd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n"
1066 ,
1067 pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW,
1068 pCtx->fpu.huh1, pCtx->fpu.FOP, pCtx->fpu.FPUIP, pCtx->fpu.CS, pCtx->fpu.Rsvrd1,
1069 pCtx->fpu.FPUDP, pCtx->fpu.DS, pCtx->fpu.Rsrvd2,
1070 pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK));
1071
1072
1073 Log(("MSR:\n"
1074 "EFER =%016RX64\n"
1075 "PAT =%016RX64\n"
1076 "STAR =%016RX64\n"
1077 "CSTAR =%016RX64\n"
1078 "LSTAR =%016RX64\n"
1079 "SFMASK =%016RX64\n"
1080 "KERNELGSBASE =%016RX64\n",
1081 pCtx->msrEFER,
1082 pCtx->msrPAT,
1083 pCtx->msrSTAR,
1084 pCtx->msrCSTAR,
1085 pCtx->msrLSTAR,
1086 pCtx->msrSFMASK,
1087 pCtx->msrKERNELGSBASE));
1088
1089}
1090#endif
1091
1092/* Dummy callback handlers. */
1093HWACCMR0DECL(int) HWACCMR0DummyEnter(PVM pVM, PHWACCM_CPUINFO pCpu)
1094{
1095 return VINF_SUCCESS;
1096}
1097
1098HWACCMR0DECL(int) HWACCMR0DummyLeave(PVM pVM)
1099{
1100 return VINF_SUCCESS;
1101}
1102
1103HWACCMR0DECL(int) HWACCMR0DummyEnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
1104{
1105 return VINF_SUCCESS;
1106}
1107
1108HWACCMR0DECL(int) HWACCMR0DummyDisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
1109{
1110 return VINF_SUCCESS;
1111}
1112
1113HWACCMR0DECL(int) HWACCMR0DummyInitVM(PVM pVM)
1114{
1115 return VINF_SUCCESS;
1116}
1117
1118HWACCMR0DECL(int) HWACCMR0DummyTermVM(PVM pVM)
1119{
1120 return VINF_SUCCESS;
1121}
1122
1123HWACCMR0DECL(int) HWACCMR0DummySetupVM(PVM pVM)
1124{
1125 return VINF_SUCCESS;
1126}
1127
1128HWACCMR0DECL(int) HWACCMR0DummyRunGuestCode(PVM pVM, CPUMCTX *pCtx)
1129{
1130 return VINF_SUCCESS;
1131}
1132
1133HWACCMR0DECL(int) HWACCMR0DummySaveHostState(PVM pVM)
1134{
1135 return VINF_SUCCESS;
1136}
1137
1138HWACCMR0DECL(int) HWACCMR0DummyLoadGuestState(PVM pVM, CPUMCTX *pCtx)
1139{
1140 return VINF_SUCCESS;
1141}