VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@ 7478

Last change on this file since 7478 was 7478, checked in by vboxsync, 17 years ago

Disable VT-x and AMD-V on all hosts except Windows for now.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 34.2 KB
/* $Id: HWACCMR0.cpp 7478 2008-03-17 17:57:37Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);

/*******************************************************************************
*   Local Variables                                                            *
*******************************************************************************/
/** Global ring-0 HWACCM state, shared by all VMs on this host. */
static struct
{
    /** Per-cpu data. */
    struct
    {
        /** Ring-0 memory object for the page handed to VMXR0EnableCpu / SVMR0EnableCpu. */
        RTR0MEMOBJ  pMemObj;
        /** Set when VT-x has been enabled on this cpu. */
        bool        fVMXConfigured;
        /** Set when AMD-V has been enabled on this cpu. */
        bool        fSVMConfigured;
    } aCpuInfo[RTCPUSET_MAX_CPUS];

    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool        fSupported;

        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t    hostCR4;

        /** VMX MSR values */
        struct
        {
            uint64_t feature_ctrl;
            uint64_t vmx_basic_info;
            uint64_t vmx_pin_ctls;
            uint64_t vmx_proc_ctls;
            uint64_t vmx_exit;
            uint64_t vmx_entry;
            uint64_t vmx_misc;
            uint64_t vmx_cr0_fixed0;
            uint64_t vmx_cr0_fixed1;
            uint64_t vmx_cr4_fixed0;
            uint64_t vmx_cr4_fixed1;
            uint64_t vmx_vmcs_enum;
        } msr;
        /** Last instruction error. */
        uint32_t    ulLastInstrError;
    } vmx;
    struct
    {
        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool        fSupported;

        /** SVM revision. */
        uint32_t    u32Rev;

        /** Maximum ASID allowed. */
        uint32_t    u32MaxASID;
    } svm;
    /** Saved error from detection */
    int32_t     lLastError;

    struct
    {
        uint32_t u32AMDFeatureECX;
        uint32_t u32AMDFeatureEDX;
    } cpuid;

    HWACCMSTATE enmHwAccmState;
} HWACCMR0Globals;



/**
 * Does global Ring-0 HWACCM initialization.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Init()
{
    int        rc;
    RTR0MEMOBJ pScatchMemObj;
    void      *pvScatchPage;
    RTHCPHYS   pScatchPagePhys;

    memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
    HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;

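    /* Allocate a scratch page; on Intel hosts it is used below as a temporary
     * VMXON region (tagged with the VMCS revision id) when probing whether
     * VMX root mode can be entered at all. */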
    rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pvScatchPage    = RTR0MemObjAddress(pScatchMemObj);
    pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
    memset(pvScatchPage, 0, PAGE_SIZE);

#ifdef RT_OS_WINDOWS /* kernel panics on Linux; disabled for now */
 #ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    /*
     * Check for VT-x and AMD-V capabilities
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;

        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);

        if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
           )
        {
            /*
             * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
                memset(aRc, 0, sizeof(aRc));
                HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                    HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                AssertMsg(VBOX_SUCCESS(HWACCMR0Globals.lLastError), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, HWACCMR0Globals.lLastError));

                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                {
                    /* Reread in case we've changed it. */
                    HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                    if (   (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                        == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    {
                        HWACCMR0Globals.vmx.fSupported         = true;
                        HWACCMR0Globals.vmx.msr.vmx_basic_info = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                        HWACCMR0Globals.vmx.msr.vmx_pin_ctls   = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_proc_ctls  = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_exit       = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_entry      = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_misc       = ASMRdMsr(MSR_IA32_VMX_MISC);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_vmcs_enum  = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);

                        /* Make sure we don't get rescheduled to another cpu during this probe. */
                        RTCCUINTREG fFlags = ASMIntDisableFlags();

                        /*
                         * Check CR4.VMXE
                         */
                        HWACCMR0Globals.vmx.hostCR4 = ASMGetCR4();
                        if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
                        {
                            /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
                             * try to execute the VMX instructions...
                             */
                            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
                        }

                        /* Set revision dword at the beginning of the structure. */
                        *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);

#if HC_ARCH_BITS == 64
                        /* Enter VMX Root Mode */
                        rc = VMXEnable(pScatchPagePhys);
                        if (VBOX_FAILURE(rc))
                        {
                            /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
                             * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
                             * (b) turning off paging causes a #GP (unavoidable when switching from long to 32 bits mode)
                             *
                             * They should fix their code, but until they do we simply refuse to run.
                             */
                            HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
                            HWACCMR0Globals.vmx.fSupported = false;
                        }
                        else
                            VMXDisable();
#endif
                        /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
                        ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);

                        ASMSetFlags(fFlags);
                    }
                    else
                    {
                        AssertFailed(); /* can't hit this case anymore */
                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
                    }
                }
            }
            else
                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
        }
        else
        if (    u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
           )
        {
            /*
             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (   (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
                memset(aRc, 0, sizeof(aRc));
                rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
                AssertRC(rc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(rc))
                    rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));

                if (VBOX_SUCCESS(rc))
                {
                    /* Query the SVM revision and the maximum number of ASIDs. */
                    ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &u32Dummy);

                    HWACCMR0Globals.svm.fSupported = true;
                }
                else
                    HWACCMR0Globals.lLastError = rc;
            }
            else
                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
        }
        else
            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;

 #endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */
#endif /* RT_OS_WINDOWS */

    RTR0MemObjFree(pScatchMemObj, false);
    return VINF_SUCCESS;
}


/**
 * Checks the error code array filled in for each cpu in the system.
 *
 * @returns VBox status code.
 * @param   paRc        Error code array
 * @param   cErrorCodes Array size
 * @param   pidCpu      Value of the first cpu that set an error (out)
 */
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
{
    int rc = VINF_SUCCESS;

    Assert(cErrorCodes == RTCPUSET_MAX_CPUS);

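    /* Offline cpus never ran the per-cpu worker, so their (zero-initialized)
     * entries are skipped; report the first online cpu with a failure status. */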
    for (unsigned i=0;i<cErrorCodes;i++)
    {
        if (RTMpIsCpuOnline(i))
        {
            if (VBOX_FAILURE(paRc[i]))
            {
                rc      = paRc[i];
                *pidCpu = i;
                break;
            }
        }
    }
    return rc;
}

/**
 * Does global Ring-0 HWACCM termination.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Term()
{
    int aRc[RTCPUSET_MAX_CPUS];

    memset(aRc, 0, sizeof(aRc));
    int rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
    AssertRC(rc);

    /* Free the per-cpu pages used for VT-x and AMD-V */
    for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
    {
        AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
        if (HWACCMR0Globals.aCpuInfo[i].pMemObj)
        {
            RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
            HWACCMR0Globals.aCpuInfo[i].pMemObj = NULL;
        }
    }
    return rc;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned u32VendorEBX = (uintptr_t)pvUser1;
    int     *paRc         = (int *)pvUser2;
    uint64_t val;

    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
    {
        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
         * Once the lock bit is set, this MSR can no longer be modified.
         */
        if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
        {
            /* MSR is not yet locked; we can change it ourselves here */
            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        }
        if (   (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            paRc[idCpu] = VINF_SUCCESS;
        else
            paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
    }
    else
    if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
    {
        /* Check if SVM is disabled */
        val = ASMRdMsr(MSR_K8_VM_CR);
        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
        {
            /* Turn on SVM in the EFER MSR. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (!(val & MSR_K6_EFER_SVME))
                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

            /* Paranoia. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (val & MSR_K6_EFER_SVME)
                paRc[idCpu] = VINF_SUCCESS;
            else
                paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
        }
        else
            paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
    }
    else
        AssertFailed(); /* can't happen */
    return;
}


/**
 * Sets up HWACCM on all cpus.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   enmNewHwAccmState   New hwaccm state
 *
 */
HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
{
    Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
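    /* Only the first caller can transition the state away from UNINITIALIZED;
     * later callers must request the same mode or fail with VERR_ACCESS_DENIED. */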
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
    {
        int     aRc[RTCPUSET_MAX_CPUS];
        RTCPUID idCpu = 0;

        memset(aRc, 0, sizeof(aRc));

        /* Allocate one page per cpu for the global vt-x and amd-v pages */
        for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
        {
            Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);

            /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
            if (RTMpIsCpuOnline(i))
            {
                int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                if (RT_FAILURE(rc))
                    return rc;

                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
                memset(pvR0, 0, PAGE_SIZE);
            }
        }

        /* First time, so initialize each cpu/core */
        int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);

        /* Check the return code of all invocations. */
        if (VBOX_SUCCESS(rc))
            rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

        AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
        return rc;
    }

    if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
        return VINF_SUCCESS;

    /* Request to change the mode is not allowed */
    return VERR_ACCESS_DENIED;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM       pVM  = (PVM)pvUser1;
    int      *paRc = (int *)pvUser2;
    void     *pvPageCpu;
    RTHCPHYS  pPageCpuPhys;

    Assert(pVM);
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    /* Should never happen */
    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
    {
        AssertFailed();
        return;
    }

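    /* This is the page HWACCMR0EnableAllCpus allocated for this cpu; it is handed
     * to the VT-x/AMD-V specific code below when enabling hardware virtualization. */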
    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        paRc[idCpu] = VMXR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = true;
    }
    else
    if (pVM->hwaccm.s.svm.fSupported)
    {
        paRc[idCpu] = SVMR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = true;
    }
    return;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    void     *pvPageCpu;
    RTHCPHYS  pPageCpuPhys;
    int      *paRc = (int *)pvUser1;

    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

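    /* Nothing was allocated for this cpu (it was offline when HWACCMR0EnableAllCpus
     * ran, or hwaccm was never enabled), so there is nothing to disable or free. */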
    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
        return;

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured)
    {
        paRc[idCpu] = VMXR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = false;
    }
    else
    if (HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured)
    {
        paRc[idCpu] = SVMR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = false;
    }
    return;
}


/**
 * Does Ring-0 per VM HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM)
{
    LogComFlow(("HWACCMR0Init: %p\n", pVM));

    /* Copy the globally detected capabilities and MSR values into the per-VM structure. */
    pVM->hwaccm.s.vmx.fSupported         = HWACCMR0Globals.vmx.fSupported;
    pVM->hwaccm.s.svm.fSupported         = HWACCMR0Globals.svm.fSupported;

    pVM->hwaccm.s.vmx.msr.feature_ctrl   = HWACCMR0Globals.vmx.msr.feature_ctrl;
    pVM->hwaccm.s.vmx.hostCR4            = HWACCMR0Globals.vmx.hostCR4;
    pVM->hwaccm.s.vmx.msr.vmx_basic_info = HWACCMR0Globals.vmx.msr.vmx_basic_info;
    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls   = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls  = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_exit       = HWACCMR0Globals.vmx.msr.vmx_exit;
    pVM->hwaccm.s.vmx.msr.vmx_entry      = HWACCMR0Globals.vmx.msr.vmx_entry;
    pVM->hwaccm.s.vmx.msr.vmx_misc       = HWACCMR0Globals.vmx.msr.vmx_misc;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum  = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
    pVM->hwaccm.s.svm.u32Rev             = HWACCMR0Globals.svm.u32Rev;
    pVM->hwaccm.s.svm.u32MaxASID         = HWACCMR0Globals.svm.u32MaxASID;
    pVM->hwaccm.s.cpuid.u32AMDFeatureECX = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
    pVM->hwaccm.s.lLastError             = HWACCMR0Globals.lLastError;
    return VINF_SUCCESS;
}



/**
 * Sets up a VT-x or AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Set up VT-x or AMD-V, depending on what was detected. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0SetupVM(pVM);
    else
        rc = SVMR0SetupVM(pVM);

    return rc;
}


/**
 * Enters the VT-x or AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

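    /* The intermediate status codes below are OR'ed together; anything other
     * than VINF_SUCCESS aborts the session entry. */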
    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc  = VMXR0Enter(pVM);
        AssertRC(rc);
        rc |= VMXR0SaveHostState(pVM);
        AssertRC(rc);
        rc |= VMXR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        rc  = SVMR0Enter(pVM);
        AssertRC(rc);
        rc |= SVMR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    return VINF_SUCCESS;
}


/**
 * Leaves the VT-x or AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */
    /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     * or trash somebody else's FPU state.
     */

    /* Restore host FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMRestoreHostFPUState\n"));
        /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
        CPUMRestoreHostFPUState(pVM);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0Leave(pVM);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0Leave(pVM);
    }
}

/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0RunGuestCode(pVM, pCtx);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0RunGuestCode(pVM, pCtx);
    }
}


#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   Desc    Descriptor to dump.
 * @param   Sel     Selector number.
 * @param   pszMsg  Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
    #define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
#if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("Reserved1 "),             /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("Reserved3 "),             /* 0x03 */
        STRENTRY("Reserved4 "),             /* 0x04 */
        STRENTRY("Reserved5 "),             /* 0x05 */
        STRENTRY("Reserved6 "),             /* 0x06 */
        STRENTRY("Reserved7 "),             /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS64Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS64Busy "),             /* 0x0b */
        STRENTRY("Call64 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int64 "),                 /* 0x0e */
        STRENTRY("Trap64 "),                /* 0x0f */
#else
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("TSS16Avail "),            /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("TSS16Busy "),             /* 0x03 */
        STRENTRY("Call16 "),                /* 0x04 */
        STRENTRY("Task "),                  /* 0x05 */
        STRENTRY("Int16 "),                 /* 0x06 */
        STRENTRY("Trap16 "),                /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS32Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS32Busy "),             /* 0x0b */
        STRENTRY("Call32 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int32 "),                 /* 0x0e */
        STRENTRY("Trap32 "),                /* 0x0f */
#endif
        /* non system */
        STRENTRY("DataRO "),                /* 0x10 */
        STRENTRY("DataRO Accessed "),       /* 0x11 */
        STRENTRY("DataRW "),                /* 0x12 */
        STRENTRY("DataRW Accessed "),       /* 0x13 */
        STRENTRY("DataDownRO "),            /* 0x14 */
        STRENTRY("DataDownRO Accessed "),   /* 0x15 */
        STRENTRY("DataDownRW "),            /* 0x16 */
        STRENTRY("DataDownRW Accessed "),   /* 0x17 */
        STRENTRY("CodeEO "),                /* 0x18 */
        STRENTRY("CodeEO Accessed "),       /* 0x19 */
        STRENTRY("CodeER "),                /* 0x1a */
        STRENTRY("CodeER Accessed "),       /* 0x1b */
        STRENTRY("CodeConfEO "),            /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),   /* 0x1d */
        STRENTRY("CodeConfER "),            /* 0x1e */
        STRENTRY("CodeConfER Accessed ")    /* 0x1f */
    #undef STRENTRY
    };
    #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char szMsg[128];
    char *psz = &szMsg[0];
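    /* Index into aTypes[]: bit 4 is the descriptor type bit (system vs. code/data),
     * bits 0-3 are the 4-bit type field. */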
    unsigned i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (Desc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
#if HC_ARCH_BITS == 64
    if (Desc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
#else
    if (Desc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (Desc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
#endif
    #undef ADD_STR
    *psz = '\0';

    /*
     * Limit and Base and format the output.
     */
    uint32_t u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
    if (Desc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;

#if HC_ARCH_BITS == 64
    uint64_t u32Base = ((uintptr_t)Desc->Gen.u32BaseHigh3 << 32ULL) | Desc->Gen.u8BaseHigh2 << 24ULL | Desc->Gen.u8BaseHigh1 << 16ULL | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au64[0], Desc->au64[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#else
    uint32_t u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#endif
}

/**
 * Formats a full register dump.
 *
 * @param   pCtx    The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } aFlags[] =
    {
        { "vip",NULL, X86_EFL_VIP },
        { "vif",NULL, X86_EFL_VIF },
        { "ac", NULL, X86_EFL_AC },
        { "vm", NULL, X86_EFL_VM },
        { "rf", NULL, X86_EFL_RF },
        { "nt", NULL, X86_EFL_NT },
        { "ov", "nv", X86_EFL_OF },
        { "dn", "up", X86_EFL_DF },
        { "ei", "di", X86_EFL_IF },
        { "tf", NULL, X86_EFL_TF },
        { "nt", "pl", X86_EFL_SF },
        { "nz", "zr", X86_EFL_ZF },
        { "ac", "na", X86_EFL_AF },
        { "po", "pe", X86_EFL_PF },
        { "cy", "nc", X86_EFL_CF },
    };
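    /* Build a space separated string of the mnemonics for the flags that are set
     * (or of the inverse mnemonic where one is defined for cleared flags). */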
    char szEFlags[80];
    char *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';


    /*
     * Format the registers.
     */
    Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
         "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
         "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
         "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
         "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
         "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
         ,
         pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
         pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
         (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
         (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
         (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
         (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));

    Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
         "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
         "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
         "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
         "tr ={%04x base=%08x limit=%08x flags=%08x}\n"
         "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
         "FCW=%04x FSW=%04x FTW=%04x\n",
         (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
         (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
         pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
         (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
         (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
         pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));


}
#endif