VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@7533

Last change on this file since 7533 was 7524, checked in by vboxsync, 17 years ago

Linux VT-x/AMD-V enabled again.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 35.8 KB
/* $Id: HWACCMR0.cpp 7524 2008-03-25 09:54:26Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static int                hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);

/*******************************************************************************
*   Local Variables                                                            *
*******************************************************************************/
static struct
{
    struct
    {
        RTR0MEMOBJ  pMemObj;
        bool        fVMXConfigured;
        bool        fSVMConfigured;
    } aCpuInfo[RTCPUSET_MAX_CPUS];

    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool        fSupported;

        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t    hostCR4;

        /** VMX MSR values. */
        struct
        {
            uint64_t feature_ctrl;
            uint64_t vmx_basic_info;
            uint64_t vmx_pin_ctls;
            uint64_t vmx_proc_ctls;
            uint64_t vmx_exit;
            uint64_t vmx_entry;
            uint64_t vmx_misc;
            uint64_t vmx_cr0_fixed0;
            uint64_t vmx_cr0_fixed1;
            uint64_t vmx_cr4_fixed0;
            uint64_t vmx_cr4_fixed1;
            uint64_t vmx_vmcs_enum;
        } msr;

        /** Last instruction error. */
        uint32_t    ulLastInstrError;
    } vmx;

    struct
    {
        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool        fSupported;

        /** SVM revision. */
        uint32_t    u32Rev;

        /** Maximum ASID allowed. */
        uint32_t    u32MaxASID;
    } svm;

    /** Saved error from detection. */
    int32_t     lLastError;

    struct
    {
        uint32_t u32AMDFeatureECX;
        uint32_t u32AMDFeatureEDX;
    } cpuid;

    HWACCMSTATE enmHwAccmState;
} HWACCMR0Globals;
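
/*
 * Rough lifecycle of this module, reconstructed from the functions below
 * (a sketch for orientation, not an authoritative contract):
 *
 *     HWACCMR0Init();                                  // once, at module load
 *     HWACCMR0EnableAllCpus(pVM, enmNewHwAccmState);   // first VM fixes the global mode
 *     HWACCMR0InitVM(pVM);                             // per VM
 *     HWACCMR0SetupVM(pVM);                            // per VM
 *     HWACCMR0Enter(pVM);                              // \
 *     HWACCMR0RunGuestCode(pVM);                       //  > per guest execution round
 *     HWACCMR0Leave(pVM);                              // /
 *     HWACCMR0TermVM(pVM);                             // per VM
 *     HWACCMR0Term();                                  // once, at module unload
 */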


/**
 * Does global Ring-0 HWACCM initialization.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Init()
{
    int rc;

    memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
    HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;

#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    /*
     * Check for VT-x and AMD-V capabilities.
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;

        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);

        if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX)
        {
            /*
             * Read all VMX MSRs if VMX is available; this also requires RDMSR/WRMSR support.
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR))
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
                memset(aRc, 0, sizeof(aRc));
                HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                    HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                AssertMsg(VBOX_SUCCESS(HWACCMR0Globals.lLastError), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, HWACCMR0Globals.lLastError));
                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                {
                    /* Reread in case we've changed it. */
                    HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                    if (    (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
                        ==  (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
                    {
                        HWACCMR0Globals.vmx.fSupported          = true;
                        HWACCMR0Globals.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                        HWACCMR0Globals.vmx.msr.vmx_pin_ctls    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_proc_ctls   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_exit        = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_entry       = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
                        HWACCMR0Globals.vmx.hostCR4             = ASMGetCR4();

#if HC_ARCH_BITS == 64
                        RTR0MEMOBJ pScratchMemObj;
                        void      *pvScratchPage;
                        RTHCPHYS   pScratchPagePhys;

                        rc = RTR0MemObjAllocCont(&pScratchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                        if (RT_FAILURE(rc))
                            return rc;

                        pvScratchPage    = RTR0MemObjAddress(pScratchMemObj);
                        pScratchPagePhys = RTR0MemObjGetPagePhysAddr(pScratchMemObj, 0);
                        memset(pvScratchPage, 0, PAGE_SIZE);

                        /* Set the revision dword at the beginning of the structure. */
                        *(uint32_t *)pvScratchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
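
                        /* Architectural note: VMXON requires a page-aligned 4KB region whose
                         * first 32 bits hold the VMCS revision identifier reported by
                         * IA32_VMX_BASIC; that is what the dword store above provides. */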

                        /* Make sure we don't get rescheduled to another cpu during this probe. */
                        RTCCUINTREG fFlags = ASMIntDisableFlags();

                        /*
                         * Check CR4.VMXE.
                         */
                        if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
                        {
                            /* In theory this bit could be cleared behind our back, which would cause
                             * #UD faults when we try to execute the VMX instructions...
                             */
                            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
                        }

                        /* Enter VMX root mode. */
                        rc = VMXEnable(pScratchPagePhys);
                        if (VBOX_FAILURE(rc))
                        {
                            /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because:
                             *   (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit), and
                             *   (b) turning off paging causes a #GP (unavoidable when switching from long mode to 32-bit mode).
                             *
                             * They should fix their code, but until they do we simply refuse to run.
                             */
                            HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
                            HWACCMR0Globals.vmx.fSupported = false;
                        }
                        else
                            VMXDisable();

                        /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode). */
                        ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
                        ASMSetFlags(fFlags);

                        RTR0MemObjFree(pScratchMemObj, false);
#endif
                    }
                    else
                    {
                        AssertFailed(); /* can't hit this case anymore */
                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
                    }
                }
            }
            else
                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
        }
        else
        if (    u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX)
        {
            /*
             * Read all SVM MSRs if SVM is available; this also requires RDMSR/WRMSR support.
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (    (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                &&  (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR))
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
                memset(aRc, 0, sizeof(aRc));
                rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
                AssertRC(rc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(rc))
                    rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));

                if (VBOX_SUCCESS(rc))
                {
                    /* Query AMD SVM features. */
                    ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &u32Dummy);

                    HWACCMR0Globals.svm.fSupported = true;
                }
                else
                    HWACCMR0Globals.lLastError = rc;
            }
            else
                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
        }
        else
            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;

#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */

    return VINF_SUCCESS;
}


/**
 * Checks the error code array filled in for each cpu in the system.
 *
 * @returns VBox status code.
 * @param   paRc        Error code array.
 * @param   cErrorCodes Array size.
 * @param   pidCpu      Value of the first cpu that set an error (out).
 */
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
{
    int rc = VINF_SUCCESS;

    Assert(cErrorCodes == RTCPUSET_MAX_CPUS);

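    /* Note: the loop below assumes RTCPUID values are dense and can be used
     * directly as indices into paRc; the HWACCMR0InitCPU worker fills the
     * array under the same assumption. */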
    for (unsigned i = 0; i < cErrorCodes; i++)
    {
        if (RTMpIsCpuOnline(i))
        {
            if (VBOX_FAILURE(paRc[i]))
            {
                rc = paRc[i];
                *pidCpu = i;
                break;
            }
        }
    }
    return rc;
}


/**
 * Does global Ring-0 HWACCM termination.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Term()
{
    int aRc[RTCPUSET_MAX_CPUS];

    memset(aRc, 0, sizeof(aRc));
    int rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
    AssertRC(rc);

    /* Free the per-cpu pages used for VT-x and AMD-V. */
    for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
    {
        AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
        if (HWACCMR0Globals.aCpuInfo[i].pMemObj)
        {
            RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
            HWACCMR0Globals.aCpuInfo[i].pMemObj = NULL;
        }
    }
    return rc;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned u32VendorEBX = (uintptr_t)pvUser1;
    int     *paRc = (int *)pvUser2;
    uint64_t val;

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
#endif

    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
    {
        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * Both the LOCK and VMXON bits must be set; otherwise VMXON will generate a #GP.
         * Once the lock bit is set, this MSR can no longer be modified.
         */
        if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK)))
        {
            /* MSR is not yet locked; we can change it ourselves here. */
            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        }
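
        /* If the BIOS locked the MSR with the VMXON bit clear, VT-x cannot be
         * enabled from software; it stays unavailable until the firmware
         * setting is changed and the host is rebooted. All we can do below is
         * report the condition. */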
        if (    (val & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
            ==  (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
            paRc[idCpu] = VINF_SUCCESS;
        else
            paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
    }
    else
    if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
    {
        /* Check if SVM is disabled. */
        val = ASMRdMsr(MSR_K8_VM_CR);
        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
        {
            /* Turn on SVM in the EFER MSR. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (!(val & MSR_K6_EFER_SVME))
                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

            /* Paranoia: read it back and verify. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (val & MSR_K6_EFER_SVME)
                paRc[idCpu] = VINF_SUCCESS;
            else
                paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
        }
        else
            paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
    }
    else
        AssertFailed(); /* can't happen */
    return;
}


/**
 * Sets up HWACCM on all cpus.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   enmNewHwAccmState   New hwaccm state.
 */
HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
{
    Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
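    /* The compare-and-exchange makes the first caller's mode the global one:
     * VT-x/AMD-V is claimed once for the whole host, so later callers must
     * request the same mode or get VERR_ACCESS_DENIED at the bottom. */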
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
    {
        int     aRc[RTCPUSET_MAX_CPUS];
        RTCPUID idCpu = 0;

        /* Don't set up hwaccm, as that might not work (VT-x & 64-bit raw mode). */
        if (enmNewHwAccmState == HWACCMSTATE_DISABLED)
            return VINF_SUCCESS;

        memset(aRc, 0, sizeof(aRc));

        /* Allocate one page per cpu for the global VT-x and AMD-V pages. */
        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
        {
            Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);

            /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
            if (RTMpIsCpuOnline(i))
            {
                int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                    return rc;

                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
                Assert(pvR0);
                memset(pvR0, 0, PAGE_SIZE);

#ifdef LOG_ENABLED
                SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
#endif
            }
        }
        /* First time, so initialize each cpu/core. */
        int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);

        /* Check the return code of all invocations. */
        if (VBOX_SUCCESS(rc))
            rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

        AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
        return rc;
    }

    if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
        return VINF_SUCCESS;

    /* A request to change the mode is not allowed. */
    return VERR_ACCESS_DENIED;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM      pVM = (PVM)pvUser1;
    int     *paRc = (int *)pvUser2;
    void    *pvPageCpu;
    RTHCPHYS pPageCpuPhys;

    Assert(pVM);
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    /* Should never happen. */
    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
    {
        AssertFailed();
        return;
    }

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

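    /* This per-cpu page is handed to the backend below; architecturally VT-x
     * needs a VMXON region and AMD-V a host state-save area per cpu, which is
     * presumably what VMXR0EnableCpu/SVMR0EnableCpu use it for. */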
    if (pVM->hwaccm.s.vmx.fSupported)
    {
        paRc[idCpu] = VMXR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = true;
    }
    else
    if (pVM->hwaccm.s.svm.fSupported)
    {
        paRc[idCpu] = SVMR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = true;
    }
    return;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    void    *pvPageCpu;
    RTHCPHYS pPageCpuPhys;
    int     *paRc = (int *)pvUser1;

    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
        return;

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured)
    {
        paRc[idCpu] = VMXR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = false;
    }
    else
    if (HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured)
    {
        paRc[idCpu] = SVMR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = false;
    }
    return;
}


/**
 * Does Ring-0 per VM HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
#endif

    pVM->hwaccm.s.vmx.fSupported = HWACCMR0Globals.vmx.fSupported;
    pVM->hwaccm.s.svm.fSupported = HWACCMR0Globals.svm.fSupported;

    pVM->hwaccm.s.vmx.msr.feature_ctrl      = HWACCMR0Globals.vmx.msr.feature_ctrl;
    pVM->hwaccm.s.vmx.hostCR4               = HWACCMR0Globals.vmx.hostCR4;
    pVM->hwaccm.s.vmx.msr.vmx_basic_info    = HWACCMR0Globals.vmx.msr.vmx_basic_info;
    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_exit          = HWACCMR0Globals.vmx.msr.vmx_exit;
    pVM->hwaccm.s.vmx.msr.vmx_entry         = HWACCMR0Globals.vmx.msr.vmx_entry;
    pVM->hwaccm.s.vmx.msr.vmx_misc          = HWACCMR0Globals.vmx.msr.vmx_misc;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
    pVM->hwaccm.s.svm.u32Rev                = HWACCMR0Globals.svm.u32Rev;
    pVM->hwaccm.s.svm.u32MaxASID            = HWACCMR0Globals.svm.u32MaxASID;
    pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
    pVM->hwaccm.s.lLastError                = HWACCMR0Globals.lLastError;

    /* Init a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0InitVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0InitVM(pVM);

    return rc;
}


/**
 * Does Ring-0 per VM HWACCM termination.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0TermVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
#endif

    /* Terminate a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0TermVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0TermVM(pVM);

    return rc;
}


/**
 * Sets up a VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
#endif

    /* Set up VT-x or AMD-V. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0SetupVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0SetupVM(pVM);

    return rc;
}


/**
 * Enters the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

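    /* Note: the rc |= accumulation below only distinguishes "all succeeded"
     * (VINF_SUCCESS is 0) from "something failed"; the OR-ed value is not
     * itself a meaningful status code, so it is only compared against
     * VINF_SUCCESS. */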
    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc  = VMXR0Enter(pVM);
        AssertRC(rc);
        rc |= VMXR0SaveHostState(pVM);
        AssertRC(rc);
        rc |= VMXR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        rc  = SVMR0Enter(pVM);
        AssertRC(rc);
        rc |= SVMR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    return VINF_SUCCESS;
}


/**
 * Leaves the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */
    /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     * or trash somebody else's FPU state.
     */

    /* Restore host FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMRestoreHostFPUState\n"));
        /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
        CPUMRestoreHostFPUState(pVM);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0Leave(pVM);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0Leave(pVM);
    }
}


/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0RunGuestCode(pVM, pCtx);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0RunGuestCode(pVM, pCtx);
    }
}


#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   Desc    Descriptor to dump.
 * @param   Sel     Selector number.
 * @param   pszMsg  Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
#define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
#if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("Reserved1 "),             /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("Reserved3 "),             /* 0x03 */
        STRENTRY("Reserved4 "),             /* 0x04 */
        STRENTRY("Reserved5 "),             /* 0x05 */
        STRENTRY("Reserved6 "),             /* 0x06 */
        STRENTRY("Reserved7 "),             /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS64Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS64Busy "),             /* 0x0b */
        STRENTRY("Call64 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int64 "),                 /* 0x0e */
        STRENTRY("Trap64 "),                /* 0x0f */
#else
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("TSS16Avail "),            /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("TSS16Busy "),             /* 0x03 */
        STRENTRY("Call16 "),                /* 0x04 */
        STRENTRY("Task "),                  /* 0x05 */
        STRENTRY("Int16 "),                 /* 0x06 */
        STRENTRY("Trap16 "),                /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS32Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS32Busy "),             /* 0x0b */
        STRENTRY("Call32 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int32 "),                 /* 0x0e */
        STRENTRY("Trap32 "),                /* 0x0f */
#endif
        /* non system */
        STRENTRY("DataRO "),                /* 0x10 */
        STRENTRY("DataRO Accessed "),       /* 0x11 */
        STRENTRY("DataRW "),                /* 0x12 */
        STRENTRY("DataRW Accessed "),       /* 0x13 */
        STRENTRY("DataDownRO "),            /* 0x14 */
        STRENTRY("DataDownRO Accessed "),   /* 0x15 */
        STRENTRY("DataDownRW "),            /* 0x16 */
        STRENTRY("DataDownRW Accessed "),   /* 0x17 */
        STRENTRY("CodeEO "),                /* 0x18 */
        STRENTRY("CodeEO Accessed "),       /* 0x19 */
        STRENTRY("CodeER "),                /* 0x1a */
        STRENTRY("CodeER Accessed "),       /* 0x1b */
        STRENTRY("CodeConfEO "),            /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),   /* 0x1d */
        STRENTRY("CodeConfER "),            /* 0x1e */
        STRENTRY("CodeConfER Accessed ")    /* 0x1f */
#undef STRENTRY
    };
#define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char  szMsg[128];
    char *psz = &szMsg[0];
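    /* Build a 0..31 index into aTypes: the descriptor's S bit (system vs.
     * code/data) supplies bit 4 and the 4-bit type field the low bits. */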
    unsigned i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (Desc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
#if HC_ARCH_BITS == 64
    if (Desc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
#else
    if (Desc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (Desc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
#endif
#undef ADD_STR
    *psz = '\0';

    /*
     * Limit and base, then format the output.
     */
    uint32_t u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
    if (Desc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;

#if HC_ARCH_BITS == 64
    uint64_t u32Base = ((uintptr_t)Desc->Gen.u32BaseHigh3 << 32ULL) | Desc->Gen.u8BaseHigh2 << 24ULL | Desc->Gen.u8BaseHigh1 << 16ULL | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au64[0], Desc->au64[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#else
    uint32_t u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#endif
}

/**
 * Formats a full register dump.
 *
 * @param   pCtx    The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet;
        const char *pszClear;
        uint32_t    fFlag;
    } aFlags[] =
    {
        { "vip", NULL, X86_EFL_VIP },
        { "vif", NULL, X86_EFL_VIF },
        { "ac",  NULL, X86_EFL_AC },
        { "vm",  NULL, X86_EFL_VM },
        { "rf",  NULL, X86_EFL_RF },
        { "nt",  NULL, X86_EFL_NT },
        { "ov",  "nv", X86_EFL_OF },
        { "dn",  "up", X86_EFL_DF },
        { "ei",  "di", X86_EFL_IF },
        { "tf",  NULL, X86_EFL_TF },
        { "ng",  "pl", X86_EFL_SF },
        { "nz",  "zr", X86_EFL_ZF },
        { "ac",  "na", X86_EFL_AF },
        { "po",  "pe", X86_EFL_PF },
        { "cy",  "nc", X86_EFL_CF },
    };
    char szEFlags[80];
    char *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';

    /*
     * Format the registers.
     */
    Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
         "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
         "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
         "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
         "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
         "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
         ,
         pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
         pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
         (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
         (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
         (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
         (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));

    Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
         "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
         "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
         "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
         "tr ={%04x base=%08x limit=%08x flags=%08x}\n"
         "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
         "FCW=%04x FSW=%04x FTW=%04x\n",
         (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
         (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
         pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
         (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
         (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
         pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));
}
#endif /* VBOX_STRICT */