VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@8213

Last change on this file since 8213 was 8155, checked in by vboxsync, 17 years ago

The Big Sun Rebranding Header Change

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 36.3 KB
/* $Id: HWACCMR0.cpp 8155 2008-04-18 15:16:47Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);

/*******************************************************************************
*   Local Variables                                                            *
*******************************************************************************/
static struct
{
    struct
    {
        RTR0MEMOBJ  pMemObj;
        bool        fVMXConfigured;
        bool        fSVMConfigured;
    } aCpuInfo[RTCPUSET_MAX_CPUS];

    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool        fSupported;

        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t    hostCR4;

        /** VMX MSR values. */
        struct
        {
            uint64_t feature_ctrl;
            uint64_t vmx_basic_info;
            uint64_t vmx_pin_ctls;
            uint64_t vmx_proc_ctls;
            uint64_t vmx_exit;
            uint64_t vmx_entry;
            uint64_t vmx_misc;
            uint64_t vmx_cr0_fixed0;
            uint64_t vmx_cr0_fixed1;
            uint64_t vmx_cr4_fixed0;
            uint64_t vmx_cr4_fixed1;
            uint64_t vmx_vmcs_enum;
        } msr;

        /** Last instruction error. */
        uint32_t    ulLastInstrError;
    } vmx;

    struct
    {
        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool        fSupported;

        /** SVM revision. */
        uint32_t    u32Rev;

        /** Maximum ASID allowed. */
        uint32_t    u32MaxASID;
    } svm;

    /** Saved error from detection. */
    int32_t     lLastError;

    struct
    {
        uint32_t u32AMDFeatureECX;
        uint32_t u32AMDFeatureEDX;
    } cpuid;

    HWACCMSTATE enmHwAccmState;
} HWACCMR0Globals;


/**
 * Does global Ring-0 HWACCM initialization.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Init()
{
    int rc;

    memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
    HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;

#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    /*
     * Check for VT-x and AMD-V capabilities.
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;

        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);
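        /* Note: the feature bits tested below live in the standard CPUID
           leaves: CPUID(1).ECX bit 5 is VMX, CPUID(1).EDX bit 5 is MSR
           (RDMSR/WRMSR) and bit 24 is FXSR, while CPUID(0x80000001).ECX
           bit 2 is SVM. The vendor string from leaf 0 (EBX/ECX/EDX)
           decides which of the two detection paths is taken. */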

        if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
           )
        {
            /*
             * Read all VMX MSRs if VMX is available (this also requires RDMSR/WRMSR support).
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
                memset(aRc, 0, sizeof(aRc));
                HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                    HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                {
                    /* Reread in case we've changed it. */
                    HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

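                    /* IA32_FEATURE_CONTROL: bit 0 is the lock bit and bit 2
                       enables VMXON outside SMX operation; both must be set
                       or VMXON raises #GP. Once the lock bit is set, the MSR
                       can only be changed again by a reset. */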
                    if (    (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                         == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    {
                        HWACCMR0Globals.vmx.fSupported          = true;
                        HWACCMR0Globals.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                        HWACCMR0Globals.vmx.msr.vmx_pin_ctls    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_proc_ctls   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_exit        = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_entry       = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
                        HWACCMR0Globals.vmx.hostCR4             = ASMGetCR4();

#if HC_ARCH_BITS == 64
                        RTR0MEMOBJ pScratchMemObj;
                        void      *pvScratchPage;
                        RTHCPHYS   pScratchPagePhys;

                        rc = RTR0MemObjAllocCont(&pScratchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                        if (RT_FAILURE(rc))
                            return rc;

                        pvScratchPage   = RTR0MemObjAddress(pScratchMemObj);
                        pScratchPagePhys = RTR0MemObjGetPagePhysAddr(pScratchMemObj, 0);
                        memset(pvScratchPage, 0, PAGE_SIZE);

                        /* Set revision dword at the beginning of the structure. */
                        *(uint32_t *)pvScratchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
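                        /* The VMXON region must be a 4KB-aligned page whose
                           first 32 bits hold the VMCS revision identifier
                           reported in IA32_VMX_BASIC[30:0]; VMXON takes the
                           region's physical address as its operand. */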

                        /* Make sure we don't get rescheduled to another CPU during this probe. */
                        RTCCUINTREG fFlags = ASMIntDisableFlags();

                        /*
                         * Check CR4.VMXE.
                         */
                        if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
                        {
                            /* In theory this bit could be cleared behind our back, which would cause
                             * #UD faults when we try to execute the VMX instructions...
                             */
                            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
                        }

                        /* Enter VMX Root Mode. */
                        rc = VMXEnable(pScratchPagePhys);
                        if (VBOX_FAILURE(rc))
                        {
                            /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
                             * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit), and
                             * (b) turning off paging causes a #GP (unavoidable when switching from long mode to 32-bit mode).
                             *
                             * They should fix their code, but until they do we simply refuse to run.
                             */
                            HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
                            HWACCMR0Globals.vmx.fSupported = false;
                        }
                        else
                            VMXDisable();

                        /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before
                           (some software could incorrectly think it's in VMX mode). */
                        ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
                        ASMSetFlags(fFlags);

                        RTR0MemObjFree(pScratchMemObj, false);
#endif
                    }
                    else
                    {
                        AssertFailed(); /* can't hit this case anymore */
                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
                    }
                }
#ifdef LOG_ENABLED
                else
                    SUPR0Printf("HWACCMR0InitCPU failed with rc=%d\n", HWACCMR0Globals.lLastError);
#endif
            }
            else
                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
        }
        else
        if (    u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
           )
        {
            /*
             * Read all SVM MSRs if SVM is available (this also requires RDMSR/WRMSR support).
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (   (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
                memset(aRc, 0, sizeof(aRc));
                rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
                AssertRC(rc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(rc))
                    rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));

                if (VBOX_SUCCESS(rc))
                {
                    /* Query AMD features. */
                    ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &u32Dummy);

                    HWACCMR0Globals.svm.fSupported = true;
                }
                else
                    HWACCMR0Globals.lLastError = rc;
            }
            else
                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
        }
        else
            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;

#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */

    return VINF_SUCCESS;
}
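
/* Note: a sketch of the intended call order, for orientation only; the exact
 * call sites live outside this file (in VMMR0) and may differ in detail:
 *
 *     rc = HWACCMR0Init();                    // once, at module load
 *     ...
 *     rc = HWACCMR0EnableAllCpus(pVM, HWACCMSTATE_ENABLED); // pick a mode
 *     rc = HWACCMR0InitVM(pVM);               // copy capabilities into pVM
 *     rc = HWACCMR0SetupVM(pVM);              // build VMCS/VMCB state
 *     ...
 *     rc = HWACCMR0Term();                    // once, at module unload
 *
 * HWACCMR0Init() only records the host's capabilities; it never leaves VT-x
 * or AMD-V enabled itself.
 */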


/**
 * Checks the error code array filled in for each cpu in the system.
 *
 * @returns VBox status code.
 * @param   paRc        Error code array.
 * @param   cErrorCodes Array size.
 * @param   pidCpu      Value of the first cpu that set an error. (out)
 */
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
{
    int rc = VINF_SUCCESS;

    Assert(cErrorCodes == RTCPUSET_MAX_CPUS);

    for (unsigned i = 0; i < cErrorCodes; i++)
    {
        if (RTMpIsCpuOnline(i))
        {
            if (VBOX_FAILURE(paRc[i]))
            {
                rc      = paRc[i];
                *pidCpu = i;
                break;
            }
        }
    }
    return rc;
}

/**
 * Does global Ring-0 HWACCM termination.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Term()
{
    int aRc[RTCPUSET_MAX_CPUS];

    memset(aRc, 0, sizeof(aRc));
    int rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
    Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);

    /* Free the per-cpu pages used for VT-x and AMD-V. */
    for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
    {
        AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
        if (HWACCMR0Globals.aCpuInfo[i].pMemObj)
        {
            RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
            HWACCMR0Globals.aCpuInfo[i].pMemObj = NULL;
        }
    }
    return rc;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned u32VendorEBX = (uintptr_t)pvUser1;
    int     *paRc         = (int *)pvUser2;
    uint64_t val;

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
#endif
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)

    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
    {
        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * Both the LOCK and VMXON bits must be set; otherwise VMXON will generate a #GP.
         * Once the lock bit is set, this MSR can no longer be modified.
         */
        if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
        {
            /* MSR is not yet locked; we can change it ourselves here. */
            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        }
        if (    (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
             == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            paRc[idCpu] = VINF_SUCCESS;
        else
            paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
    }
    else
    if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
    {
        /* Check if SVM is disabled. */
        val = ASMRdMsr(MSR_K8_VM_CR);
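        /* VM_CR (MSR 0xC001_0114) has an SVMDIS bit (bit 4) which, typically
           under BIOS control, disables SVM; when it is clear, SVM is switched
           on by setting EFER.SVME (bit 12 of the EFER MSR), as done below. */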
        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
        {
            /* Turn on SVM in the EFER MSR. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (!(val & MSR_K6_EFER_SVME))
                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

            /* Paranoia. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (val & MSR_K6_EFER_SVME)
                paRc[idCpu] = VINF_SUCCESS;
            else
                paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
        }
        else
            paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
    }
    else
        AssertFailed(); /* can't happen */
    return;
}


/**
 * Sets up HWACCM on all cpus.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   enmNewHwAccmState   New hwaccm state.
 */
HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
{
    Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
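    /* Only the first caller gets to pick the state: the compare-and-exchange
       below succeeds only while enmHwAccmState is still UNINITIALIZED, so
       later callers fall through to the same-state check at the bottom. */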
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
    {
        int     aRc[RTCPUSET_MAX_CPUS];
        RTCPUID idCpu = 0;

        /* Don't set up hwaccm as that might not work (VT-x & 64-bit raw mode). */
        if (enmNewHwAccmState == HWACCMSTATE_DISABLED)
            return VINF_SUCCESS;

        memset(aRc, 0, sizeof(aRc));

        /* Allocate one page per cpu for the global VT-x and AMD-V pages. */
        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
        {
            Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);

            /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
            if (RTMpIsCpuOnline(i))
            {
                int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                    return rc;

                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
                Assert(pvR0);
                memset(pvR0, 0, PAGE_SIZE);

#ifdef LOG_ENABLED
                SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
#endif
            }
        }
        /* First time, so initialize each cpu/core. */
        int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);

        /* Check the return code of all invocations. */
        if (VBOX_SUCCESS(rc))
            rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

        AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
        return rc;
    }

    if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
        return VINF_SUCCESS;

    /* Requests to change the mode are not allowed. */
    return VERR_ACCESS_DENIED;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM      pVM  = (PVM)pvUser1;
    int     *paRc = (int *)pvUser2;
    void    *pvPageCpu;
    RTHCPHYS pPageCpuPhys;

    Assert(pVM);
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    /* Should never happen. */
    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
    {
        AssertFailed();
        return;
    }

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);
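    /* This per-cpu page becomes the VMXON region on Intel hosts; on AMD hosts
       the SVM enable code is expected to use it as the host state save area
       (VM_HSAVE_PA MSR) - see HWVMXR0.cpp/HWSVMR0.cpp. Either way, it stays
       allocated until HWACCMR0Term. */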

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        paRc[idCpu] = VMXR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = true;
    }
    else
    if (pVM->hwaccm.s.svm.fSupported)
    {
        paRc[idCpu] = SVMR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = true;
    }
    return;
}

/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    void    *pvPageCpu;
    RTHCPHYS pPageCpuPhys;
    int     *paRc = (int *)pvUser1;

    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
        return;

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured)
    {
        paRc[idCpu] = VMXR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = false;
    }
    else
    if (HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured)
    {
        paRc[idCpu] = SVMR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = false;
    }
    return;
}


/**
 * Does Ring-0 per VM HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
#endif

    pVM->hwaccm.s.vmx.fSupported         = HWACCMR0Globals.vmx.fSupported;
    pVM->hwaccm.s.svm.fSupported         = HWACCMR0Globals.svm.fSupported;

    pVM->hwaccm.s.vmx.msr.feature_ctrl   = HWACCMR0Globals.vmx.msr.feature_ctrl;
    pVM->hwaccm.s.vmx.hostCR4            = HWACCMR0Globals.vmx.hostCR4;
    pVM->hwaccm.s.vmx.msr.vmx_basic_info = HWACCMR0Globals.vmx.msr.vmx_basic_info;
    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls   = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls  = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_exit       = HWACCMR0Globals.vmx.msr.vmx_exit;
    pVM->hwaccm.s.vmx.msr.vmx_entry      = HWACCMR0Globals.vmx.msr.vmx_entry;
    pVM->hwaccm.s.vmx.msr.vmx_misc       = HWACCMR0Globals.vmx.msr.vmx_misc;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum  = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
    pVM->hwaccm.s.svm.u32Rev             = HWACCMR0Globals.svm.u32Rev;
    pVM->hwaccm.s.svm.u32MaxASID         = HWACCMR0Globals.svm.u32MaxASID;
    pVM->hwaccm.s.cpuid.u32AMDFeatureECX = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
    pVM->hwaccm.s.lLastError             = HWACCMR0Globals.lLastError;

    /* Init a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0InitVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0InitVM(pVM);

    return rc;
}


/**
 * Does Ring-0 per VM HWACCM termination.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0TermVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
#endif

    /* Terminate a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0TermVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0TermVM(pVM);

    return rc;
}


/**
 * Sets up a VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
#endif

    /* Set up VT-x or AMD-V. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0SetupVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0SetupVM(pVM);

    return rc;
}


/**
 * Enters the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc  = VMXR0Enter(pVM);
        AssertRC(rc);
        rc |= VMXR0SaveHostState(pVM);
        AssertRC(rc);
        rc |= VMXR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        rc  = SVMR0Enter(pVM);
        AssertRC(rc);
        rc |= SVMR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    return VINF_SUCCESS;
}


/**
 * Leaves the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */
    /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     * or trash somebody else's FPU state.
     */

    /* Restore host FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMRestoreHostFPUState\n"));
        /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
        CPUMRestoreHostFPUState(pVM);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0Leave(pVM);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0Leave(pVM);
    }
}

/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0RunGuestCode(pVM, pCtx);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0RunGuestCode(pVM, pCtx);
    }
}
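
/* Note: a typical execution round, as driven by the ring-0 VMM loop (the
 * exact call site lives outside this file), looks roughly like this sketch:
 *
 *     rc = HWACCMR0Enter(pVM);             // save host, load guest state
 *     if (VBOX_SUCCESS(rc))
 *     {
 *         rc  = HWACCMR0RunGuestCode(pVM); // VMLAUNCH/VMRESUME or VMRUN loop
 *         rc2 = HWACCMR0Leave(pVM);        // restore host FPU state etc.
 *     }
 */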


#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   Desc    Descriptor to dump.
 * @param   Sel     Selector number.
 * @param   pszMsg  Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
#define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
#if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("Reserved1 "),                  /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("Reserved3 "),                  /* 0x03 */
        STRENTRY("Reserved4 "),                  /* 0x04 */
        STRENTRY("Reserved5 "),                  /* 0x05 */
        STRENTRY("Reserved6 "),                  /* 0x06 */
        STRENTRY("Reserved7 "),                  /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS64Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS64Busy "),                  /* 0x0b */
        STRENTRY("Call64 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int64 "),                      /* 0x0e */
        STRENTRY("Trap64 "),                     /* 0x0f */
#else
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("TSS16Avail "),                 /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("TSS16Busy "),                  /* 0x03 */
        STRENTRY("Call16 "),                     /* 0x04 */
        STRENTRY("Task "),                       /* 0x05 */
        STRENTRY("Int16 "),                      /* 0x06 */
        STRENTRY("Trap16 "),                     /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS32Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS32Busy "),                  /* 0x0b */
        STRENTRY("Call32 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int32 "),                      /* 0x0e */
        STRENTRY("Trap32 "),                     /* 0x0f */
#endif
        /* non system */
        STRENTRY("DataRO "),                     /* 0x10 */
        STRENTRY("DataRO Accessed "),            /* 0x11 */
        STRENTRY("DataRW "),                     /* 0x12 */
        STRENTRY("DataRW Accessed "),            /* 0x13 */
        STRENTRY("DataDownRO "),                 /* 0x14 */
        STRENTRY("DataDownRO Accessed "),        /* 0x15 */
        STRENTRY("DataDownRW "),                 /* 0x16 */
        STRENTRY("DataDownRW Accessed "),        /* 0x17 */
        STRENTRY("CodeEO "),                     /* 0x18 */
        STRENTRY("CodeEO Accessed "),            /* 0x19 */
        STRENTRY("CodeER "),                     /* 0x1a */
        STRENTRY("CodeER Accessed "),            /* 0x1b */
        STRENTRY("CodeConfEO "),                 /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),        /* 0x1d */
        STRENTRY("CodeConfER "),                 /* 0x1e */
        STRENTRY("CodeConfER Accessed ")         /* 0x1f */
#undef STRENTRY
    };
#define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char     szMsg[128];
    char    *psz = &szMsg[0];
    unsigned i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (Desc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
#if HC_ARCH_BITS == 64
    if (Desc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
#else
    if (Desc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (Desc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
#endif
#undef ADD_STR
    *psz = '\0';

    /*
     * Limit and Base and format the output.
     */
    uint32_t u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
    if (Desc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
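    /* With the granularity bit set, the 20-bit limit counts 4KB units, so the
       byte limit is (limit << 12) | 0xfff; PAGE_SHIFT and PAGE_OFFSET_MASK
       encode exactly that for 4KB pages. */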

#if HC_ARCH_BITS == 64
    /* Cast each piece to uint64_t before shifting to avoid sign extension of the int-promoted bitfields. */
    uint64_t u64Base =   ((uint64_t)Desc->Gen.u32BaseHigh3 << 32)
                       | ((uint64_t)Desc->Gen.u8BaseHigh2  << 24)
                       | ((uint64_t)Desc->Gen.u8BaseHigh1  << 16)
                       |            Desc->Gen.u16BaseLow;

    Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au64[0], Desc->au64[1], u64Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#else
    uint32_t u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#endif
}

/**
 * Formats a full register dump.
 *
 * @param   pCtx    The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } aFlags[] =
    {
        { "vip", NULL, X86_EFL_VIP },
        { "vif", NULL, X86_EFL_VIF },
        { "ac",  NULL, X86_EFL_AC },
        { "vm",  NULL, X86_EFL_VM },
        { "rf",  NULL, X86_EFL_RF },
        { "nt",  NULL, X86_EFL_NT },
        { "ov",  "nv", X86_EFL_OF },
        { "dn",  "up", X86_EFL_DF },
        { "ei",  "di", X86_EFL_IF },
        { "tf",  NULL, X86_EFL_TF },
        { "ng",  "pl", X86_EFL_SF },
        { "nz",  "zr", X86_EFL_ZF },
        { "ac",  "na", X86_EFL_AF },
        { "po",  "pe", X86_EFL_PF },
        { "cy",  "nc", X86_EFL_CF },
    };
    char szEFlags[80];
    char *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';

    /*
     * Format the registers.
     */
    Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
         "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
         "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
         "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
         "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
         "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
         ,
         pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
         pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
         (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
         (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
         (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
         (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));

    Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
         "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
         "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
         "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
         "tr  ={%04x base=%08x limit=%08x flags=%08x}\n"
         "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
         "FCW=%04x FSW=%04x FTW=%04x\n",
         (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
         (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
         pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
         (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
         (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
         pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));
}
#endif /* VBOX_STRICT */