VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp @ 8553

Last change on this file since 8553 was 8553, checked in by vboxsync, 17 years ago:
Moved VMX root mode check around.

/* $Id: HWACCMR0.cpp 8553 2008-05-05 08:01:48Z vboxsync $ */
/** @file
 * HWACCM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_vmx.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/memobj.h>
#include <iprt/cpuset.h>
#include "HWVMXR0.h"
#include "HWSVMR0.h"

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);

/*******************************************************************************
*   Local Variables                                                            *
*******************************************************************************/
static struct
{
    struct
    {
        RTR0MEMOBJ  pMemObj;
        bool        fVMXConfigured;
        bool        fSVMConfigured;
    } aCpuInfo[RTCPUSET_MAX_CPUS];

    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool        fSupported;

        /** Host CR4 value (set by ring-0 VMX init). */
        uint64_t    hostCR4;

        /** VMX MSR values. */
        struct
        {
            uint64_t feature_ctrl;
            uint64_t vmx_basic_info;
            uint64_t vmx_pin_ctls;
            uint64_t vmx_proc_ctls;
            uint64_t vmx_exit;
            uint64_t vmx_entry;
            uint64_t vmx_misc;
            uint64_t vmx_cr0_fixed0;
            uint64_t vmx_cr0_fixed1;
            uint64_t vmx_cr4_fixed0;
            uint64_t vmx_cr4_fixed1;
            uint64_t vmx_vmcs_enum;
        } msr;

        /** Last instruction error. */
        uint32_t    ulLastInstrError;
    } vmx;

    struct
    {
        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool        fSupported;

        /** SVM revision. */
        uint32_t    u32Rev;

        /** Maximum ASID allowed. */
        uint32_t    u32MaxASID;
    } svm;

    /** Saved error from detection. */
    int32_t         lLastError;

    struct
    {
        uint32_t    u32AMDFeatureECX;
        uint32_t    u32AMDFeatureEDX;
    } cpuid;

    HWACCMSTATE     enmHwAccmState;
} HWACCMR0Globals;


/**
 * Does global Ring-0 HWACCM initialization.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Init()
{
    int rc;

    memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
    HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;

#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

    /*
     * Check for VT-x and AMD-V capabilities.
     */
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX;
        uint32_t u32Dummy;
        uint32_t u32FeaturesEDX;
        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;

        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);
148
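        /* CPUID leaf 0 returns the vendor string in EBX, EDX and ECX (in that
           order); the X86_CPUID_VENDOR_* constants encode "GenuineIntel" and
           "AuthenticAMD", hence the three separate register compares below. */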
        if (    u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
           )
        {
            /*
             * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
             * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
             */
            if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
                memset(aRc, 0, sizeof(aRc));
                HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                    HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
                {
                    /* Reread in case we've changed it. */
                    HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

                    if (    (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                         == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
                    {
                        HWACCMR0Globals.vmx.fSupported          = true;
                        HWACCMR0Globals.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                        HWACCMR0Globals.vmx.msr.vmx_pin_ctls    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_proc_ctls   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_exit        = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_entry       = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                        HWACCMR0Globals.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                        HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                        HWACCMR0Globals.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
                        HWACCMR0Globals.vmx.hostCR4             = ASMGetCR4();

#if HC_ARCH_BITS == 64
                        RTR0MEMOBJ pScatchMemObj;
                        void      *pvScatchPage;
                        RTHCPHYS   pScatchPagePhys;

                        rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                        if (RT_FAILURE(rc))
                            return rc;

                        pvScatchPage    = RTR0MemObjAddress(pScatchMemObj);
                        pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
                        memset(pvScatchPage, 0, PAGE_SIZE);
210
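                        /* VMXON requires the first 31 bits of its memory region to hold the
                           VMCS revision identifier reported by IA32_VMX_BASIC (bits 30:0);
                           the instruction fails otherwise, which is what the write below
                           sets up for the probe. */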
                        /* Set revision dword at the beginning of the structure. */
                        *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);

                        /* Make sure we don't get rescheduled to another cpu during this probe. */
                        RTCCUINTREG fFlags = ASMIntDisableFlags();

                        /*
                         * Check CR4.VMXE.
                         */
                        if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
                        {
                            /* In theory this bit could be cleared behind our back, which would cause
                             * #UD faults when we try to execute the VMX instructions...
                             */
                            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
                        }

                        /* Enter VMX Root Mode. */
                        rc = VMXEnable(pScatchPagePhys);
                        if (VBOX_FAILURE(rc))
                        {
                            /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will
                             * crash the host when we enter raw mode, because:
                             *  (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit), and
                             *  (b) turning off paging causes a #GP (unavoidable when switching from long to 32-bit mode).
                             *
                             * They should fix their code, but until they do we simply refuse to run.
                             */
                            HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
                            HWACCMR0Globals.vmx.fSupported = false;
                        }
                        else
                            VMXDisable();

                        /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before
                           (some software could incorrectly think it's in VMX mode). */
                        ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
                        ASMSetFlags(fFlags);

                        RTR0MemObjFree(pScatchMemObj, false);
                        if (VBOX_FAILURE(HWACCMR0Globals.lLastError))
                            return HWACCMR0Globals.lLastError;
#endif
                    }
                    else
                    {
                        AssertFailed(); /* can't hit this case anymore */
                        HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
                    }
                }
#ifdef LOG_ENABLED
                else
                    SUPR0Printf("HWACCMR0InitCPU failed with rc=%d\n", HWACCMR0Globals.lLastError);
#endif
            }
            else
                HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
        }
        else
        if (    u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
            &&  u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
            &&  u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
           )
        {
            /*
             * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
             * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
             */
            if (    (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
                 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
               )
            {
                int     aRc[RTCPUSET_MAX_CPUS];
                RTCPUID idCpu = 0;

                /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
                memset(aRc, 0, sizeof(aRc));
                rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
                AssertRC(rc);

                /* Check the return code of all invocations. */
                if (VBOX_SUCCESS(rc))
                    rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

                AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));

                if (VBOX_SUCCESS(rc))
                {
                    /* Query AMD features. */
                    ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &u32Dummy);

                    HWACCMR0Globals.svm.fSupported = true;
                }
                else
                    HWACCMR0Globals.lLastError = rc;
            }
            else
                HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
        }
        else
            HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;

#endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */

    return VINF_SUCCESS;
}
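
/*
 * Rough order of use (a sketch, not lifted from the actual callers; the real
 * call sites live in the rest of VMMR0 and may interleave differently):
 *
 *      HWACCMR0Init();                       - once, when the ring-0 module loads
 *      HWACCMR0InitVM(pVM);                  - per VM, copies the global capabilities
 *      HWACCMR0EnableAllCpus(pVM, enmState); - the first VM switches hwaccm on everywhere
 *      HWACCMR0SetupVM(pVM);                 - sets up the VT-x/AMD-V session
 *      HWACCMR0Enter(pVM);                   - then, per world switch:
 *      HWACCMR0RunGuestCode(pVM);
 *      HWACCMR0Leave(pVM);
 *      HWACCMR0TermVM(pVM);                  - per-VM teardown
 *      HWACCMR0Term();                       - once, at module unload
 */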
319
320
/**
 * Checks the error code array filled in for each cpu in the system.
 *
 * @returns VBox status code.
 * @param   paRc        Error code array.
 * @param   cErrorCodes Array size.
 * @param   pidCpu      Value of the first cpu that set an error (out).
 */
static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
{
    int rc = VINF_SUCCESS;

    Assert(cErrorCodes == RTCPUSET_MAX_CPUS);

    for (unsigned i = 0; i < cErrorCodes; i++)
    {
        if (RTMpIsCpuOnline(i))
        {
            if (VBOX_FAILURE(paRc[i]))
            {
                rc      = paRc[i];
                *pidCpu = i;
                break;
            }
        }
    }
    return rc;
}

/**
 * Does global Ring-0 HWACCM termination.
 *
 * @returns VBox status code.
 */
HWACCMR0DECL(int) HWACCMR0Term()
{
    int aRc[RTCPUSET_MAX_CPUS];

    memset(aRc, 0, sizeof(aRc));
    int rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
    Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);

    /* Free the per-cpu pages used for VT-x and AMD-V. */
    for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
    {
        AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
        if (HWACCMR0Globals.aCpuInfo[i].pMemObj)
        {
            RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
            HWACCMR0Globals.aCpuInfo[i].pMemObj = NULL;
        }
    }
    return rc;
}


/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned u32VendorEBX = (uintptr_t)pvUser1;
    int     *paRc         = (int *)pvUser2;
    uint64_t val;

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
#endif
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)

    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
    {
        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * Both the LOCK and VMXON bits must be set; otherwise VMXON will generate a #GP.
         * Once the lock bit is set, this MSR can no longer be modified.
         */
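        /* In IA32_FEATURE_CONTROL the lock flag is bit 0 and the enable-VMXON
           (outside SMX operation) flag is bit 2. A BIOS that locks the MSR with
           VMXON disabled has switched VT-x off for good until the next reset. */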
        if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
        {
            /* MSR is not yet locked; we can change it ourselves here. */
            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        }
        if (    (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
             == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
            paRc[idCpu] = VINF_SUCCESS;
        else
            paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
    }
    else
    if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
    {
        /* Check if SVM is disabled. */
        val = ASMRdMsr(MSR_K8_VM_CR);
        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
        {
            /* Turn on SVM in the EFER MSR. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (!(val & MSR_K6_EFER_SVME))
                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
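            /* EFER.SVME (bit 12) gates all SVM instructions (VMRUN, VMLOAD,
               VMSAVE, STGI, CLGI, ...). When VM_CR.SVMDIS is set, writing
               EFER.SVME raises a #GP, which is why the VM_CR check comes first. */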
427
            /* Paranoia. */
            val = ASMRdMsr(MSR_K6_EFER);
            if (val & MSR_K6_EFER_SVME)
                paRc[idCpu] = VINF_SUCCESS;
            else
                paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
        }
        else
            paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
    }
    else
        AssertFailed(); /* can't happen */
    return;
}


/**
 * Sets up HWACCM on all cpus.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   enmNewHwAccmState   New hwaccm state.
 */
HWACCMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM, HWACCMSTATE enmNewHwAccmState)
{
    Assert(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
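    /* One-shot state transition: atomically move from UNINITIALIZED to the
       requested state so that only the first VM performs the global per-cpu
       setup below; later callers merely verify that the mode matches. */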
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
    {
        int     aRc[RTCPUSET_MAX_CPUS];
        RTCPUID idCpu = 0;

        /* Don't setup hwaccm as that might not work (vt-x & 64-bit raw mode). */
        if (enmNewHwAccmState == HWACCMSTATE_DISABLED)
            return VINF_SUCCESS;

        memset(aRc, 0, sizeof(aRc));

        /* Allocate one page per cpu for the global vt-x and amd-v pages. */
        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
        {
            Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);

            /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
            if (RTMpIsCpuOnline(i))
            {
                int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                    return rc;

                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
                Assert(pvR0);
                memset(pvR0, 0, PAGE_SIZE);

#ifdef LOG_ENABLED
                SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
#endif
            }
        }
        /* First time, so initialize each cpu/core. */
        int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);

        /* Check the return code of all invocations. */
        if (VBOX_SUCCESS(rc))
            rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

        AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
        return rc;
    }

    if (HWACCMR0Globals.enmHwAccmState == enmNewHwAccmState)
        return VINF_SUCCESS;

    /* Request to change the mode is not allowed. */
    return VERR_ACCESS_DENIED;
}
505
/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM       pVM  = (PVM)pvUser1;
    int      *paRc = (int *)pvUser2;
    void     *pvPageCpu;
    RTHCPHYS  pPageCpuPhys;

    Assert(pVM);
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    /* Should never happen. */
    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
    {
        AssertFailed();
        return;
    }

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        paRc[idCpu] = VMXR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = true;
    }
    else
    if (pVM->hwaccm.s.svm.fSupported)
    {
        paRc[idCpu] = SVMR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        if (VBOX_SUCCESS(paRc[idCpu]))
            HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = true;
    }
    return;
}
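
/* Note: the per-cpu page handed to VMXR0EnableCpu / SVMR0EnableCpu above is the
   piece of memory each CPU needs for switching hardware virtualization on: the
   VMXON region on VT-x and the host state-save area on AMD-V (see HWVMXR0.cpp
   and HWSVMR0.cpp for how the backends actually use it). */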
552
/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
 * is to be called on the target cpus.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
 * @param   pvUser2     The 2nd user argument.
 */
static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    void     *pvPageCpu;
    RTHCPHYS  pPageCpuPhys;
    int      *paRc = (int *)pvUser1;

    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
        return;

    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);

    if (HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured)
    {
        paRc[idCpu] = VMXR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fVMXConfigured = false;
    }
    else
    if (HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured)
    {
        paRc[idCpu] = SVMR0DisableCpu(idCpu, pvPageCpu, pPageCpuPhys);
        AssertRC(paRc[idCpu]);
        HWACCMR0Globals.aCpuInfo[idCpu].fSVMConfigured = false;
    }
    return;
}
591
592
/**
 * Does Ring-0 per VM HWACCM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VMX.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0InitVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
#endif

    pVM->hwaccm.s.vmx.fSupported            = HWACCMR0Globals.vmx.fSupported;
    pVM->hwaccm.s.svm.fSupported            = HWACCMR0Globals.svm.fSupported;

    pVM->hwaccm.s.vmx.msr.feature_ctrl      = HWACCMR0Globals.vmx.msr.feature_ctrl;
    pVM->hwaccm.s.vmx.hostCR4               = HWACCMR0Globals.vmx.hostCR4;
    pVM->hwaccm.s.vmx.msr.vmx_basic_info    = HWACCMR0Globals.vmx.msr.vmx_basic_info;
    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_exit          = HWACCMR0Globals.vmx.msr.vmx_exit;
    pVM->hwaccm.s.vmx.msr.vmx_entry         = HWACCMR0Globals.vmx.msr.vmx_entry;
    pVM->hwaccm.s.vmx.msr.vmx_misc          = HWACCMR0Globals.vmx.msr.vmx_misc;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
    pVM->hwaccm.s.svm.u32Rev                = HWACCMR0Globals.svm.u32Rev;
    pVM->hwaccm.s.svm.u32MaxASID            = HWACCMR0Globals.svm.u32MaxASID;
    pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
    pVM->hwaccm.s.lLastError                = HWACCMR0Globals.lLastError;

    /* Init a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0InitVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0InitVM(pVM);

    return rc;
}
643
644
/**
 * Does Ring-0 per VM HWACCM termination.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0TermVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
#endif

    /* Terminate a VT-x or AMD-V VM. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0TermVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0TermVM(pVM);

    return rc;
}


/**
 * Sets up a VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
#endif

    /* Setup VT-x or AMD-V. */
    if (pVM->hwaccm.s.vmx.fSupported)
        rc = VMXR0SetupVM(pVM);
    else
    if (pVM->hwaccm.s.svm.fSupported)
        rc = SVMR0SetupVM(pVM);

    return rc;
}
697
698
/**
 * Enters the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Enter(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Always load the guest's FPU/XMM state on-demand. */
    CPUMDeactivateGuestFPUState(pVM);

    /* Always reload the host context and the guest's CR0 register. (!!!!) */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        rc  = VMXR0Enter(pVM);
        AssertRC(rc);
        rc |= VMXR0SaveHostState(pVM);
        AssertRC(rc);
        rc |= VMXR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        rc  = SVMR0Enter(pVM);
        AssertRC(rc);
        rc |= SVMR0LoadGuestState(pVM, pCtx);
        AssertRC(rc);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    return VINF_SUCCESS;
}
744
745
/**
 * Leaves the VT-x or AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0Leave(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    /** @note It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. */
    /* We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
     * or trash somebody else's FPU state.
     */

    /* Restore host FPU and XMM state if necessary. */
    if (CPUMIsGuestFPUStateActive(pVM))
    {
        Log2(("CPUMRestoreHostFPUState\n"));
        /** @note CPUMRestoreHostFPUState keeps the current CR0 intact. */
        CPUMRestoreHostFPUState(pVM);

        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    }

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0Leave(pVM);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0Leave(pVM);
    }
}
786
/**
 * Runs guest code in a hardware accelerated VM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM)
{
    CPUMCTX *pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    if (VBOX_FAILURE(rc))
        return rc;

    if (pVM->hwaccm.s.vmx.fSupported)
    {
        return VMXR0RunGuestCode(pVM, pCtx);
    }
    else
    {
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0RunGuestCode(pVM, pCtx);
    }
}
812
813
#ifdef VBOX_STRICT
#include <iprt/string.h>
/**
 * Dumps a descriptor.
 *
 * @param   Desc    Descriptor to dump.
 * @param   Sel     Selector number.
 * @param   pszMsg  Message to prepend the log entry with.
 */
HWACCMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const aTypes[32] =
    {
    #define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
#if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("Reserved1 "),             /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("Reserved3 "),             /* 0x03 */
        STRENTRY("Reserved4 "),             /* 0x04 */
        STRENTRY("Reserved5 "),             /* 0x05 */
        STRENTRY("Reserved6 "),             /* 0x06 */
        STRENTRY("Reserved7 "),             /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS64Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS64Busy "),             /* 0x0b */
        STRENTRY("Call64 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int64 "),                 /* 0x0e */
        STRENTRY("Trap64 "),                /* 0x0f */
#else
        STRENTRY("Reserved0 "),             /* 0x00 */
        STRENTRY("TSS16Avail "),            /* 0x01 */
        STRENTRY("LDT "),                   /* 0x02 */
        STRENTRY("TSS16Busy "),             /* 0x03 */
        STRENTRY("Call16 "),                /* 0x04 */
        STRENTRY("Task "),                  /* 0x05 */
        STRENTRY("Int16 "),                 /* 0x06 */
        STRENTRY("Trap16 "),                /* 0x07 */
        STRENTRY("Reserved8 "),             /* 0x08 */
        STRENTRY("TSS32Avail "),            /* 0x09 */
        STRENTRY("ReservedA "),             /* 0x0a */
        STRENTRY("TSS32Busy "),             /* 0x0b */
        STRENTRY("Call32 "),                /* 0x0c */
        STRENTRY("ReservedD "),             /* 0x0d */
        STRENTRY("Int32 "),                 /* 0x0e */
        STRENTRY("Trap32 "),                /* 0x0f */
#endif
        /* non system */
        STRENTRY("DataRO "),                /* 0x10 */
        STRENTRY("DataRO Accessed "),       /* 0x11 */
        STRENTRY("DataRW "),                /* 0x12 */
        STRENTRY("DataRW Accessed "),       /* 0x13 */
        STRENTRY("DataDownRO "),            /* 0x14 */
        STRENTRY("DataDownRO Accessed "),   /* 0x15 */
        STRENTRY("DataDownRW "),            /* 0x16 */
        STRENTRY("DataDownRW Accessed "),   /* 0x17 */
        STRENTRY("CodeEO "),                /* 0x18 */
        STRENTRY("CodeEO Accessed "),       /* 0x19 */
        STRENTRY("CodeER "),                /* 0x1a */
        STRENTRY("CodeER Accessed "),       /* 0x1b */
        STRENTRY("CodeConfEO "),            /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),   /* 0x1d */
        STRENTRY("CodeConfER "),            /* 0x1e */
        STRENTRY("CodeConfER Accessed ")    /* 0x1f */
    #undef STRENTRY
    };
    #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char     szMsg[128];
    char    *psz = &szMsg[0];
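    /* aTypes[] index: u1DescType (0 = system, 1 = code/data) supplies bit 4
       and u4Type the low four bits, i.e. a 5-bit index into the 32-entry table. */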
    unsigned i = Desc->Gen.u1DescType << 4 | Desc->Gen.u4Type;
    memcpy(psz, aTypes[i].psz, aTypes[i].cch);
    psz += aTypes[i].cch;

    if (Desc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
#if HC_ARCH_BITS == 64
    if (Desc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
#else
    if (Desc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (Desc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
#endif
    #undef ADD_STR
    *psz = '\0';

    /*
     * Limit and Base and format the output.
     */
    uint32_t u32Limit = Desc->Gen.u4LimitHigh << 16 | Desc->Gen.u16LimitLow;
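    /* With the granularity bit set, the 20-bit limit is in 4 KiB units; scale
       it to bytes and fill in the low 12 bits (PAGE_OFFSET_MASK). */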
    if (Desc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;

#if HC_ARCH_BITS == 64
    uint64_t u32Base = ((uintptr_t)Desc->Gen.u32BaseHigh3 << 32ULL) | Desc->Gen.u8BaseHigh2 << 24ULL | Desc->Gen.u8BaseHigh1 << 16ULL | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %VX64 %VX64 - base=%VX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au64[0], Desc->au64[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#else
    uint32_t u32Base = Desc->Gen.u8BaseHigh2 << 24 | Desc->Gen.u8BaseHigh1 << 16 | Desc->Gen.u16BaseLow;

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, Desc->au32[0], Desc->au32[1], u32Base, u32Limit, Desc->Gen.u2Dpl, szMsg));
#endif
}

/**
 * Formats a full register dump.
 *
 * @param   pCtx        The context to format.
 */
HWACCMR0DECL(void) HWACCMDumpRegs(PCPUMCTX pCtx)
{
    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } aFlags[] =
    {
        { "vip", NULL, X86_EFL_VIP },
        { "vif", NULL, X86_EFL_VIF },
        { "ac",  NULL, X86_EFL_AC },
        { "vm",  NULL, X86_EFL_VM },
        { "rf",  NULL, X86_EFL_RF },
        { "nt",  NULL, X86_EFL_NT },
        { "ov",  "nv", X86_EFL_OF },
        { "dn",  "up", X86_EFL_DF },
        { "ei",  "di", X86_EFL_IF },
        { "tf",  NULL, X86_EFL_TF },
        { "ng",  "pl", X86_EFL_SF },
        { "nz",  "zr", X86_EFL_ZF },
        { "ac",  "na", X86_EFL_AF },
        { "po",  "pe", X86_EFL_PF },
        { "cy",  "nc", X86_EFL_CF },
    };
    char szEFlags[80];
    char *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < ELEMENTS(aFlags); i++)
    {
        const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';

    /*
     * Format the registers.
     */
    Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
         "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
         "cs={%04x base=%08x limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
         "ds={%04x base=%08x limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
         "es={%04x base=%08x limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
         "fs={%04x base=%08x limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
         ,
         pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
         pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
         (RTSEL)pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr0, pCtx->dr1,
         (RTSEL)pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr2, pCtx->dr3,
         (RTSEL)pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr4, pCtx->dr5,
         (RTSEL)pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr6, pCtx->dr7));

    Log(("gs={%04x base=%08x limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
         "ss={%04x base=%08x limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
         "gdtr=%08x:%04x idtr=%08x:%04x eflags=%08x\n"
         "ldtr={%04x base=%08x limit=%08x flags=%08x}\n"
         "tr  ={%04x base=%08x limit=%08x flags=%08x}\n"
         "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
         "FCW=%04x FSW=%04x FTW=%04x\n",
         (RTSEL)pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
         (RTSEL)pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
         pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
         (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u32Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
         (RTSEL)pCtx->tr, pCtx->trHid.u32Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
         pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW));
}
#endif