
source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@19169

Last change on this file since 19169 was 19141, checked in by vboxsync on 2009-04-23:

Action flags breakup.
Fixed PGM saved state loading of 2.2.2 images.
Reduced hacks in PATM state loading (fixups).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 58.7 KB
1/* $Id: HWACCMR0.cpp 19141 2009-04-23 13:52:18Z vboxsync $ */
2/** @file
3 * HWACCM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_HWACCM
27#include <VBox/hwaccm.h>
28#include "HWACCMInternal.h"
29#include <VBox/vm.h>
30#include <VBox/x86.h>
31#include <VBox/hwacc_vmx.h>
32#include <VBox/hwacc_svm.h>
33#include <VBox/pgm.h>
34#include <VBox/pdm.h>
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <VBox/selm.h>
38#include <VBox/iom.h>
39#include <iprt/param.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <iprt/memobj.h>
44#include <iprt/cpuset.h>
45#include <iprt/power.h>
46#include "HWVMXR0.h"
47#include "HWSVMR0.h"
48
49/*******************************************************************************
50* Internal Functions *
51*******************************************************************************/
52static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
53static DECLCALLBACK(void) hwaccmR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
54static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
55static int hwaccmR0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);
56static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
57
58/*******************************************************************************
59* Global Variables *
60*******************************************************************************/
61
62static struct
63{
64 HWACCM_CPUINFO aCpuInfo[RTCPUSET_MAX_CPUS];
65
66 /** Ring 0 handlers for VT-x and AMD-V. */
67 DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu));
68 DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
69 DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
70 DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
71 DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
72 DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
73 DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys));
74 DECLR0CALLBACKMEMBER(int, pfnInitVM, (PVM pVM));
75 DECLR0CALLBACKMEMBER(int, pfnTermVM, (PVM pVM));
76 DECLR0CALLBACKMEMBER(int, pfnSetupVM, (PVM pVM));
77
78 /** Maximum ASID allowed. */
79 uint32_t uMaxASID;
80
81 struct
82 {
83 /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
84 bool fSupported;
85 /** Whether we're using SUPR0EnableVTx or not. */
86 bool fUsingSUPR0EnableVTx;
87
88 /** Host CR4 value (set by ring-0 VMX init) */
89 uint64_t hostCR4;
90
91 /** VMX MSR values */
92 struct
93 {
94 uint64_t feature_ctrl;
95 uint64_t vmx_basic_info;
96 VMX_CAPABILITY vmx_pin_ctls;
97 VMX_CAPABILITY vmx_proc_ctls;
98 VMX_CAPABILITY vmx_proc_ctls2;
99 VMX_CAPABILITY vmx_exit;
100 VMX_CAPABILITY vmx_entry;
101 uint64_t vmx_misc;
102 uint64_t vmx_cr0_fixed0;
103 uint64_t vmx_cr0_fixed1;
104 uint64_t vmx_cr4_fixed0;
105 uint64_t vmx_cr4_fixed1;
106 uint64_t vmx_vmcs_enum;
107 uint64_t vmx_eptcaps;
108 } msr;
109 /* Last instruction error */
110 uint32_t ulLastInstrError;
111 } vmx;
112 struct
113 {
114 /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
115 bool fSupported;
116
117 /** SVM revision. */
118 uint32_t u32Rev;
119
120 /** SVM feature bits from cpuid 0x8000000a */
121 uint32_t u32Features;
122 } svm;
123 /** Saved error from detection */
124 int32_t lLastError;
125
126 struct
127 {
128 uint32_t u32AMDFeatureECX;
129 uint32_t u32AMDFeatureEDX;
130 } cpuid;
131
132 HWACCMSTATE enmHwAccmState;
133
134 volatile bool fSuspended;
135} HWACCMR0Globals;
136
137
138
139/**
140 * Does global Ring-0 HWACCM initialization.
141 *
142 * @returns VBox status code.
143 */
144VMMR0DECL(int) HWACCMR0Init(void)
145{
146 int rc;
147
148 memset(&HWACCMR0Globals, 0, sizeof(HWACCMR0Globals));
149 HWACCMR0Globals.enmHwAccmState = HWACCMSTATE_UNINITIALIZED;
150 for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
151 HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
152
153 /* Fill in all callbacks with placeholders. */
154 HWACCMR0Globals.pfnEnterSession = HWACCMR0DummyEnter;
155 HWACCMR0Globals.pfnLeaveSession = HWACCMR0DummyLeave;
156 HWACCMR0Globals.pfnSaveHostState = HWACCMR0DummySaveHostState;
157 HWACCMR0Globals.pfnLoadGuestState = HWACCMR0DummyLoadGuestState;
158 HWACCMR0Globals.pfnRunGuestCode = HWACCMR0DummyRunGuestCode;
159 HWACCMR0Globals.pfnEnableCpu = HWACCMR0DummyEnableCpu;
160 HWACCMR0Globals.pfnDisableCpu = HWACCMR0DummyDisableCpu;
161 HWACCMR0Globals.pfnInitVM = HWACCMR0DummyInitVM;
162 HWACCMR0Globals.pfnTermVM = HWACCMR0DummyTermVM;
163 HWACCMR0Globals.pfnSetupVM = HWACCMR0DummySetupVM;
164
165 /*
166 * Check for VT-x and AMD-V capabilities
167 */
168 if (ASMHasCpuId())
169 {
170 uint32_t u32FeaturesECX;
171 uint32_t u32Dummy;
172 uint32_t u32FeaturesEDX;
173 uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
174
175 ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
176 ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
177 /* Query AMD features. */
178 ASMCpuId(0x80000001, &u32Dummy, &u32Dummy, &HWACCMR0Globals.cpuid.u32AMDFeatureECX, &HWACCMR0Globals.cpuid.u32AMDFeatureEDX);
179
180 if ( u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
181 && u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
182 && u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX
183 )
184 {
185 /*
186 * Read all VMX MSRs if VMX is available. (same goes for RDMSR/WRMSR)
187 * We also assume all VMX-enabled CPUs support fxsave/fxrstor.
188 */
189 if ( (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
190 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
191 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
192 )
193 {
194 int aRc[RTCPUSET_MAX_CPUS];
195 RTCPUID idCpu = 0;
196
197 HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
198
199 /*
200             * First try to use the native kernel API for controlling VT-x.
201 * (This is only supported by some Mac OS X kernels atm.)
202 */
203 HWACCMR0Globals.lLastError = rc = SUPR0EnableVTx(true /* fEnable */);
204 if (rc != VERR_NOT_SUPPORTED)
205 {
206 AssertMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
207 HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = true;
208 if (RT_SUCCESS(rc))
209 {
210 HWACCMR0Globals.vmx.fSupported = true;
211 rc = SUPR0EnableVTx(false /* fEnable */);
212 AssertRC(rc);
213 }
214 }
215 else
216 {
217 HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = false;
218
219 /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
220 memset(aRc, 0, sizeof(aRc));
221 HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
222
223 /* Check the return code of all invocations. */
224 if (RT_SUCCESS(HWACCMR0Globals.lLastError))
225 HWACCMR0Globals.lLastError = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
226 }
227 if (RT_SUCCESS(HWACCMR0Globals.lLastError))
228 {
229 /* Reread in case we've changed it. */
230 HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
231
232 if ( (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
233 == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
234 {
235 RTR0MEMOBJ pScatchMemObj;
236 void *pvScatchPage;
237 RTHCPHYS pScatchPagePhys;
238
239 HWACCMR0Globals.vmx.msr.vmx_basic_info = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
240 HWACCMR0Globals.vmx.msr.vmx_pin_ctls.u = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
241 HWACCMR0Globals.vmx.msr.vmx_proc_ctls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
242 HWACCMR0Globals.vmx.msr.vmx_exit.u = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
243 HWACCMR0Globals.vmx.msr.vmx_entry.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
244 HWACCMR0Globals.vmx.msr.vmx_misc = ASMRdMsr(MSR_IA32_VMX_MISC);
245 HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
246 HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
247 HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
248 HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
249 HWACCMR0Globals.vmx.msr.vmx_vmcs_enum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
250                 /* VPID provides a 16-bit ASID. */
251 HWACCMR0Globals.uMaxASID = 0x10000; /* exclusive */
252
253 if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
254 {
255 HWACCMR0Globals.vmx.msr.vmx_proc_ctls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
256 if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT|VMX_VMCS_CTRL_PROC_EXEC2_VPID))
257 HWACCMR0Globals.vmx.msr.vmx_eptcaps = ASMRdMsr(MSR_IA32_VMX_EPT_CAPS);
258 }
259
260 if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
261 {
262 HWACCMR0Globals.vmx.hostCR4 = ASMGetCR4();
263
264 rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
265 if (RT_FAILURE(rc))
266 return rc;
267
268 pvScatchPage = RTR0MemObjAddress(pScatchMemObj);
269 pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
270 memset(pvScatchPage, 0, PAGE_SIZE);
271
272 /* Set revision dword at the beginning of the structure. */
273 *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
274
275 /* Make sure we don't get rescheduled to another cpu during this probe. */
276 RTCCUINTREG fFlags = ASMIntDisableFlags();
277
278 /*
279 * Check CR4.VMXE
280 */
281 if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
282 {
283                     /* In theory this bit could be cleared behind our back, which would cause #UD faults when we
284                      * try to execute the VMX instructions...
285 */
286 ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
287 }
288
289 /* Enter VMX Root Mode */
290 rc = VMXEnable(pScatchPagePhys);
291 if (RT_FAILURE(rc))
292 {
293                     /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, but it will also crash the host when we enter raw mode, because
294                      * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
295                      * (b) turning off paging causes a #GP (unavoidable when switching from long mode to 32-bit mode or from 32-bit to PAE)
296 *
297 * They should fix their code, but until they do we simply refuse to run.
298 */
299 HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
300 }
301 else
302 {
303 HWACCMR0Globals.vmx.fSupported = true;
304 VMXDisable();
305 }
306
307 /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
308 ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
309 ASMSetFlags(fFlags);
310
311 RTR0MemObjFree(pScatchMemObj, false);
312 if (RT_FAILURE(HWACCMR0Globals.lLastError))
313 return HWACCMR0Globals.lLastError;
314 }
315 }
316 else
317 {
318 AssertFailed(); /* can't hit this case anymore */
319 HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
320 }
321 }
322#ifdef LOG_ENABLED
323 else
324 SUPR0Printf("HWACCMR0InitCPU failed with rc=%d\n", HWACCMR0Globals.lLastError);
325#endif
326 }
327 else
328 HWACCMR0Globals.lLastError = VERR_VMX_NO_VMX;
329 }
330 else
331 if ( u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
332 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
333 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX
334 )
335 {
336 /*
337 * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
338 * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
339 */
340 if ( (HWACCMR0Globals.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
341 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
342 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
343 )
344 {
345 int aRc[RTCPUSET_MAX_CPUS];
346 RTCPUID idCpu = 0;
347
348 /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
349 memset(aRc, 0, sizeof(aRc));
350 rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
351 AssertRC(rc);
352
353 /* Check the return code of all invocations. */
354 if (RT_SUCCESS(rc))
355 rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
356
357 AssertMsgRC(rc, ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));
358
359 if (RT_SUCCESS(rc))
360 {
361 /* Query AMD features. */
362 ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.uMaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);
363
364 HWACCMR0Globals.svm.fSupported = true;
365 }
366 else
367 HWACCMR0Globals.lLastError = rc;
368 }
369 else
370 HWACCMR0Globals.lLastError = VERR_SVM_NO_SVM;
371 }
372 else
373 HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
374 }
375 else
376 HWACCMR0Globals.lLastError = VERR_HWACCM_NO_CPUID;
377
378 if (HWACCMR0Globals.vmx.fSupported)
379 {
380 HWACCMR0Globals.pfnEnterSession = VMXR0Enter;
381 HWACCMR0Globals.pfnLeaveSession = VMXR0Leave;
382 HWACCMR0Globals.pfnSaveHostState = VMXR0SaveHostState;
383 HWACCMR0Globals.pfnLoadGuestState = VMXR0LoadGuestState;
384 HWACCMR0Globals.pfnRunGuestCode = VMXR0RunGuestCode;
385 HWACCMR0Globals.pfnEnableCpu = VMXR0EnableCpu;
386 HWACCMR0Globals.pfnDisableCpu = VMXR0DisableCpu;
387 HWACCMR0Globals.pfnInitVM = VMXR0InitVM;
388 HWACCMR0Globals.pfnTermVM = VMXR0TermVM;
389 HWACCMR0Globals.pfnSetupVM = VMXR0SetupVM;
390 }
391 else
392 if (HWACCMR0Globals.svm.fSupported)
393 {
394 HWACCMR0Globals.pfnEnterSession = SVMR0Enter;
395 HWACCMR0Globals.pfnLeaveSession = SVMR0Leave;
396 HWACCMR0Globals.pfnSaveHostState = SVMR0SaveHostState;
397 HWACCMR0Globals.pfnLoadGuestState = SVMR0LoadGuestState;
398 HWACCMR0Globals.pfnRunGuestCode = SVMR0RunGuestCode;
399 HWACCMR0Globals.pfnEnableCpu = SVMR0EnableCpu;
400 HWACCMR0Globals.pfnDisableCpu = SVMR0DisableCpu;
401 HWACCMR0Globals.pfnInitVM = SVMR0InitVM;
402 HWACCMR0Globals.pfnTermVM = SVMR0TermVM;
403 HWACCMR0Globals.pfnSetupVM = SVMR0SetupVM;
404 }
405
406 if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
407 {
408 rc = RTPowerNotificationRegister(hwaccmR0PowerCallback, 0);
409 AssertRC(rc);
410 }
411
412 return VINF_SUCCESS;
413}
414
415
416/**
417 * Checks the error code array filled in for each cpu in the system.
418 *
419 * @returns VBox status code.
420 * @param paRc Error code array
421 * @param cErrorCodes Array size
422 * @param pidCpu Value of the first cpu that set an error (out)
423 */
424static int hwaccmR0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
425{
426 int rc = VINF_SUCCESS;
427
428 Assert(cErrorCodes == RTCPUSET_MAX_CPUS);
429
430    for (unsigned i = 0; i < cErrorCodes; i++)
431 {
432 if (RTMpIsCpuOnline(i))
433 {
434 if (RT_FAILURE(paRc[i]))
435 {
436 rc = paRc[i];
437 *pidCpu = i;
438 break;
439 }
440 }
441 }
442 return rc;
443}
444
445/**
446 * Does global Ring-0 HWACCM termination.
447 *
448 * @returns VBox status code.
449 */
450VMMR0DECL(int) HWACCMR0Term(void)
451{
452 int rc;
453 if ( HWACCMR0Globals.vmx.fSupported
454 && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
455 {
456 rc = SUPR0EnableVTx(false /* fEnable */);
457 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
458 {
459 HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = false;
460 Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
461 }
462 }
463 else
464 {
465 int aRc[RTCPUSET_MAX_CPUS];
466
467 Assert(!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
468 if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
469 {
470 rc = RTPowerNotificationDeregister(hwaccmR0PowerCallback, 0);
471 Assert(RT_SUCCESS(rc));
472 }
473
474 memset(aRc, 0, sizeof(aRc));
475 rc = RTMpOnAll(hwaccmR0DisableCPU, aRc, NULL);
476 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
477
478 /* Free the per-cpu pages used for VT-x and AMD-V */
479        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
480 {
481 AssertMsgRC(aRc[i], ("hwaccmR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
482 if (HWACCMR0Globals.aCpuInfo[i].pMemObj != NIL_RTR0MEMOBJ)
483 {
484 RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
485 HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
486 }
487 }
488 }
489 return rc;
490}
491
492
493/**
494 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
495 * is to be called on the target cpus.
496 *
497 * @param idCpu The identifier for the CPU the function is called on.
498 * @param pvUser1 The 1st user argument.
499 * @param pvUser2 The 2nd user argument.
500 */
501static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
502{
503 unsigned u32VendorEBX = (uintptr_t)pvUser1;
504 int *paRc = (int *)pvUser2;
505 uint64_t val;
506
507#if defined(LOG_ENABLED) && !defined(DEBUG_bird)
508 SUPR0Printf("HWACCMR0InitCPU cpu %d\n", idCpu);
509#endif
510 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
511
512 if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
513 {
514 val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
515
516 /*
517         * Both the LOCK and VMXON bits must be set; otherwise VMXON will generate a #GP.
518 * Once the lock bit is set, this MSR can no longer be modified.
519 */
520 if ( !(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
521 || ((val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)) == MSR_IA32_FEATURE_CONTROL_VMXON) /* Some BIOSes forget to set the locked bit. */
522 )
523 {
524 /* MSR is not yet locked; we can change it ourselves here */
525 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
526 val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
527 }
528 if ( (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
529 == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
530 paRc[idCpu] = VINF_SUCCESS;
531 else
532 paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
533 }
534 else
535 if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
536 {
537 /* Check if SVM is disabled */
538 val = ASMRdMsr(MSR_K8_VM_CR);
539 if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
540 {
541 /* Turn on SVM in the EFER MSR. */
542 val = ASMRdMsr(MSR_K6_EFER);
543 if (!(val & MSR_K6_EFER_SVME))
544 ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
545
546 /* Paranoia. */
547 val = ASMRdMsr(MSR_K6_EFER);
548 if (val & MSR_K6_EFER_SVME)
549 {
550 /* Restore previous value. */
551 ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);
552 paRc[idCpu] = VINF_SUCCESS;
553 }
554 else
555 paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
556 }
557 else
558 paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
559 }
560 else
561 AssertFailed(); /* can't happen */
562 return;
563}
564
565
566/**
567 * Sets up HWACCM on all cpus.
568 *
569 * @returns VBox status code.
570 * @param pVM The VM to operate on.
571 *
572 */
573VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
574{
575 AssertCompile(sizeof(HWACCMR0Globals.enmHwAccmState) == sizeof(uint32_t));
576
577    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation for a suspend. */
578 if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
579 return VERR_HWACCM_SUSPEND_PENDING;
580
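    /* Only the first caller transitions enmHwAccmState from UNINITIALIZED to ENABLED and
       performs the per-cpu setup below; subsequent callers simply return VINF_SUCCESS. */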
581 if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, HWACCMSTATE_ENABLED, HWACCMSTATE_UNINITIALIZED))
582 {
583 int rc;
584
585 if ( HWACCMR0Globals.vmx.fSupported
586 && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
587 {
588 rc = SUPR0EnableVTx(true /* fEnable */);
589 if (RT_SUCCESS(rc))
590 {
591 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
592 {
593 HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = true;
594 Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
595 }
596 }
597 else
598 AssertMsgFailed(("HWACCMR0EnableAllCpus/SUPR0EnableVTx: rc=%Rrc\n", rc));
599 }
600 else
601 {
602 int aRc[RTCPUSET_MAX_CPUS];
603 RTCPUID idCpu = 0;
604
605 memset(aRc, 0, sizeof(aRc));
606
607 /* Allocate one page per cpu for the global vt-x and amd-v pages */
608            for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
609 {
610 Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);
611
612 /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
613 if (RTMpIsCpuOnline(i))
614 {
615 rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
616 AssertRC(rc);
617 if (RT_FAILURE(rc))
618 return rc;
619
620 void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
621 Assert(pvR0);
622 ASMMemZeroPage(pvR0);
623
624#if defined(LOG_ENABLED) && !defined(DEBUG_bird)
625 SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
626#endif
627 }
628 }
629 /* First time, so initialize each cpu/core */
630 rc = RTMpOnAll(hwaccmR0EnableCPU, (void *)pVM, aRc);
631
632 /* Check the return code of all invocations. */
633 if (RT_SUCCESS(rc))
634 rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
635 AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
636 }
637
638 return rc;
639 }
640 return VINF_SUCCESS;
641}
642
643/**
644 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
645 * is to be called on the target cpus.
646 *
647 * @param idCpu The identifier for the CPU the function is called on.
648 * @param pvUser1 The 1st user argument.
649 * @param pvUser2 The 2nd user argument.
650 */
651static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
652{
653 PVM pVM = (PVM)pvUser1; /* can be NULL! */
654 int *paRc = (int *)pvUser2;
655 void *pvPageCpu;
656 RTHCPHYS pPageCpuPhys;
657 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
658
659 Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
660 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
661 Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
662 Assert(!pCpu->fConfigured);
663 Assert(ASMAtomicReadBool(&pCpu->fInUse) == false);
664
665 pCpu->idCpu = idCpu;
666
667 /* Make sure we start with a clean TLB. */
668 pCpu->fFlushTLB = true;
669
670    pCpu->uCurrentASID = 0; /* we'll always increment this the first time (host uses ASID 0) */
671 pCpu->cTLBFlushes = 0;
672
673 /* Should never happen */
674 if (!pCpu->pMemObj)
675 {
676 AssertFailed();
677 paRc[idCpu] = VERR_INTERNAL_ERROR;
678 return;
679 }
680
681 pvPageCpu = RTR0MemObjAddress(pCpu->pMemObj);
682 pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
683
684 paRc[idCpu] = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
685 AssertRC(paRc[idCpu]);
686 if (RT_SUCCESS(paRc[idCpu]))
687 pCpu->fConfigured = true;
688
689 return;
690}
691
692/**
693 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
694 * is to be called on the target cpus.
695 *
696 * @param idCpu The identifier for the CPU the function is called on.
697 * @param pvUser1 The 1st user argument.
698 * @param pvUser2 The 2nd user argument.
699 */
700static DECLCALLBACK(void) hwaccmR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
701{
702 void *pvPageCpu;
703 RTHCPHYS pPageCpuPhys;
704 int *paRc = (int *)pvUser1;
705 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
706
707 Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
708 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
709 Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
710 Assert(ASMAtomicReadBool(&pCpu->fInUse) == false);
711 Assert(!pCpu->fConfigured || pCpu->pMemObj);
712
713 if (!pCpu->pMemObj)
714 return;
715
716 pvPageCpu = RTR0MemObjAddress(pCpu->pMemObj);
717 pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
718
719 if (pCpu->fConfigured)
720 {
721 paRc[idCpu] = HWACCMR0Globals.pfnDisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
722 AssertRC(paRc[idCpu]);
723 pCpu->fConfigured = false;
724 }
725 else
726 paRc[idCpu] = VINF_SUCCESS; /* nothing to do */
727
728 pCpu->uCurrentASID = 0;
729 return;
730}
731
732/**
733 * Called whenever a system power state change occurs.
734 *
735 * @param enmEvent Power event
736 * @param pvUser User argument
737 */
738static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser)
739{
740 NOREF(pvUser);
741 Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
742
743#ifdef LOG_ENABLED
744 if (enmEvent == RTPOWEREVENT_SUSPEND)
745 SUPR0Printf("hwaccmR0PowerCallback RTPOWEREVENT_SUSPEND\n");
746 else
747 SUPR0Printf("hwaccmR0PowerCallback RTPOWEREVENT_RESUME\n");
748#endif
749
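    /* Set the suspend flag before anything else so that no new HWACCM sessions can be
       entered while VT-x/AMD-V is being turned off below. */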
750 if (enmEvent == RTPOWEREVENT_SUSPEND)
751 ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, true);
752
753 if (HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED)
754 {
755 int aRc[RTCPUSET_MAX_CPUS];
756 int rc;
757 RTCPUID idCpu;
758
759 memset(aRc, 0, sizeof(aRc));
760 if (enmEvent == RTPOWEREVENT_SUSPEND)
761 {
762 /* Turn off VT-x or AMD-V on all CPUs. */
763 rc = RTMpOnAll(hwaccmR0DisableCPU, aRc, NULL);
764 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
765 }
766 else
767 {
768 /* Reinit the CPUs from scratch as the suspend state has messed with the MSRs. */
769 rc = RTMpOnAll(HWACCMR0InitCPU, (void *)((HWACCMR0Globals.vmx.fSupported) ? X86_CPUID_VENDOR_INTEL_EBX : X86_CPUID_VENDOR_AMD_EBX), aRc);
770 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
771
772 if (RT_SUCCESS(rc))
773 rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
774#ifdef LOG_ENABLED
775 if (RT_FAILURE(rc))
776 SUPR0Printf("hwaccmR0PowerCallback HWACCMR0InitCPU failed with %d\n", rc);
777#endif
778
779            /* Turn VT-x or AMD-V back on for all CPUs. */
780 rc = RTMpOnAll(hwaccmR0EnableCPU, NULL, aRc);
781 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
782 }
783 }
784 if (enmEvent == RTPOWEREVENT_RESUME)
785 ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, false);
786}
787
788
789/**
790 * Does Ring-0 per VM HWACCM initialization.
791 *
792 * This is mainly to check that the Host CPU mode is compatible
793 * with VMX.
794 *
795 * @returns VBox status code.
796 * @param pVM The VM to operate on.
797 */
798VMMR0DECL(int) HWACCMR0InitVM(PVM pVM)
799{
800 int rc;
801
802 AssertReturn(pVM, VERR_INVALID_PARAMETER);
803
804#ifdef LOG_ENABLED
805 SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
806#endif
807
808    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation for a suspend. */
809 if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
810 return VERR_HWACCM_SUSPEND_PENDING;
811
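    /* Copy the globally detected VT-x/AMD-V capabilities and MSR values into the
       per-VM HWACCM state for use by the VM-specific init code. */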
812 pVM->hwaccm.s.vmx.fSupported = HWACCMR0Globals.vmx.fSupported;
813 pVM->hwaccm.s.svm.fSupported = HWACCMR0Globals.svm.fSupported;
814
815 pVM->hwaccm.s.vmx.msr.feature_ctrl = HWACCMR0Globals.vmx.msr.feature_ctrl;
816 pVM->hwaccm.s.vmx.hostCR4 = HWACCMR0Globals.vmx.hostCR4;
817 pVM->hwaccm.s.vmx.msr.vmx_basic_info = HWACCMR0Globals.vmx.msr.vmx_basic_info;
818 pVM->hwaccm.s.vmx.msr.vmx_pin_ctls = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
819 pVM->hwaccm.s.vmx.msr.vmx_proc_ctls = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
820 pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2 = HWACCMR0Globals.vmx.msr.vmx_proc_ctls2;
821 pVM->hwaccm.s.vmx.msr.vmx_exit = HWACCMR0Globals.vmx.msr.vmx_exit;
822 pVM->hwaccm.s.vmx.msr.vmx_entry = HWACCMR0Globals.vmx.msr.vmx_entry;
823 pVM->hwaccm.s.vmx.msr.vmx_misc = HWACCMR0Globals.vmx.msr.vmx_misc;
824 pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
825 pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
826 pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
827 pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
828 pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
829 pVM->hwaccm.s.vmx.msr.vmx_eptcaps = HWACCMR0Globals.vmx.msr.vmx_eptcaps;
830 pVM->hwaccm.s.svm.u32Rev = HWACCMR0Globals.svm.u32Rev;
831 pVM->hwaccm.s.svm.u32Features = HWACCMR0Globals.svm.u32Features;
832 pVM->hwaccm.s.cpuid.u32AMDFeatureECX = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
833 pVM->hwaccm.s.cpuid.u32AMDFeatureEDX = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
834 pVM->hwaccm.s.lLastError = HWACCMR0Globals.lLastError;
835
836 pVM->hwaccm.s.uMaxASID = HWACCMR0Globals.uMaxASID;
837
838    for (unsigned i = 0; i < pVM->cCPUs; i++)
839 {
840 PVMCPU pVCpu = &pVM->aCpus[i];
841
842 pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
843
844 /* Invalidate the last cpu we were running on. */
845 pVCpu->hwaccm.s.idLastCpu = NIL_RTCPUID;
846
847        /* we'll always increment this the first time (host uses ASID 0) */
848 pVCpu->hwaccm.s.uCurrentASID = 0;
849 }
850
851 RTCCUINTREG fFlags = ASMIntDisableFlags();
852 PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
853
854 /* @note Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
855 ASMAtomicWriteBool(&pCpu->fInUse, true);
856 ASMSetFlags(fFlags);
857
858 /* Init a VT-x or AMD-V VM. */
859 rc = HWACCMR0Globals.pfnInitVM(pVM);
860
861 ASMAtomicWriteBool(&pCpu->fInUse, false);
862 return rc;
863}
864
865
866/**
867 * Does Ring-0 per VM HWACCM termination.
868 *
869 * @returns VBox status code.
870 * @param pVM The VM to operate on.
871 */
872VMMR0DECL(int) HWACCMR0TermVM(PVM pVM)
873{
874 int rc;
875
876 AssertReturn(pVM, VERR_INVALID_PARAMETER);
877
878#ifdef LOG_ENABLED
879 SUPR0Printf("HWACCMR0TermVM: %p\n", pVM);
880#endif
881
882    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation for a suspend. */
883 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
884
885 /* @note Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */
886 RTCCUINTREG fFlags = ASMIntDisableFlags();
887 PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
888
889 ASMAtomicWriteBool(&pCpu->fInUse, true);
890 ASMSetFlags(fFlags);
891
892 /* Terminate a VT-x or AMD-V VM. */
893 rc = HWACCMR0Globals.pfnTermVM(pVM);
894
895 ASMAtomicWriteBool(&pCpu->fInUse, false);
896 return rc;
897}
898
899
900/**
901 * Sets up a VT-x or AMD-V session
902 *
903 * @returns VBox status code.
904 * @param pVM The VM to operate on.
905 */
906VMMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
907{
908 int rc;
909 RTCPUID idCpu = RTMpCpuId();
910 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
911
912 AssertReturn(pVM, VERR_INVALID_PARAMETER);
913
914    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation for a suspend. */
915 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
916
917#ifdef LOG_ENABLED
918 SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM);
919#endif
920
921 ASMAtomicWriteBool(&pCpu->fInUse, true);
922
923    for (unsigned i = 0; i < pVM->cCPUs; i++)
924 {
925 /* On first entry we'll sync everything. */
926 pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
927 }
928
929 /* Setup VT-x or AMD-V. */
930 rc = HWACCMR0Globals.pfnSetupVM(pVM);
931
932 ASMAtomicWriteBool(&pCpu->fInUse, false);
933
934 return rc;
935}
936
937
938/**
939 * Enters the VT-x or AMD-V session
940 *
941 * @returns VBox status code.
942 * @param pVM The VM to operate on.
943 * @param pVCpu VMCPU handle.
944 */
945VMMR0DECL(int) HWACCMR0Enter(PVM pVM, PVMCPU pVCpu)
946{
947 PCPUMCTX pCtx;
948 int rc;
949 RTCPUID idCpu = RTMpCpuId();
950 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
951
952    /* Make sure we can't enter a session after we've disabled hwaccm in preparation for a suspend. */
953 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
954 ASMAtomicWriteBool(&pCpu->fInUse, true);
955
956 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
957
958 /* Always load the guest's FPU/XMM state on-demand. */
959 CPUMDeactivateGuestFPUState(pVCpu);
960
961 /* Always load the guest's debug state on-demand. */
962 CPUMDeactivateGuestDebugState(pVCpu);
963
964 /* Always reload the host context and the guest's CR0 register. (!!!!) */
965 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
966
967 /* Setup the register and mask according to the current execution mode. */
968 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
969 pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
970 else
971 pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);
972
973 rc = HWACCMR0Globals.pfnEnterSession(pVM, pVCpu, pCpu);
974 AssertRC(rc);
975 /* We must save the host context here (VT-x) as we might be rescheduled on a different cpu after a long jump back to ring 3. */
976 rc |= HWACCMR0Globals.pfnSaveHostState(pVM, pVCpu);
977 AssertRC(rc);
978 rc |= HWACCMR0Globals.pfnLoadGuestState(pVM, pVCpu, pCtx);
979 AssertRC(rc);
980
981 /* keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
982 if (RT_SUCCESS(rc))
983 {
984 AssertMsg(pVCpu->hwaccm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hwaccm.s.idEnteredCpu));
985 pVCpu->hwaccm.s.idEnteredCpu = idCpu;
986
987#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
988 PGMDynMapMigrateAutoSet(pVCpu);
989#endif
990 }
991 return rc;
992}
993
994
995/**
996 * Leaves the VT-x or AMD-V session
997 *
998 * @returns VBox status code.
999 * @param pVM The VM to operate on.
1000 * @param pVCpu VMCPU handle.
1001 */
1002VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
1003{
1004 PCPUMCTX pCtx;
1005 int rc;
1006 RTCPUID idCpu = RTMpCpuId();
1007 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
1008
1009 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
1010
1011 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1012
1013 /* Note: It's rather tricky with longjmps done by e.g. Log statements or the page fault handler.
1014 * We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active
1015 * or trash somebody else's FPU state.
1016 */
1017 /* Save the guest FPU and XMM state if necessary. */
1018 if (CPUMIsGuestFPUStateActive(pVCpu))
1019 {
1020 Log2(("CPUMR0SaveGuestFPU\n"));
1021 CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
1022
1023 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1024 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
1025 }
1026
1027 rc = HWACCMR0Globals.pfnLeaveSession(pVM, pVCpu, pCtx);
1028
1029 /* keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
1030#ifdef RT_STRICT
1031 if (RT_UNLIKELY( pVCpu->hwaccm.s.idEnteredCpu != idCpu
1032 && RT_FAILURE(rc)))
1033 {
1034 AssertMsgFailed(("Owner is %d, I'm %d", (int)pVCpu->hwaccm.s.idEnteredCpu, (int)idCpu));
1035 rc = VERR_INTERNAL_ERROR;
1036 }
1037#endif
1038 pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
1039
1040 ASMAtomicWriteBool(&pCpu->fInUse, false);
1041 return rc;
1042}
1043
1044/**
1045 * Runs guest code in a hardware accelerated VM.
1046 *
1047 * @returns VBox status code.
1048 * @param pVM The VM to operate on.
1049 * @param pVCpu VMCPU handle.
1050 */
1051VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
1052{
1053 CPUMCTX *pCtx;
1054 RTCPUID idCpu = RTMpCpuId(); NOREF(idCpu);
1055 int rc;
1056#ifdef VBOX_STRICT
1057 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
1058#endif
1059
1060 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
1061 Assert(HWACCMR0Globals.aCpuInfo[idCpu].fConfigured);
1062 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
1063 Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
1064
1065#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1066 PGMDynMapStartAutoSet(pVCpu);
1067#endif
1068
1069 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1070
1071 rc = HWACCMR0Globals.pfnRunGuestCode(pVM, pVCpu, pCtx);
1072
1073#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1074 PGMDynMapReleaseAutoSet(pVCpu);
1075#endif
1076 return rc;
1077}
1078
1079
1080#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
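/* On 32-bit hosts running 64-bit guests, the helpers below run the corresponding
   64-bit worker code via VMXR0Execute64BitsHandler / SVMR0Execute64BitsHandler. */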
1081/**
1082 * Save guest FPU/XMM state (64-bit guest mode & 32-bit host only)
1083 *
1084 * @returns VBox status code.
1085 * @param pVM VM handle.
1086 * @param pVCpu VMCPU handle.
1087 * @param pCtx CPU context
1088 */
1089VMMR0DECL(int) HWACCMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1090{
1091 if (pVM->hwaccm.s.vmx.fSupported)
1092 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
1093
1094 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
1095}
1096
1097/**
1098 * Save guest debug state (64-bit guest mode & 32-bit host only)
1099 *
1100 * @returns VBox status code.
1101 * @param pVM VM handle.
1102 * @param pVCpu VMCPU handle.
1103 * @param pCtx CPU context
1104 */
1105VMMR0DECL(int) HWACCMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1106{
1107 if (pVM->hwaccm.s.vmx.fSupported)
1108 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
1109
1110 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
1111}
1112
1113/**
1114 * Test the 32->64-bit switcher
1115 *
1116 * @returns VBox status code.
1117 * @param pVM VM handle.
1118 */
1119VMMR0DECL(int) HWACCMR0TestSwitcher3264(PVM pVM)
1120{
1121 PVMCPU pVCpu = &pVM->aCpus[0];
1122 CPUMCTX *pCtx;
1123 uint32_t aParam[5] = {0, 1, 2, 3, 4};
1124 int rc;
1125
1126 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1127
1128 STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
1129 if (pVM->hwaccm.s.vmx.fSupported)
1130 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
1131 else
1132 rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
1133 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
1134 return rc;
1135}
1136
1137#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
1138
1139/**
1140 * Returns suspend status of the host
1141 *
1142 * @returns Suspend pending or not
1143 */
1144VMMR0DECL(bool) HWACCMR0SuspendPending()
1145{
1146 return ASMAtomicReadBool(&HWACCMR0Globals.fSuspended);
1147}
1148
1149/**
1150 * Returns the cpu structure for the current cpu.
1151 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
1152 *
1153 * @returns cpu structure pointer
1154 */
1155VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu()
1156{
1157 RTCPUID idCpu = RTMpCpuId();
1158
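    /* Note: assumes the host cpu id can be used directly as an index into aCpuInfo
       (see the idCpu == set index @todo in the per-cpu worker functions). */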
1159 return &HWACCMR0Globals.aCpuInfo[idCpu];
1160}
1161
1162/**
1163 * Returns the cpu structure for the specified cpu.
1164 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
1165 *
1166 * @returns cpu structure pointer
1167 * @param idCpu id of the cpu
1168 */
1169VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu)
1170{
1171 return &HWACCMR0Globals.aCpuInfo[idCpu];
1172}
1173
1174/**
1175 * Disable VT-x if it's active *and* the current switcher turns off paging
1176 *
1177 * @returns VBox status code.
1178 * @param pVM VM handle.
1179 * @param pfVTxDisabled VT-x was disabled or not (out)
1180 */
1181VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, bool *pfVTxDisabled)
1182{
1183 Assert(!(ASMGetFlags() & X86_EFL_IF));
1184
1185 *pfVTxDisabled = false;
1186
1187 if ( HWACCMR0Globals.enmHwAccmState != HWACCMSTATE_ENABLED
1188 || !HWACCMR0Globals.vmx.fSupported /* no such issues with AMD-V */)
1189 return VINF_SUCCESS; /* nothing to do */
1190
1191 switch(VMMGetSwitcher(pVM))
1192 {
1193 case VMMSWITCHER_32_TO_32:
1194 case VMMSWITCHER_PAE_TO_PAE:
1195 return VINF_SUCCESS; /* safe switchers as they don't turn off paging */
1196
1197 case VMMSWITCHER_32_TO_PAE:
1198 case VMMSWITCHER_PAE_TO_32: /* is this one actually used?? */
1199 case VMMSWITCHER_AMD64_TO_32:
1200 case VMMSWITCHER_AMD64_TO_PAE:
1201 break; /* unsafe switchers */
1202
1203 default:
1204 AssertFailed();
1205 return VERR_INTERNAL_ERROR;
1206 }
1207
1208 PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
1209 void *pvPageCpu;
1210 RTHCPHYS pPageCpuPhys;
1211
1212 AssertReturn(pCpu && pCpu->pMemObj, VERR_INTERNAL_ERROR);
1213 pvPageCpu = RTR0MemObjAddress(pCpu->pMemObj);
1214 pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
1215
1216 *pfVTxDisabled = true;
1217 return VMXR0DisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
1218}
1219
1220/**
1221 * Re-enable VT-x if it was active *and* the current switcher turned off paging
1222 *
1223 * @returns VBox status code.
1224 * @param pVM VM handle.
1225 * @param fVTxDisabled VT-x was disabled or not
1226 */
1227VMMR0DECL(int) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
1228{
1229 Assert(!(ASMGetFlags() & X86_EFL_IF));
1230
1231 if (!fVTxDisabled)
1232 return VINF_SUCCESS; /* nothing to do */
1233
1234 Assert( HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED
1235 && HWACCMR0Globals.vmx.fSupported);
1236
1237 PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
1238 void *pvPageCpu;
1239 RTHCPHYS pPageCpuPhys;
1240
1241 AssertReturn(pCpu && pCpu->pMemObj, VERR_INTERNAL_ERROR);
1242 pvPageCpu = RTR0MemObjAddress(pCpu->pMemObj);
1243 pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
1244
1245 return VMXR0EnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
1246}
1247
1248#ifdef VBOX_STRICT
1249# include <iprt/string.h>
1250/**
1251 * Dumps a descriptor.
1252 *
1253 * @param pDesc Descriptor to dump.
1254 * @param Sel Selector number.
1255 * @param pszMsg Message to prepend the log entry with.
1256 */
1257VMMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
1258{
1259 /*
1260 * Make variable description string.
1261 */
1262 static struct
1263 {
1264 unsigned cch;
1265 const char *psz;
1266 } const aTypes[32] =
1267 {
1268# define STRENTRY(str) { sizeof(str) - 1, str }
1269
1270 /* system */
1271# if HC_ARCH_BITS == 64
1272 STRENTRY("Reserved0 "), /* 0x00 */
1273 STRENTRY("Reserved1 "), /* 0x01 */
1274 STRENTRY("LDT "), /* 0x02 */
1275 STRENTRY("Reserved3 "), /* 0x03 */
1276 STRENTRY("Reserved4 "), /* 0x04 */
1277 STRENTRY("Reserved5 "), /* 0x05 */
1278 STRENTRY("Reserved6 "), /* 0x06 */
1279 STRENTRY("Reserved7 "), /* 0x07 */
1280 STRENTRY("Reserved8 "), /* 0x08 */
1281 STRENTRY("TSS64Avail "), /* 0x09 */
1282 STRENTRY("ReservedA "), /* 0x0a */
1283 STRENTRY("TSS64Busy "), /* 0x0b */
1284 STRENTRY("Call64 "), /* 0x0c */
1285 STRENTRY("ReservedD "), /* 0x0d */
1286 STRENTRY("Int64 "), /* 0x0e */
1287 STRENTRY("Trap64 "), /* 0x0f */
1288# else
1289 STRENTRY("Reserved0 "), /* 0x00 */
1290 STRENTRY("TSS16Avail "), /* 0x01 */
1291 STRENTRY("LDT "), /* 0x02 */
1292 STRENTRY("TSS16Busy "), /* 0x03 */
1293 STRENTRY("Call16 "), /* 0x04 */
1294 STRENTRY("Task "), /* 0x05 */
1295 STRENTRY("Int16 "), /* 0x06 */
1296 STRENTRY("Trap16 "), /* 0x07 */
1297 STRENTRY("Reserved8 "), /* 0x08 */
1298 STRENTRY("TSS32Avail "), /* 0x09 */
1299 STRENTRY("ReservedA "), /* 0x0a */
1300 STRENTRY("TSS32Busy "), /* 0x0b */
1301 STRENTRY("Call32 "), /* 0x0c */
1302 STRENTRY("ReservedD "), /* 0x0d */
1303 STRENTRY("Int32 "), /* 0x0e */
1304 STRENTRY("Trap32 "), /* 0x0f */
1305# endif
1306 /* non system */
1307 STRENTRY("DataRO "), /* 0x10 */
1308 STRENTRY("DataRO Accessed "), /* 0x11 */
1309 STRENTRY("DataRW "), /* 0x12 */
1310 STRENTRY("DataRW Accessed "), /* 0x13 */
1311 STRENTRY("DataDownRO "), /* 0x14 */
1312 STRENTRY("DataDownRO Accessed "), /* 0x15 */
1313 STRENTRY("DataDownRW "), /* 0x16 */
1314 STRENTRY("DataDownRW Accessed "), /* 0x17 */
1315 STRENTRY("CodeEO "), /* 0x18 */
1316 STRENTRY("CodeEO Accessed "), /* 0x19 */
1317 STRENTRY("CodeER "), /* 0x1a */
1318 STRENTRY("CodeER Accessed "), /* 0x1b */
1319 STRENTRY("CodeConfEO "), /* 0x1c */
1320 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
1321 STRENTRY("CodeConfER "), /* 0x1e */
1322 STRENTRY("CodeConfER Accessed ") /* 0x1f */
1323# undef STRENTRY
1324 };
1325# define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
1326 char szMsg[128];
1327 char *psz = &szMsg[0];
1328 unsigned i = pDesc->Gen.u1DescType << 4 | pDesc->Gen.u4Type;
1329 memcpy(psz, aTypes[i].psz, aTypes[i].cch);
1330 psz += aTypes[i].cch;
1331
1332 if (pDesc->Gen.u1Present)
1333 ADD_STR(psz, "Present ");
1334 else
1335 ADD_STR(psz, "Not-Present ");
1336# if HC_ARCH_BITS == 64
1337 if (pDesc->Gen.u1Long)
1338 ADD_STR(psz, "64-bit ");
1339 else
1340 ADD_STR(psz, "Comp ");
1341# else
1342 if (pDesc->Gen.u1Granularity)
1343 ADD_STR(psz, "Page ");
1344 if (pDesc->Gen.u1DefBig)
1345 ADD_STR(psz, "32-bit ");
1346 else
1347 ADD_STR(psz, "16-bit ");
1348# endif
1349# undef ADD_STR
1350 *psz = '\0';
1351
1352 /*
1353 * Limit and Base and format the output.
1354 */
1355 uint32_t u32Limit = X86DESC_LIMIT(*pDesc);
1356 if (pDesc->Gen.u1Granularity)
1357 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
1358
1359# if HC_ARCH_BITS == 64
1360 uint64_t u32Base = X86DESC64_BASE(*pDesc);
1361
1362 Log(("%s %04x - %RX64 %RX64 - base=%RX64 limit=%08x dpl=%d %s\n", pszMsg,
1363 Sel, pDesc->au64[0], pDesc->au64[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
1364# else
1365 uint32_t u32Base = X86DESC_BASE(*pDesc);
1366
1367 Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
1368 Sel, pDesc->au32[0], pDesc->au32[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
1369# endif
1370}
1371
1372/**
1373 * Formats a full register dump.
1374 *
1375 * @param pVM The VM to operate on.
1376 * @param pVCpu The VMCPU to operate on.
1377 * @param pCtx The context to format.
1378 */
1379VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1380{
1381 /*
1382 * Format the flags.
1383 */
1384 static struct
1385 {
1386 const char *pszSet; const char *pszClear; uint32_t fFlag;
1387 } aFlags[] =
1388 {
1389 { "vip",NULL, X86_EFL_VIP },
1390 { "vif",NULL, X86_EFL_VIF },
1391 { "ac", NULL, X86_EFL_AC },
1392 { "vm", NULL, X86_EFL_VM },
1393 { "rf", NULL, X86_EFL_RF },
1394 { "nt", NULL, X86_EFL_NT },
1395 { "ov", "nv", X86_EFL_OF },
1396 { "dn", "up", X86_EFL_DF },
1397 { "ei", "di", X86_EFL_IF },
1398 { "tf", NULL, X86_EFL_TF },
1399        { "ng", "pl", X86_EFL_SF },
1400 { "nz", "zr", X86_EFL_ZF },
1401 { "ac", "na", X86_EFL_AF },
1402 { "po", "pe", X86_EFL_PF },
1403 { "cy", "nc", X86_EFL_CF },
1404 };
1405 char szEFlags[80];
1406 char *psz = szEFlags;
1407 uint32_t efl = pCtx->eflags.u32;
1408 for (unsigned i = 0; i < RT_ELEMENTS(aFlags); i++)
1409 {
1410 const char *pszAdd = aFlags[i].fFlag & efl ? aFlags[i].pszSet : aFlags[i].pszClear;
1411 if (pszAdd)
1412 {
1413 strcpy(psz, pszAdd);
1414 psz += strlen(pszAdd);
1415 *psz++ = ' ';
1416 }
1417 }
1418 psz[-1] = '\0';
1419
1420
1421 /*
1422 * Format the registers.
1423 */
1424 if (CPUMIsGuestIn64BitCode(pVCpu, CPUMCTX2CORE(pCtx)))
1425 {
1426 Log(("rax=%016RX64 rbx=%016RX64 rcx=%016RX64 rdx=%016RX64\n"
1427 "rsi=%016RX64 rdi=%016RX64 r8 =%016RX64 r9 =%016RX64\n"
1428 "r10=%016RX64 r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1429 "r14=%016RX64 r15=%016RX64\n"
1430 "rip=%016RX64 rsp=%016RX64 rbp=%016RX64 iopl=%d %*s\n"
1431 "cs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1432 "ds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1433 "es={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1434 "fs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1435 "gs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1436 "ss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1437 "cr0=%016RX64 cr2=%016RX64 cr3=%016RX64 cr4=%016RX64\n"
1438 "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64 dr3=%016RX64\n"
1439 "dr4=%016RX64 dr5=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
1440 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1441 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1442 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1443 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1444 ,
1445 pCtx->rax, pCtx->rbx, pCtx->rcx, pCtx->rdx, pCtx->rsi, pCtx->rdi,
1446 pCtx->r8, pCtx->r9, pCtx->r10, pCtx->r11, pCtx->r12, pCtx->r13,
1447 pCtx->r14, pCtx->r15,
1448 pCtx->rip, pCtx->rsp, pCtx->rbp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
1449 (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
1450 (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
1451 (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
1452 (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
1453 (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
1454 (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
1455 pCtx->cr0, pCtx->cr2, pCtx->cr3, pCtx->cr4,
1456 pCtx->dr[0], pCtx->dr[1], pCtx->dr[2], pCtx->dr[3],
1457 pCtx->dr[4], pCtx->dr[5], pCtx->dr[6], pCtx->dr[7],
1458 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
1459 (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1460 (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1461 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1462 }
1463 else
1464 Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
1465 "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
1466 "cs={%04x base=%016RX64 limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
1467 "ds={%04x base=%016RX64 limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
1468 "es={%04x base=%016RX64 limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
1469 "fs={%04x base=%016RX64 limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
1470 "gs={%04x base=%016RX64 limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
1471 "ss={%04x base=%016RX64 limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
1472 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1473 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1474 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1475 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1476 ,
1477 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
1478 pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
1479 (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr[0], pCtx->dr[1],
1480 (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr[2], pCtx->dr[3],
1481 (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr[4], pCtx->dr[5],
1482 (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr[6], pCtx->dr[7],
1483 (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
1484 (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
1485 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
1486 (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
1487 (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
1488 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1489
1490 Log(("FPU:\n"
1491 "FCW=%04x FSW=%04x FTW=%02x\n"
1492 "res1=%02x FOP=%04x FPUIP=%08x CS=%04x Rsvrd1=%04x\n"
1493 "FPUDP=%04x DS=%04x Rsvrd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n"
1494 ,
1495 pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW,
1496 pCtx->fpu.huh1, pCtx->fpu.FOP, pCtx->fpu.FPUIP, pCtx->fpu.CS, pCtx->fpu.Rsvrd1,
1497 pCtx->fpu.FPUDP, pCtx->fpu.DS, pCtx->fpu.Rsrvd2,
1498 pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK));
1499
1500
1501 Log(("MSR:\n"
1502 "EFER =%016RX64\n"
1503 "PAT =%016RX64\n"
1504 "STAR =%016RX64\n"
1505 "CSTAR =%016RX64\n"
1506 "LSTAR =%016RX64\n"
1507 "SFMASK =%016RX64\n"
1508 "KERNELGSBASE =%016RX64\n",
1509 pCtx->msrEFER,
1510 pCtx->msrPAT,
1511 pCtx->msrSTAR,
1512 pCtx->msrCSTAR,
1513 pCtx->msrLSTAR,
1514 pCtx->msrSFMASK,
1515 pCtx->msrKERNELGSBASE));
1516
1517}
1518#endif /* VBOX_STRICT */
1519
1520/* Dummy callback handlers. */
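/* Installed by HWACCMR0Init() and left in place when neither VT-x nor AMD-V is usable,
   so the HWACCMR0Globals.pfn* entry points are always safe to call. */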
1521VMMR0DECL(int) HWACCMR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
1522{
1523 return VINF_SUCCESS;
1524}
1525
1526VMMR0DECL(int) HWACCMR0DummyLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1527{
1528 return VINF_SUCCESS;
1529}
1530
1531VMMR0DECL(int) HWACCMR0DummyEnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
1532{
1533 return VINF_SUCCESS;
1534}
1535
1536VMMR0DECL(int) HWACCMR0DummyDisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
1537{
1538 return VINF_SUCCESS;
1539}
1540
1541VMMR0DECL(int) HWACCMR0DummyInitVM(PVM pVM)
1542{
1543 return VINF_SUCCESS;
1544}
1545
1546VMMR0DECL(int) HWACCMR0DummyTermVM(PVM pVM)
1547{
1548 return VINF_SUCCESS;
1549}
1550
1551VMMR0DECL(int) HWACCMR0DummySetupVM(PVM pVM)
1552{
1553 return VINF_SUCCESS;
1554}
1555
1556VMMR0DECL(int) HWACCMR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1557{
1558 return VINF_SUCCESS;
1559}
1560
1561VMMR0DECL(int) HWACCMR0DummySaveHostState(PVM pVM, PVMCPU pVCpu)
1562{
1563 return VINF_SUCCESS;
1564}
1565
1566VMMR0DECL(int) HWACCMR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1567{
1568 return VINF_SUCCESS;
1569}