VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp@41801

Last change on this file since 41801 was 41776, checked in by vboxsync, 13 years ago

doxygen comments.

1/* $Id: HWACCMR0.cpp 41776 2012-06-16 18:36:56Z vboxsync $ */
2/** @file
3 * Hardware Assisted Virtualization Manager (HM) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2006-2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_HWACCM
23#include <VBox/vmm/hwaccm.h>
24#include <VBox/vmm/pgm.h>
25#include "HWACCMInternal.h"
26#include <VBox/vmm/vm.h>
27#include <VBox/vmm/hwacc_vmx.h>
28#include <VBox/vmm/hwacc_svm.h>
29#include <VBox/err.h>
30#include <VBox/log.h>
31#include <iprt/assert.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/cpuset.h>
35#include <iprt/mem.h>
36#include <iprt/memobj.h>
37#include <iprt/once.h>
38#include <iprt/param.h>
39#include <iprt/power.h>
40#include <iprt/string.h>
41#include <iprt/thread.h>
42#include <iprt/x86.h>
43#include "HWVMXR0.h"
44#include "HWSVMR0.h"
45
46
47/*******************************************************************************
48* Internal Functions *
49*******************************************************************************/
50static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
51static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
52static DECLCALLBACK(void) hmR0InitIntelCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
53static DECLCALLBACK(void) hmR0InitAmdCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
54static DECLCALLBACK(void) hmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
55static DECLCALLBACK(void) hmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData);
56
57
58/*******************************************************************************
59* Structures and Typedefs *
60*******************************************************************************/
61/**
62 * This is used to manage the status code of a RTMpOnAll in HM.
63 */
64typedef struct HMR0FIRSTRC
65{
66 /** The status code. */
67 int32_t volatile rc;
68 /** The ID of the CPU reporting the first failure. */
69 RTCPUID volatile idCpu;
70} HMR0FIRSTRC;
71/** Pointer to a first return code structure. */
72typedef HMR0FIRSTRC *PHMR0FIRSTRC;
73
74
75/*******************************************************************************
76* Global Variables *
77*******************************************************************************/
78/**
79 * Global data.
80 */
81static struct
82{
83 /** Per CPU globals. */
84 HMGLOBLCPUINFO aCpuInfo[RTCPUSET_MAX_CPUS];
85
86 /** @name Ring-0 method table for AMD-V and VT-x specific operations.
87 * @{ */
88 DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu));
89 DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
90 DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
91 DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
92 DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
93 DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
94 DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
95 DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM));
96 DECLR0CALLBACKMEMBER(int, pfnTermVM,(PVM pVM));
97 DECLR0CALLBACKMEMBER(int, pfnSetupVM,(PVM pVM));
98 /** @} */
99
100 /** Maximum ASID allowed. */
101 uint32_t uMaxASID;
102
103 /** VT-x data. */
104 struct
105 {
106 /** Set by us to indicate VMX is supported by the CPU. */
107 bool fSupported;
108 /** Whether we're using SUPR0EnableVTx or not. */
109 bool fUsingSUPR0EnableVTx;
110 /** Whether we're using the preemption timer or not. */
111 bool fUsePreemptTimer;
112 /** The shift mask employed by the VMX-Preemption timer. */
113 uint8_t cPreemptTimerShift;
114
115 /** Host CR4 value (set by ring-0 VMX init) */
116 uint64_t hostCR4;
117
118 /** Host EFER value (set by ring-0 VMX init) */
119 uint64_t hostEFER;
120
121 /** VMX MSR values */
122 struct
123 {
124 uint64_t feature_ctrl;
125 uint64_t vmx_basic_info;
126 VMX_CAPABILITY vmx_pin_ctls;
127 VMX_CAPABILITY vmx_proc_ctls;
128 VMX_CAPABILITY vmx_proc_ctls2;
129 VMX_CAPABILITY vmx_exit;
130 VMX_CAPABILITY vmx_entry;
131 uint64_t vmx_misc;
132 uint64_t vmx_cr0_fixed0;
133 uint64_t vmx_cr0_fixed1;
134 uint64_t vmx_cr4_fixed0;
135 uint64_t vmx_cr4_fixed1;
136 uint64_t vmx_vmcs_enum;
137 uint64_t vmx_eptcaps;
138 } msr;
139 /** Last instruction error. */
140 uint32_t ulLastInstrError;
141 } vmx;
142
143 /** AMD-V information. */
144 struct
145 {
146 /** HWCR MSR (for diagnostics). */
147 uint64_t msrHWCR;
148
149 /** SVM revision. */
150 uint32_t u32Rev;
151
152 /** SVM feature bits from cpuid 0x8000000a */
153 uint32_t u32Features;
154
155 /** Set by us to indicate SVM is supported by the CPU. */
156 bool fSupported;
157 } svm;
158 /** Saved error from detection */
159 int32_t lLastError;
160
161 struct
162 {
163 uint32_t u32AMDFeatureECX;
164 uint32_t u32AMDFeatureEDX;
165 } cpuid;
166
167 /** If set, VT-x/AMD-V is enabled globally at init time, otherwise it's
168 * enabled and disabled each time it's used to execute guest code. */
169 bool fGlobalInit;
170 /** Indicates whether the host is suspending or not. We'll refuse a few
171 * actions when the host is being suspended to speed up the suspending and
172 * avoid trouble. */
173 volatile bool fSuspended;
174
175 /** Whether we've already initialized all CPUs.
176 * @remarks We could check the EnableAllCpusOnce state, but this is
177 * simpler and hopefully easier to understand. */
178 bool fEnabled;
179 /** Serialize initialization in HWACCMR0EnableAllCpus. */
180 RTONCE EnableAllCpusOnce;
181} g_HvmR0;
182
183
184
185/**
186 * Initializes a first return code structure.
187 *
188 * @param pFirstRc The structure to init.
189 */
190static void hmR0FirstRcInit(PHMR0FIRSTRC pFirstRc)
191{
192 pFirstRc->rc = VINF_SUCCESS;
193 pFirstRc->idCpu = NIL_RTCPUID;
194}
195
196
197/**
198 * Tries to set the status code (success is ignored; only the first failure is recorded).
199 *
200 * @param pFirstRc The first return code structure.
201 * @param rc The status code.
202 */
203static void hmR0FirstRcSetStatus(PHMR0FIRSTRC pFirstRc, int rc)
204{
205 if ( RT_FAILURE(rc)
206 && ASMAtomicCmpXchgS32(&pFirstRc->rc, rc, VINF_SUCCESS))
207 pFirstRc->idCpu = RTMpCpuId();
208}
209
210
211/**
212 * Get the status code of a first return code structure.
213 *
214 * @returns The status code; VINF_SUCCESS or error status, no informational or
215 * warning errors.
216 * @param pFirstRc The first return code structure.
217 */
218static int hmR0FirstRcGetStatus(PHMR0FIRSTRC pFirstRc)
219{
220 return pFirstRc->rc;
221}
222
223
224#ifdef VBOX_STRICT
225/**
226 * Get the CPU ID on which the failure status code was reported.
227 *
228 * @returns The CPU ID, NIL_RTCPUID if no failure was reported.
229 * @param pFirstRc The first return code structure.
230 */
231static RTCPUID hmR0FirstRcGetCpuId(PHMR0FIRSTRC pFirstRc)
232{
233 return pFirstRc->idCpu;
234}
235#endif /* VBOX_STRICT */
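
/* A minimal usage sketch of the first-RC pattern above (illustrative only;
 * assembled from the calls made later in this file). The structure collects
 * the status code of the first CPU that fails during an RTMpOnAll broadcast:
 *
 *     HMR0FIRSTRC FirstRc;
 *     hmR0FirstRcInit(&FirstRc);
 *     int rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL); // worker runs on every CPU
 *     if (RT_SUCCESS(rc))                                   // did the broadcast itself work?
 *         rc = hmR0FirstRcGetStatus(&FirstRc);              // first per-CPU failure, if any
 */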
236
237
238/** @name Dummy callback handlers (placeholders so the method table is always callable).
239 * @{ */
240
241static DECLCALLBACK(int) hmR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
242{
243 NOREF(pVM); NOREF(pVCpu); NOREF(pCpu);
244 return VINF_SUCCESS;
245}
246
247static DECLCALLBACK(int) hmR0DummyLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
248{
249 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
250 return VINF_SUCCESS;
251}
252
253static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
254{
255 NOREF(pCpu); NOREF(pVM); NOREF(pvCpuPage); NOREF(HCPhysCpuPage);
256 return VINF_SUCCESS;
257}
258
259static DECLCALLBACK(int) hmR0DummyDisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
260{
261 NOREF(pCpu); NOREF(pvCpuPage); NOREF(HCPhysCpuPage);
262 return VINF_SUCCESS;
263}
264
265static DECLCALLBACK(int) hmR0DummyInitVM(PVM pVM)
266{
267 NOREF(pVM);
268 return VINF_SUCCESS;
269}
270
271static DECLCALLBACK(int) hmR0DummyTermVM(PVM pVM)
272{
273 NOREF(pVM);
274 return VINF_SUCCESS;
275}
276
277static DECLCALLBACK(int) hmR0DummySetupVM(PVM pVM)
278{
279 NOREF(pVM);
280 return VINF_SUCCESS;
281}
282
283static DECLCALLBACK(int) hmR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
284{
285 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
286 return VINF_SUCCESS;
287}
288
289static DECLCALLBACK(int) hmR0DummySaveHostState(PVM pVM, PVMCPU pVCpu)
290{
291 NOREF(pVM); NOREF(pVCpu);
292 return VINF_SUCCESS;
293}
294
295static DECLCALLBACK(int) hmR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
296{
297 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
298 return VINF_SUCCESS;
299}
300
301/** @} */
302
303
304/**
305 * Checks if the CPU is subject to the "VMX-Preemption Timer Does Not Count
306 * Down at the Rate Specified" erratum.
307 *
308 * Errata names and related steppings:
309 * - BA86 - D0.
310 * - AAX65 - C2.
311 * - AAU65 - C2, K0.
312 * - AAO95 - B1.
313 * - AAT59 - C2.
314 * - AAK139 - D0.
315 * - AAM126 - C0, C1, D0.
316 * - AAN92 - B1.
317 * - AAJ124 - C0, D0.
318 * - AAP86 - B1.
319 *
320 *
321 * Steppings: B1, C0, C1, C2, D0, K0.
322 *
323 * @returns true if subject to it, false if not.
324 */
325static bool hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum(void)
326{
327 uint32_t u = ASMCpuId_EAX(1);
328 u &= ~(RT_BIT_32(14) | RT_BIT_32(15) | RT_BIT_32(28) | RT_BIT_32(29) | RT_BIT_32(30) | RT_BIT_32(31));
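 /* Illustrative note (not in the original source): the mask above clears the
    reserved bits (14:15 and 28:31) of CPUID(1).EAX, leaving the stepping,
    model, family, processor-type and extended model/family fields. E.g.
    0x000206E6 decodes as ext. model 2, model 0xE, family 6, stepping 6;
    that is the D0 stepping covered by the BA86 erratum below. */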
329 if ( u == UINT32_C(0x000206E6) /* 323344.pdf - BA86 - D0 - Intel Xeon Processor 7500 Series */
330 || u == UINT32_C(0x00020652) /* 323056.pdf - AAX65 - C2 - Intel Xeon Processor L3406 */
331 || u == UINT32_C(0x00020652) /* 322814.pdf - AAT59 - C2 - Intel CoreTM i7-600, i5-500, i5-400 and i3-300 Mobile Processor Series */
332 || u == UINT32_C(0x00020652) /* 322911.pdf - AAU65 - C2 - Intel CoreTM i5-600, i3-500 Desktop Processor Series and Intel Pentium Processor G6950 */
333 || u == UINT32_C(0x00020655) /* 322911.pdf - AAU65 - K0 - Intel CoreTM i5-600, i3-500 Desktop Processor Series and Intel Pentium Processor G6950 */
334 || u == UINT32_C(0x000106E5) /* 322373.pdf - AAO95 - B1 - Intel Xeon Processor 3400 Series */
335 || u == UINT32_C(0x000106E5) /* 322166.pdf - AAN92 - B1 - Intel CoreTM i7-800 and i5-700 Desktop Processor Series */
336 || u == UINT32_C(0x000106E5) /* 320767.pdf - AAP86 - B1 - Intel Core i7-900 Mobile Processor Extreme Edition Series, Intel Core i7-800 and i7-700 Mobile Processor Series */
337 || u == UINT32_C(0x000106A0) /*?321333.pdf - AAM126 - C0 - Intel Xeon Processor 3500 Series Specification */
338 || u == UINT32_C(0x000106A1) /*?321333.pdf - AAM126 - C1 - Intel Xeon Processor 3500 Series Specification */
339 || u == UINT32_C(0x000106A4) /* 320836.pdf - AAJ124 - C0 - Intel Core i7-900 Desktop Processor Extreme Edition Series and Intel Core i7-900 Desktop Processor Series */
340 || u == UINT32_C(0x000106A5) /* 321333.pdf - AAM126 - D0 - Intel Xeon Processor 3500 Series Specification */
341 || u == UINT32_C(0x000106A5) /* 321324.pdf - AAK139 - D0 - Intel Xeon Processor 5500 Series Specification */
342 || u == UINT32_C(0x000106A5) /* 320836.pdf - AAJ124 - D0 - Intel Core i7-900 Desktop Processor Extreme Edition Series and Intel Core i7-900 Desktop Processor Series */
343 )
344 return true;
345 return false;
346}
347
348
349/**
350 * Intel specific initialization code.
351 *
352 * @returns VBox status code (will only fail if out of memory).
353 */
354static int hmR0InitIntel(uint32_t u32FeaturesECX, uint32_t u32FeaturesEDX)
355{
356 /*
357 * Check that all the required VT-x features are present.
358 * We also assume all VT-x-enabled CPUs support fxsave/fxrstor.
359 */
360 if ( (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
361 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
362 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
363 )
364 {
365 /** @todo move this into a separate function. */
366 g_HvmR0.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
367
368 /*
369 * First try to use the native kernel API for controlling VT-x.
370 * (This is only supported by some Mac OS X kernels at present.)
371 */
372 int rc = g_HvmR0.lLastError = SUPR0EnableVTx(true /* fEnable */);
373 g_HvmR0.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
374 if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
375 {
376 AssertMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
377 if (RT_SUCCESS(rc))
378 {
379 g_HvmR0.vmx.fSupported = true;
380 rc = SUPR0EnableVTx(false /* fEnable */);
381 AssertRC(rc);
382 }
383 }
384 else
385 {
386 /* We need to check if VT-x has been properly initialized on all
387 CPUs. Some BIOSes do a lousy job. */
388 HMR0FIRSTRC FirstRc;
389 hmR0FirstRcInit(&FirstRc);
390 g_HvmR0.lLastError = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
391 if (RT_SUCCESS(g_HvmR0.lLastError))
392 g_HvmR0.lLastError = hmR0FirstRcGetStatus(&FirstRc);
393 }
394 if (RT_SUCCESS(g_HvmR0.lLastError))
395 {
396 /* Reread in case we've changed it. */
397 g_HvmR0.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
398
399 if ( (g_HvmR0.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
400 == (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
401 {
402 /*
403 * Read all relevant MSRs.
404 */
405 g_HvmR0.vmx.msr.vmx_basic_info = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
406 g_HvmR0.vmx.msr.vmx_pin_ctls.u = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
407 g_HvmR0.vmx.msr.vmx_proc_ctls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
408 g_HvmR0.vmx.msr.vmx_exit.u = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
409 g_HvmR0.vmx.msr.vmx_entry.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
410 g_HvmR0.vmx.msr.vmx_misc = ASMRdMsr(MSR_IA32_VMX_MISC);
411 g_HvmR0.vmx.msr.vmx_cr0_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
412 g_HvmR0.vmx.msr.vmx_cr0_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
413 g_HvmR0.vmx.msr.vmx_cr4_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
414 g_HvmR0.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
415 g_HvmR0.vmx.msr.vmx_vmcs_enum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
416 g_HvmR0.vmx.hostCR4 = ASMGetCR4();
417 g_HvmR0.vmx.hostEFER = ASMRdMsr(MSR_K6_EFER);
418 /* VPID is a 16-bit ASID. */
419 g_HvmR0.uMaxASID = 0x10000; /* exclusive */
420
421 if (g_HvmR0.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
422 {
423 g_HvmR0.vmx.msr.vmx_proc_ctls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
424 if ( g_HvmR0.vmx.msr.vmx_proc_ctls2.n.allowed1
425 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
426 g_HvmR0.vmx.msr.vmx_eptcaps = ASMRdMsr(MSR_IA32_VMX_EPT_CAPS);
427 }
428
429 if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
430 {
431 /*
432 * Enter root mode
433 */
434 RTR0MEMOBJ hScratchMemObj;
435 rc = RTR0MemObjAllocCont(&hScratchMemObj, PAGE_SIZE, true /* executable R0 mapping */);
436 if (RT_FAILURE(rc))
437 return rc;
438
439 void *pvScratchPage = RTR0MemObjAddress(hScratchMemObj);
440 RTHCPHYS HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScratchMemObj, 0);
441 ASMMemZeroPage(pvScratchPage);
442
443 /* Set revision dword at the beginning of the structure. */
444 *(uint32_t *)pvScratchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(g_HvmR0.vmx.msr.vmx_basic_info);
445
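 /* Note (added for clarity; this is an architectural requirement from the
    Intel SDM, not something stated in this file): VMXON demands that the
    first dword of its 4KB-aligned region hold the VMCS revision identifier
    reported by MSR_IA32_VMX_BASIC_INFO, which is what the store above sets. */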
446 /* Make sure we don't get rescheduled to another cpu during this probe. */
447 RTCCUINTREG fFlags = ASMIntDisableFlags();
448
449 /*
450 * Check CR4.VMXE
451 */
452 g_HvmR0.vmx.hostCR4 = ASMGetCR4();
453 if (!(g_HvmR0.vmx.hostCR4 & X86_CR4_VMXE))
454 {
455 /* In theory this bit could be cleared behind our back, which would cause
456 #UD faults when we try to execute the VMX instructions... */
457 ASMSetCR4(g_HvmR0.vmx.hostCR4 | X86_CR4_VMXE);
458 }
459
460 /* Enter VMX Root Mode */
461 rc = VMXEnable(HCPhysScratchPage);
462 if (RT_SUCCESS(rc))
463 {
464 g_HvmR0.vmx.fSupported = true;
465 VMXDisable();
466
467 /*
468 * Check for the VMX-Preemption Timer and adjust for the "VMX-Preemption
469 * Timer Does Not Count Down at the Rate Specified" erratum.
470 */
471 if ( g_HvmR0.vmx.msr.vmx_pin_ctls.n.allowed1
472 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
473 {
474 g_HvmR0.vmx.fUsePreemptTimer = true;
475 g_HvmR0.vmx.cPreemptTimerShift = MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(g_HvmR0.vmx.msr.vmx_misc);
476 if (hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum())
477 g_HvmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
478 }
479 }
480 else
481 {
482 /*
483 * KVM leaves the CPU in VMX root mode. Not only is this not allowed,
484 * it will crash the host when we enter raw mode, because:
485 *
486 * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify
487 * this bit), and
488 * (b) turning off paging causes a #GP (unavoidable when switching
489 * from long to 32 bits mode or 32 bits to PAE).
490 *
491 * They should fix their code, but until they do we simply refuse to run.
492 */
493 g_HvmR0.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
494 }
495
496 /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set
497 if it wasn't so before (some software could incorrectly
498 think it's in VMX mode). */
499 ASMSetCR4(g_HvmR0.vmx.hostCR4);
500 ASMSetFlags(fFlags);
501
502 RTR0MemObjFree(hScratchMemObj, false);
503 }
504 }
505 else
506 {
507 AssertFailed(); /* can't hit this case anymore */
508 g_HvmR0.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
509 }
510
511 /*
512 * Install the VT-x methods.
513 */
514 if (g_HvmR0.vmx.fSupported)
515 {
516 g_HvmR0.pfnEnterSession = VMXR0Enter;
517 g_HvmR0.pfnLeaveSession = VMXR0Leave;
518 g_HvmR0.pfnSaveHostState = VMXR0SaveHostState;
519 g_HvmR0.pfnLoadGuestState = VMXR0LoadGuestState;
520 g_HvmR0.pfnRunGuestCode = VMXR0RunGuestCode;
521 g_HvmR0.pfnEnableCpu = VMXR0EnableCpu;
522 g_HvmR0.pfnDisableCpu = VMXR0DisableCpu;
523 g_HvmR0.pfnInitVM = VMXR0InitVM;
524 g_HvmR0.pfnTermVM = VMXR0TermVM;
525 g_HvmR0.pfnSetupVM = VMXR0SetupVM;
526 }
527 }
528#ifdef LOG_ENABLED
529 else
530 SUPR0Printf("hmR0InitIntelCpu failed with rc=%d\n", g_HvmR0.lLastError);
531#endif
532 }
533 else
534 g_HvmR0.lLastError = VERR_VMX_NO_VMX;
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * AMD-specific initialization code.
541 */
542static void hmR0InitAmd(uint32_t u32FeaturesEDX)
543{
544 /*
545 * Check that all the required SVM features are present (this also requires RDMSR/WRMSR support).
546 * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
547 */
548 if ( (g_HvmR0.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
549 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
550 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
551 )
552 {
553 g_HvmR0.pfnEnterSession = SVMR0Enter;
554 g_HvmR0.pfnLeaveSession = SVMR0Leave;
555 g_HvmR0.pfnSaveHostState = SVMR0SaveHostState;
556 g_HvmR0.pfnLoadGuestState = SVMR0LoadGuestState;
557 g_HvmR0.pfnRunGuestCode = SVMR0RunGuestCode;
558 g_HvmR0.pfnEnableCpu = SVMR0EnableCpu;
559 g_HvmR0.pfnDisableCpu = SVMR0DisableCpu;
560 g_HvmR0.pfnInitVM = SVMR0InitVM;
561 g_HvmR0.pfnTermVM = SVMR0TermVM;
562 g_HvmR0.pfnSetupVM = SVMR0SetupVM;
563
564 /* Query AMD features. */
565 uint32_t u32Dummy;
566 ASMCpuId(0x8000000A, &g_HvmR0.svm.u32Rev, &g_HvmR0.uMaxASID,
567 &u32Dummy, &g_HvmR0.svm.u32Features);
568
569 /*
570 * We need to check if AMD-V has been properly initialized on all CPUs.
571 * Some BIOSes might do a poor job.
572 */
573 HMR0FIRSTRC FirstRc;
574 hmR0FirstRcInit(&FirstRc);
575 int rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
576 AssertRC(rc);
577 if (RT_SUCCESS(rc))
578 rc = hmR0FirstRcGetStatus(&FirstRc);
579#ifndef DEBUG_bird
580 AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE,
581 ("hmR0InitAmdCpu failed for cpu %d with rc=%Rrc\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
582#endif
583 if (RT_SUCCESS(rc))
584 {
585 /* Read the HWCR msr for diagnostics. */
586 g_HvmR0.svm.msrHWCR = ASMRdMsr(MSR_K8_HWCR);
587 g_HvmR0.svm.fSupported = true;
588 }
589 else
590 g_HvmR0.lLastError = rc;
591 }
592 else
593 g_HvmR0.lLastError = VERR_SVM_NO_SVM;
594}
595
596
597/**
598 * Does global Ring-0 HM initialization (at module init).
599 *
600 * @returns VBox status code.
601 */
602VMMR0DECL(int) HWACCMR0Init(void)
603{
604 /*
605 * Initialize the globals.
606 */
607 g_HvmR0.fEnabled = false;
608 static RTONCE s_OnceInit = RTONCE_INITIALIZER;
609 g_HvmR0.EnableAllCpusOnce = s_OnceInit;
610 for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
611 g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
612
613 /* Fill in all callbacks with placeholders. */
614 g_HvmR0.pfnEnterSession = hmR0DummyEnter;
615 g_HvmR0.pfnLeaveSession = hmR0DummyLeave;
616 g_HvmR0.pfnSaveHostState = hmR0DummySaveHostState;
617 g_HvmR0.pfnLoadGuestState = hmR0DummyLoadGuestState;
618 g_HvmR0.pfnRunGuestCode = hmR0DummyRunGuestCode;
619 g_HvmR0.pfnEnableCpu = hmR0DummyEnableCpu;
620 g_HvmR0.pfnDisableCpu = hmR0DummyDisableCpu;
621 g_HvmR0.pfnInitVM = hmR0DummyInitVM;
622 g_HvmR0.pfnTermVM = hmR0DummyTermVM;
623 g_HvmR0.pfnSetupVM = hmR0DummySetupVM;
624
625 /* Default is global VT-x/AMD-V init */
626 g_HvmR0.fGlobalInit = true;
627
628 /*
629 * Make sure aCpuInfo is big enough for all the CPUs on this system.
630 */
631 if (RTMpGetArraySize() > RT_ELEMENTS(g_HvmR0.aCpuInfo))
632 {
633 LogRel(("HM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_HvmR0.aCpuInfo)));
634 return VERR_TOO_MANY_CPUS;
635 }
636
637 /*
638 * Check for VT-x and AMD-V capabilities
639 */
640 int rc;
641 if (ASMHasCpuId())
642 {
643 uint32_t u32FeaturesECX, u32FeaturesEDX;
644 uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
645 uint32_t u32Dummy;
646
647 /* Standard features. */
648 ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
649 ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
650
651 /* Query AMD features. */
652 ASMCpuId(0x80000001, &u32Dummy, &u32Dummy,
653 &g_HvmR0.cpuid.u32AMDFeatureECX,
654 &g_HvmR0.cpuid.u32AMDFeatureEDX);
655
656 /* Go to CPU specific initialization code. */
657 if ( u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
658 && u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
659 && u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX)
660 {
661 rc = hmR0InitIntel(u32FeaturesECX, u32FeaturesEDX);
662 if (RT_FAILURE(rc))
663 return rc;
664 }
665 else if ( u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
666 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
667 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX)
668 hmR0InitAmd(u32FeaturesEDX);
669 else
670 g_HvmR0.lLastError = VERR_HWACCM_UNKNOWN_CPU;
671 }
672 else
673 g_HvmR0.lLastError = VERR_HWACCM_NO_CPUID;
674
675 /*
676 * Register notification callbacks that we can use to disable/enable CPUs
677 * when brought offline/online or suspending/resuming.
678 */
679 if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
680 {
681 rc = RTMpNotificationRegister(hmR0MpEventCallback, NULL);
682 AssertRC(rc);
683
684 rc = RTPowerNotificationRegister(hmR0PowerCallback, NULL);
685 AssertRC(rc);
686 }
687
688 /* We return success here because module init shall not fail if HM
689 fails to initialize. */
690 return VINF_SUCCESS;
691}
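
/* Illustrative call-order sketch for this module (derived from the functions
 * in this file; not a normative contract):
 *
 *     HWACCMR0Init();                    // module load: detect VT-x/AMD-V, install method table
 *     HWACCMR0InitVM(pVM);               // per VM: copy globals, init per-VCPU fields
 *     HWACCMR0EnableAllCpus(pVM);        // once: enable VT-x/AMD-V on the host CPUs
 *     HWACCMR0SetupVM(pVM);              // set up the hardware VM state
 *     HWACCMR0Enter(pVM, pVCpu);         // then, per world switch:
 *     HWACCMR0RunGuestCode(pVM, pVCpu);
 *     HWACCMR0Leave(pVM, pVCpu);
 *     HWACCMR0TermVM(pVM);               // per-VM teardown
 *     HWACCMR0Term();                    // module unload
 */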
692
693
694/**
695 * Does global Ring-0 HM termination (at module termination).
696 *
697 * @returns VBox status code.
698 */
699VMMR0DECL(int) HWACCMR0Term(void)
700{
701 int rc;
702 if ( g_HvmR0.vmx.fSupported
703 && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
704 {
705 /*
706 * Simple if the host OS manages VT-x.
707 */
708 Assert(g_HvmR0.fGlobalInit);
709 rc = SUPR0EnableVTx(false /* fEnable */);
710
711 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo); iCpu++)
712 {
713 g_HvmR0.aCpuInfo[iCpu].fConfigured = false;
714 Assert(g_HvmR0.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
715 }
716 }
717 else
718 {
719 Assert(!g_HvmR0.vmx.fUsingSUPR0EnableVTx);
720 if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
721 {
722 /* Doesn't really matter if this fails. */
723 rc = RTMpNotificationDeregister(hmR0MpEventCallback, NULL); AssertRC(rc);
724 rc = RTPowerNotificationDeregister(hmR0PowerCallback, NULL); AssertRC(rc);
725 }
726 else
727 rc = VINF_SUCCESS;
728
729 /*
730 * Disable VT-x/AMD-V on all CPUs if we enabled it before.
731 */
732 if (g_HvmR0.fGlobalInit)
733 {
734 HMR0FIRSTRC FirstRc;
735 hmR0FirstRcInit(&FirstRc);
736 rc = RTMpOnAll(hmR0DisableCpuCallback, NULL, &FirstRc);
737 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
738 if (RT_SUCCESS(rc))
739 {
740 rc = hmR0FirstRcGetStatus(&FirstRc);
741 AssertMsgRC(rc, ("%u: %Rrc\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
742 }
743 }
744
745 /*
746 * Free the per-cpu pages used for VT-x and AMD-V.
747 */
748 for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
749 {
750 if (g_HvmR0.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
751 {
752 RTR0MemObjFree(g_HvmR0.aCpuInfo[i].hMemObj, false);
753 g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
754 }
755 }
756 }
757 return rc;
758}
759
760
761/**
762 * Worker function used by hmR0PowerCallback and HWACCMR0Init to initialize
763 * VT-x on a CPU.
764 *
765 * @param idCpu The identifier for the CPU the function is called on.
766 * @param pvUser1 Pointer to the first RC structure.
767 * @param pvUser2 Ignored.
768 */
769static DECLCALLBACK(void) hmR0InitIntelCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
770{
771 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
772 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
773 NOREF(pvUser2);
774
775 /*
776 * Both the LOCK and VMXON bits must be set; otherwise VMXON will generate a #GP.
777 * Once the lock bit is set, this MSR can no longer be modified.
778 */
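 /* Illustrative note (not in the original source): in this MSR, bit 0 is the
    lock bit and bit 2 enables VMXON outside SMX operation; both must read
    back as set before VMXON may be executed. */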
779 uint64_t fFC = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
780 if ( !(fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
781 || ( (fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
782 == MSR_IA32_FEATURE_CONTROL_VMXON ) /* Some BIOSes forget to set the locked bit. */
783 )
784 {
785 /* MSR is not yet locked; we can change it ourselves here */
786 ASMWrMsr(MSR_IA32_FEATURE_CONTROL,
787 g_HvmR0.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
788 fFC = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
789 }
790
791 int rc;
792 if ( (fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
793 == (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
794 rc = VINF_SUCCESS;
795 else
796 rc = VERR_VMX_MSR_LOCKED_OR_DISABLED;
797
798 hmR0FirstRcSetStatus(pFirstRc, rc);
799}
800
801
802/**
803 * Worker function used by hmR0PowerCallback and HWACCMR0Init to initialize
804 * AMD-V on a CPU.
805 *
806 * @param idCpu The identifier for the CPU the function is called on.
807 * @param pvUser1 Pointer to the first RC structure.
808 * @param pvUser2 Ignored.
809 */
810static DECLCALLBACK(void) hmR0InitAmdCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
811{
812 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
813 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
814 NOREF(pvUser2);
815
816 /* Check if SVM is disabled. */
817 int rc;
818 uint64_t fVmCr = ASMRdMsr(MSR_K8_VM_CR);
819 if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
820 {
821 /* Turn on SVM in the EFER MSR. */
822 uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
823 if (fEfer & MSR_K6_EFER_SVME)
824 rc = VERR_SVM_IN_USE;
825 else
826 {
827 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
828
829 /* Paranoia. */
830 fEfer = ASMRdMsr(MSR_K6_EFER);
831 if (fEfer & MSR_K6_EFER_SVME)
832 {
833 /* Restore previous value. */
834 ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
835 rc = VINF_SUCCESS;
836 }
837 else
838 rc = VERR_SVM_ILLEGAL_EFER_MSR;
839 }
840 }
841 else
842 rc = VERR_SVM_DISABLED;
843
844 hmR0FirstRcSetStatus(pFirstRc, rc);
845}
846
847
848
849/**
850 * Enables VT-x or AMD-V on the current CPU.
851 *
852 * @returns VBox status code.
853 * @param pVM Pointer to the VM (can be NULL).
854 * @param idCpu The identifier for the CPU the function is called on.
855 */
856static int hmR0EnableCpu(PVM pVM, RTCPUID idCpu)
857{
858 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
859
860 Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
861 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
862 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
863 Assert(!pCpu->fConfigured);
864 Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
865
866 pCpu->idCpu = idCpu;
867 pCpu->uCurrentASID = 0; /* we'll always increment this the first time (host uses ASID 0) */
868 pCpu->cTLBFlushes = 0;
869
870 /* Should never happen */
871 AssertLogRelMsgReturn(pCpu->hMemObj != NIL_RTR0MEMOBJ, ("hmR0EnableCpu failed idCpu=%u.\n", idCpu), VERR_HM_IPE_1);
872
873 void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
874 RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
875
876 int rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage);
877 AssertRC(rc);
878 if (RT_SUCCESS(rc))
879 pCpu->fConfigured = true;
880
881 return rc;
882}
883
884
885/**
886 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
887 * is to be called on the target cpus.
888 *
889 * @param idCpu The identifier for the CPU the function is called on.
890 * @param pvUser1 The 1st user argument.
891 * @param pvUser2 The 2nd user argument.
892 */
893static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
894{
895 PVM pVM = (PVM)pvUser1; /* can be NULL! */
896 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2;
897 AssertReturnVoid(g_HvmR0.fGlobalInit);
898 hmR0FirstRcSetStatus(pFirstRc, hmR0EnableCpu(pVM, idCpu));
899}
900
901
902/**
903 * RTOnce callback employed by HWACCMR0EnableAllCpus.
904 *
905 * @returns VBox status code.
906 * @param pvUser Pointer to the VM.
907 * @param pvUserIgnore NULL, ignored.
908 */
909static DECLCALLBACK(int32_t) hmR0EnableAllCpuOnce(void *pvUser, void *pvUserIgnore)
910{
911 PVM pVM = (PVM)pvUser;
912 NOREF(pvUserIgnore);
913
914 /*
915 * Indicate that we've initialized.
916 *
917 * Note! There is a potential race between this function and the suspend
918 * notification. Kind of unlikely though, so ignored for now.
919 */
920 AssertReturn(!g_HvmR0.fEnabled, VERR_HM_ALREADY_ENABLED_IPE);
921 ASMAtomicWriteBool(&g_HvmR0.fEnabled, true);
922
923 /*
924 * The global init variable is set by the first VM.
925 */
926 g_HvmR0.fGlobalInit = pVM->hwaccm.s.fGlobalInit;
927
928 int rc;
929 if ( g_HvmR0.vmx.fSupported
930 && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
931 {
932 /*
933 * Global VT-x initialization API (only darwin for now).
934 */
935 rc = SUPR0EnableVTx(true /* fEnable */);
936 if (RT_SUCCESS(rc))
937 {
938 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo); iCpu++)
939 {
940 g_HvmR0.aCpuInfo[iCpu].fConfigured = true;
941 Assert(g_HvmR0.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
942 }
943
944 /* If the host provides a VT-x init API, then we'll rely on that for global init. */
945 g_HvmR0.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
946 }
947 else
948 AssertMsgFailed(("HWACCMR0EnableAllCpus/SUPR0EnableVTx: rc=%Rrc\n", rc));
949 }
950 else
951 {
952 /*
953 * We're doing the job ourselves.
954 */
955 /* Allocate one page per cpu for the global vt-x and amd-v pages */
956 for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
957 {
958 Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
959
960 if (RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(i)))
961 {
962 rc = RTR0MemObjAllocCont(&g_HvmR0.aCpuInfo[i].hMemObj, PAGE_SIZE, true /* executable R0 mapping */);
963 AssertLogRelRCReturn(rc, rc);
964
965 void *pvR0 = RTR0MemObjAddress(g_HvmR0.aCpuInfo[i].hMemObj); Assert(pvR0);
966 ASMMemZeroPage(pvR0);
967 }
968 g_HvmR0.aCpuInfo[i].fConfigured = false;
969 }
970
971 if (g_HvmR0.fGlobalInit)
972 {
973 /* First time, so initialize each cpu/core. */
974 HMR0FIRSTRC FirstRc;
975 hmR0FirstRcInit(&FirstRc);
976 rc = RTMpOnAll(hmR0EnableCpuCallback, (void *)pVM, &FirstRc);
977 if (RT_SUCCESS(rc))
978 rc = hmR0FirstRcGetStatus(&FirstRc);
979 AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
980 }
981 else
982 rc = VINF_SUCCESS;
983 }
984
985 return rc;
986}
987
988
989/**
990 * Sets up HWACCM on all cpus.
991 *
992 * @returns VBox status code.
993 * @param pVM Pointer to the VM.
994 */
995VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
996{
997 /* Make sure we don't touch hwaccm after we've disabled hwaccm in
998 preparation for a suspend. */
999 if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
1000 return VERR_HWACCM_SUSPEND_PENDING;
1001
1002 return RTOnce(&g_HvmR0.EnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM, NULL);
1003}
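
/* Note (added for clarity, assuming standard IPRT RTOnce semantics): the
 * RTOnce above runs hmR0EnableAllCpuOnce at most once for the lifetime of
 * the module; concurrent callers wait for that first invocation to complete
 * and then all receive its status code. */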
1004
1005
1006/**
1007 * Disable VT-x or AMD-V on the current CPU.
1008 *
1009 * @returns VBox status code.
1010 * @param idCpu The identifier for the CPU the function is called on.
1011 */
1012static int hmR0DisableCpu(RTCPUID idCpu)
1013{
1014 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
1015
1016 Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
1017 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
1018 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
1019 Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
1020 Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ);
1021
1022 if (pCpu->hMemObj == NIL_RTR0MEMOBJ)
1023 return pCpu->fConfigured ? VERR_NO_MEMORY : VINF_SUCCESS /* not initialized. */;
1024
1025 int rc;
1026 if (pCpu->fConfigured)
1027 {
1028 void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
1029 RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
1030 if (idCpu == RTMpCpuId())
1031 {
1032 rc = g_HvmR0.pfnDisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
1033 AssertRC(rc);
1034 }
1035 else
1036 {
1037 pCpu->fIgnoreAMDVInUseError = true;
1038 rc = VINF_SUCCESS;
1039 }
1040
1041 pCpu->fConfigured = false;
1042 }
1043 else
1044 rc = VINF_SUCCESS; /* nothing to do */
1045
1046 pCpu->uCurrentASID = 0;
1047 return rc;
1048}
1049
1050
1051/**
1052 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
1053 * is to be called on the target cpus.
1054 *
1055 * @param idCpu The identifier for the CPU the function is called on.
1056 * @param pvUser1 The 1st user argument.
1057 * @param pvUser2 The 2nd user argument.
1058 */
1059static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1060{
1061 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2; NOREF(pvUser1);
1062 AssertReturnVoid(g_HvmR0.fGlobalInit);
1063 hmR0FirstRcSetStatus(pFirstRc, hmR0DisableCpu(idCpu));
1064}
1065
1066
1067/**
1068 * Callback function invoked when a cpu goes online or offline.
1069 *
1070 * @param enmEvent The Mp event.
1071 * @param idCpu The identifier for the CPU the function is called on.
1072 * @param pvData Opaque data (PVM pointer).
1073 */
1074static DECLCALLBACK(void) hmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData)
1075{
1076 NOREF(pvData);
1077
1078 /*
1079 * We only care about uninitializing a CPU that is going offline. When a
1080 * CPU comes online, the initialization is done lazily in HWACCMR0Enter().
1081 */
1082 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1083 switch (enmEvent)
1084 {
1085 case RTMPEVENT_OFFLINE:
1086 {
1087 int rc = hmR0DisableCpu(idCpu);
1088 AssertRC(rc);
1089 break;
1090 }
1091
1092 default:
1093 break;
1094 }
1095}
1096
1097
1098/**
1099 * Called whenever a system power state change occurs.
1100 *
1101 * @param enmEvent The Power event.
1102 * @param pvUser User argument.
1103 */
1104static DECLCALLBACK(void) hmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser)
1105{
1106 NOREF(pvUser);
1107 Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
1108
1109#ifdef LOG_ENABLED
1110 if (enmEvent == RTPOWEREVENT_SUSPEND)
1111 SUPR0Printf("hmR0PowerCallback RTPOWEREVENT_SUSPEND\n");
1112 else
1113 SUPR0Printf("hmR0PowerCallback RTPOWEREVENT_RESUME\n");
1114#endif
1115
1116 if (enmEvent == RTPOWEREVENT_SUSPEND)
1117 ASMAtomicWriteBool(&g_HvmR0.fSuspended, true);
1118
1119 if (g_HvmR0.fEnabled)
1120 {
1121 int rc;
1122 HMR0FIRSTRC FirstRc;
1123 hmR0FirstRcInit(&FirstRc);
1124
1125 if (enmEvent == RTPOWEREVENT_SUSPEND)
1126 {
1127 if (g_HvmR0.fGlobalInit)
1128 {
1129 /* Turn off VT-x or AMD-V on all CPUs. */
1130 rc = RTMpOnAll(hmR0DisableCpuCallback, NULL, &FirstRc);
1131 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
1132 }
1133 /* else nothing to do here for the local init case */
1134 }
1135 else
1136 {
1137 /* Reinit the CPUs from scratch as the suspend state might have
1138 messed with the MSRs. (lousy BIOSes as usual) */
1139 if (g_HvmR0.vmx.fSupported)
1140 rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
1141 else
1142 rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
1143 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
1144 if (RT_SUCCESS(rc))
1145 rc = hmR0FirstRcGetStatus(&FirstRc);
1146#ifdef LOG_ENABLED
1147 if (RT_FAILURE(rc))
1148 SUPR0Printf("hmR0PowerCallback hmR0InitXxxCpu failed with %Rrc\n", rc);
1149#endif
1150 if (g_HvmR0.fGlobalInit)
1151 {
1152 /* Turn VT-x or AMD-V back on for all CPUs. */
1153 rc = RTMpOnAll(hmR0EnableCpuCallback, NULL, &FirstRc /* output ignored */);
1154 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
1155 }
1156 /* else nothing to do here for the local init case */
1157 }
1158 }
1159
1160 if (enmEvent == RTPOWEREVENT_RESUME)
1161 ASMAtomicWriteBool(&g_HvmR0.fSuspended, false);
1162}
1163
1164
1165/**
1166 * Does Ring-0 per VM HM initialization.
1167 *
1168 * This will copy the HM global state into the VM structure and call the CPU specific
1169 * init routine which will allocate resources for each virtual CPU and such.
1170 *
1171 * @returns VBox status code.
1172 * @param pVM Pointer to the VM.
1173 */
1174VMMR0DECL(int) HWACCMR0InitVM(PVM pVM)
1175{
1176 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1177
1178#ifdef LOG_ENABLED
1179 SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
1180#endif
1181
1182 /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation for a suspend. */
1183 if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
1184 return VERR_HWACCM_SUSPEND_PENDING;
1185
1186 /*
1187 * Copy globals to the VM structure.
1188 */
1189 pVM->hwaccm.s.vmx.fSupported = g_HvmR0.vmx.fSupported;
1190 pVM->hwaccm.s.svm.fSupported = g_HvmR0.svm.fSupported;
1191
1192 pVM->hwaccm.s.vmx.fUsePreemptTimer = g_HvmR0.vmx.fUsePreemptTimer;
1193 pVM->hwaccm.s.vmx.cPreemptTimerShift = g_HvmR0.vmx.cPreemptTimerShift;
1194 pVM->hwaccm.s.vmx.msr.feature_ctrl = g_HvmR0.vmx.msr.feature_ctrl;
1195 pVM->hwaccm.s.vmx.hostCR4 = g_HvmR0.vmx.hostCR4;
1196 pVM->hwaccm.s.vmx.hostEFER = g_HvmR0.vmx.hostEFER;
1197 pVM->hwaccm.s.vmx.msr.vmx_basic_info = g_HvmR0.vmx.msr.vmx_basic_info;
1198 pVM->hwaccm.s.vmx.msr.vmx_pin_ctls = g_HvmR0.vmx.msr.vmx_pin_ctls;
1199 pVM->hwaccm.s.vmx.msr.vmx_proc_ctls = g_HvmR0.vmx.msr.vmx_proc_ctls;
1200 pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2 = g_HvmR0.vmx.msr.vmx_proc_ctls2;
1201 pVM->hwaccm.s.vmx.msr.vmx_exit = g_HvmR0.vmx.msr.vmx_exit;
1202 pVM->hwaccm.s.vmx.msr.vmx_entry = g_HvmR0.vmx.msr.vmx_entry;
1203 pVM->hwaccm.s.vmx.msr.vmx_misc = g_HvmR0.vmx.msr.vmx_misc;
1204 pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = g_HvmR0.vmx.msr.vmx_cr0_fixed0;
1205 pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = g_HvmR0.vmx.msr.vmx_cr0_fixed1;
1206 pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = g_HvmR0.vmx.msr.vmx_cr4_fixed0;
1207 pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = g_HvmR0.vmx.msr.vmx_cr4_fixed1;
1208 pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum = g_HvmR0.vmx.msr.vmx_vmcs_enum;
1209 pVM->hwaccm.s.vmx.msr.vmx_eptcaps = g_HvmR0.vmx.msr.vmx_eptcaps;
1210 pVM->hwaccm.s.svm.msrHWCR = g_HvmR0.svm.msrHWCR;
1211 pVM->hwaccm.s.svm.u32Rev = g_HvmR0.svm.u32Rev;
1212 pVM->hwaccm.s.svm.u32Features = g_HvmR0.svm.u32Features;
1213 pVM->hwaccm.s.cpuid.u32AMDFeatureECX = g_HvmR0.cpuid.u32AMDFeatureECX;
1214 pVM->hwaccm.s.cpuid.u32AMDFeatureEDX = g_HvmR0.cpuid.u32AMDFeatureEDX;
1215 pVM->hwaccm.s.lLastError = g_HvmR0.lLastError;
1216
1217 pVM->hwaccm.s.uMaxASID = g_HvmR0.uMaxASID;
1218
1219
1220 if (!pVM->hwaccm.s.cMaxResumeLoops) /* allow ring-3 overrides */
1221 {
1222 pVM->hwaccm.s.cMaxResumeLoops = 1024;
1223#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
1224 if (RTThreadPreemptIsPendingTrusty())
1225 pVM->hwaccm.s.cMaxResumeLoops = 8192;
1226#endif
1227 }
1228
1229 /*
1230 * Initialize some per CPU fields.
1231 */
1232 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1233 {
1234 PVMCPU pVCpu = &pVM->aCpus[i];
1235
1236 pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
1237
1238 /* Invalidate the last cpu we were running on. */
1239 pVCpu->hwaccm.s.idLastCpu = NIL_RTCPUID;
1240
1241 /* we'll always increment this the first time (host uses ASID 0) */
1242 pVCpu->hwaccm.s.uCurrentASID = 0;
1243 }
1244
1245 /*
1246 * Call the hardware specific initialization method.
1247 *
1248 * Note! The fInUse handling here isn't correct as we can be
1249 * rescheduled to a different cpu, but the fInUse case is mostly for
1250 * debugging... Disabling preemption isn't an option when allocating
1251 * memory, so we'll let it slip for now.
1252 */
1253 RTCCUINTREG fFlags = ASMIntDisableFlags();
1254 PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
1255 ASMAtomicWriteBool(&pCpu->fInUse, true);
1256 ASMSetFlags(fFlags);
1257
1258 int rc = g_HvmR0.pfnInitVM(pVM);
1259
1260 ASMAtomicWriteBool(&pCpu->fInUse, false);
1261 return rc;
1262}
1263
1264
1265/**
1266 * Does Ring-0 per VM HM termination.
1267 *
1268 * @returns VBox status code.
1269 * @param pVM Pointer to the VM.
1270 */
1271VMMR0DECL(int) HWACCMR0TermVM(PVM pVM)
1272{
1273 Log(("HWACCMR0TermVM: %p\n", pVM));
1274 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1275
1276 /* Make sure we don't touch hm after we've disabled hwaccm in preparation
1277 for a suspend. */
1278 /** @todo r=bird: This cannot be right, the termination functions are
1279 * just freeing memory and resetting pVM/pVCpu members...
1280 * ==> memory leak. */
1281 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
1282
1283 /*
1284 * Call the hardware specific method.
1285 *
1286 * Note! Not correct as we can be rescheduled to a different cpu, but the
1287 * fInUse case is mostly for debugging.
1288 */
1289 RTCCUINTREG fFlags = ASMIntDisableFlags();
1290 PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
1291 ASMAtomicWriteBool(&pCpu->fInUse, true);
1292 ASMSetFlags(fFlags);
1293
1294 int rc = g_HvmR0.pfnTermVM(pVM);
1295
1296 ASMAtomicWriteBool(&pCpu->fInUse, false);
1297 return rc;
1298}
1299
1300
1301/**
1302 * Sets up a VT-x or AMD-V session.
1303 *
1304 * This is mostly about setting up the hardware VM state.
1305 *
1306 * @returns VBox status code.
1307 * @param pVM Pointer to the VM.
1308 */
1309VMMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
1310{
1311 Log(("HWACCMR0SetupVM: %p\n", pVM));
1312 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1313
1314 /* Make sure we don't touch hwaccm after we've disabled hwaccm in
1315 preparation for a suspend. */
1316 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
1317
1318
1319 /*
1320 * Call the hardware specific setup VM method. This requires the CPU to be
1321 * enabled for AMD-V/VT-x and preemption to be prevented.
1322 */
1323 RTCCUINTREG fFlags = ASMIntDisableFlags();
1324 RTCPUID idCpu = RTMpCpuId();
1325 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
1326 ASMAtomicWriteBool(&pCpu->fInUse, true);
1327
1328 /* On first entry we'll sync everything. */
1329 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1330 pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
1331
1332 /* Enable VT-x or AMD-V if local init is required. */
1333 int rc;
1334 if (!g_HvmR0.fGlobalInit)
1335 {
1336 rc = hmR0EnableCpu(pVM, idCpu);
1337 AssertReturnStmt(RT_SUCCESS_NP(rc), ASMSetFlags(fFlags), rc);
1338 }
1339
1340 /* Setup VT-x or AMD-V. */
1341 rc = g_HvmR0.pfnSetupVM(pVM);
1342
1343 /* Disable VT-x or AMD-V if local init was done before. */
1344 if (!g_HvmR0.fGlobalInit)
1345 {
1346 int rc2 = hmR0DisableCpu(idCpu);
1347 AssertRC(rc2);
1348 }
1349
1350 ASMAtomicWriteBool(&pCpu->fInUse, false);
1351 ASMSetFlags(fFlags);
1352
1353 return rc;
1354}
1355
1356
1357/**
1358 * Enters the VT-x or AMD-V session.
1359 *
1360 * @returns VBox status code.
1361 * @param pVM Pointer to the VM.
1362 * @param pVCpu Pointer to the VMCPU.
1363 *
1364 * @remarks This is called with preemption disabled.
1365 */
1366VMMR0DECL(int) HWACCMR0Enter(PVM pVM, PVMCPU pVCpu)
1367{
1368 RTCPUID idCpu = RTMpCpuId();
1369 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
1370
1371 /* Make sure we can't enter a session after we've disabled HM in preparation for a suspend. */
1372 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
1373 ASMAtomicWriteBool(&pCpu->fInUse, true);
1374
1375 AssertMsg(pVCpu->hwaccm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hwaccm.s.idEnteredCpu));
1376 pVCpu->hwaccm.s.idEnteredCpu = idCpu;
1377
1378 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1379
1380 /* Always load the guest's FPU/XMM state on-demand. */
1381 CPUMDeactivateGuestFPUState(pVCpu);
1382
1383 /* Always load the guest's debug state on-demand. */
1384 CPUMDeactivateGuestDebugState(pVCpu);
1385
1386 /* Always reload the host context and the guest's CR0 register. (!!!!) */
1387 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
1388
1389 /* Setup the register and mask according to the current execution mode. */
1390 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1391 pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
1392 else
1393 pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);
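 /* Illustrative note (not in the original source): EFER.LMA is set while the
    guest is in long mode, so this selects whether 64 or only 32 bits of the
    guest registers are significant. */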
1394
1395 /* Enable VT-x or AMD-V if local init is required, or enable if it's a
1396 freshly onlined CPU. */
1397 int rc;
1398 if ( !pCpu->fConfigured
1399 || !g_HvmR0.fGlobalInit)
1400 {
1401 rc = hmR0EnableCpu(pVM, idCpu);
1402 AssertRCReturn(rc, rc);
1403 }
1404
1405#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1406 bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
1407#endif
1408
1409 rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
1410 AssertRC(rc);
1411 /* We must save the host context here (VT-x) as we might be rescheduled on
1412 a different cpu after a long jump back to ring 3. */
1413 rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
1414 AssertRC(rc);
1415 rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
1416 AssertRC(rc);
1417
1418#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1419 if (fStartedSet)
1420 PGMRZDynMapReleaseAutoSet(pVCpu);
1421#endif
1422
1423 /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
1424 and ring-3 calls. */
1425 if (RT_FAILURE(rc))
1426 pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
1427 return rc;
1428}
1429
1430
1431/**
1432 * Leaves the VT-x or AMD-V session.
1433 *
1434 * @returns VBox status code.
1435 * @param pVM Pointer to the VM.
1436 * @param pVCpu Pointer to the VMCPU.
1437 *
1438 * @remarks Called with preemption disabled just like HWACCMR0Enter, our
1439 * counterpart.
1440 */
1441VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
1442{
1443 int rc;
1444 RTCPUID idCpu = RTMpCpuId();
1445 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
1446 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1447
1448 /** @todo r=bird: This can't be entirely right? */
1449 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
1450
1451 /*
1452 * Save the guest FPU and XMM state if necessary.
1453 *
1454 * Note! It's rather tricky with longjmps done by e.g. Log statements or
1455 * the page fault handler. We must restore the host FPU here to make
1456 * absolutely sure we don't leave the guest FPU state active or trash
1457 * somebody else's FPU state.
1458 */
1459 if (CPUMIsGuestFPUStateActive(pVCpu))
1460 {
1461 Log2(("CPUMR0SaveGuestFPU\n"));
1462 CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
1463
1464 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1465 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
1466 }
1467
1468 rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
1469
1470 /* We don't pass on invlpg information to the recompiler for nested paging
1471 guests, so we must make sure the recompiler flushes its TLB the next
1472 time it executes code. */
1473 if ( pVM->hwaccm.s.fNestedPaging
1474 && CPUMIsGuestInPagedProtectedModeEx(pCtx))
1475 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
1476
1477 /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
1478 and ring-3 calls. */
1479 AssertMsgStmt( pVCpu->hwaccm.s.idEnteredCpu == idCpu
1480 || RT_FAILURE_NP(rc),
1481 ("Owner is %u, I'm %u", pVCpu->hwaccm.s.idEnteredCpu, idCpu),
1482 rc = VERR_HM_WRONG_CPU_1);
1483 pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
1484
1485 /*
1486 * Disable VT-x or AMD-V if local init was done before.
1487 */
1488 if (!g_HvmR0.fGlobalInit)
1489 {
1490 rc = hmR0DisableCpu(idCpu);
1491 AssertRC(rc);
1492
1493 /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
1494 pVCpu->hwaccm.s.idLastCpu = NIL_RTCPUID;
1495 pVCpu->hwaccm.s.uCurrentASID = 0;
1496 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1497 }
1498
1499 ASMAtomicWriteBool(&pCpu->fInUse, false);
1500 return rc;
1501}
1502
1503
1504/**
1505 * Runs guest code in a hardware accelerated VM.
1506 *
1507 * @returns VBox status code.
1508 * @param pVM Pointer to the VM.
1509 * @param pVCpu Pointer to the VMCPU.
1510 *
1511 * @remarks Called with preemption disabled and after first having called
1512 * HWACCMR0Enter.
1513 */
1514VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
1515{
1516#ifdef VBOX_STRICT
1517 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[RTMpCpuId()];
1518 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
1519 Assert(pCpu->fConfigured);
1520 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
1521 Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
1522#endif
1523
1524#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1525 PGMRZDynMapStartAutoSet(pVCpu);
1526#endif
1527
1528 int rc = g_HvmR0.pfnRunGuestCode(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));
1529
1530#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1531 PGMRZDynMapReleaseAutoSet(pVCpu);
1532#endif
1533 return rc;
1534}
1535
1536#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1537
1538/**
1539 * Save the guest FPU/XMM state (64-bit guest mode & 32-bit host only).
1540 *
1541 * @returns VBox status code.
1542 * @param pVM Pointer to the VM.
1543 * @param pVCpu Pointer to the VMCPU.
1544 * @param pCtx Pointer to the guest CPU context.
1545 */
1546VMMR0DECL(int) HWACCMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1547{
1548 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFpu64SwitchBack);
1549 if (pVM->hwaccm.s.vmx.fSupported)
1550 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
1551 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
1552}
1553
1554
1555/**
1556 * Save the guest debug state (64-bit guest mode & 32-bit host only).
1557 *
1558 * @returns VBox status code.
1559 * @param pVM Pointer to the VM.
1560 * @param pVCpu Pointer to the VMCPU.
1561 * @param pCtx Pointer to the guest CPU context.
1562 */
1563VMMR0DECL(int) HWACCMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1564{
1565 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDebug64SwitchBack);
1566 if (pVM->hwaccm.s.vmx.fSupported)
1567 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
1568 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
1569}
1570
1571
1572/**
1573 * Test the 32->64-bit switcher.
1574 *
1575 * @returns VBox status code.
1576 * @param pVM Pointer to the VM.
1577 */
1578VMMR0DECL(int) HWACCMR0TestSwitcher3264(PVM pVM)
1579{
1580 PVMCPU pVCpu = &pVM->aCpus[0];
1581 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1582 uint32_t aParam[5] = {0, 1, 2, 3, 4};
1583 int rc;
1584
1585 STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
1586 if (pVM->hwaccm.s.vmx.fSupported)
1587 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
1588 else
1589 rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
1590 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
1591
1592 return rc;
1593}
1594
1595#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
1596
1597/**
1598 * Returns suspend status of the host.
1599 *
1600 * @returns Suspend pending or not.
1601 */
1602VMMR0DECL(bool) HWACCMR0SuspendPending(void)
1603{
1604 return ASMAtomicReadBool(&g_HvmR0.fSuspended);
1605}
1606
1607
1608/**
1609 * Returns the cpu structure for the current cpu.
1610 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
1611 *
1612 * @returns The cpu structure pointer.
1613 */
1614VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void)
1615{
1616 RTCPUID idCpu = RTMpCpuId();
1617 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
1618 return &g_HvmR0.aCpuInfo[idCpu];
1619}
1620
1621
1622/**
1623 * Returns the cpu structure for the given cpu.
1624 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
1625 *
1626 * @returns The cpu structure pointer.
1627 * @param idCpu The CPU id.
1628 */
1629VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu)
1630{
1631 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
1632 return &g_HvmR0.aCpuInfo[idCpu];
1633}
1634
1635
1636/**
1637 * Save a pending IO read.
1638 *
1639 * @param pVCpu Pointer to the VMCPU.
1640 * @param GCPtrRip Address of IO instruction.
1641 * @param GCPtrRipNext Address of the next instruction.
1642 * @param uPort Port address.
1643 * @param uAndVal AND mask for saving the result in eax.
1644 * @param cbSize Read size.
1645 */
1646VMMR0DECL(void) HWACCMR0SavePendingIOPortRead(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
1647{
1648 pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_PORT_READ;
1649 pVCpu->hwaccm.s.PendingIO.GCPtrRip = GCPtrRip;
1650 pVCpu->hwaccm.s.PendingIO.GCPtrRipNext = GCPtrRipNext;
1651 pVCpu->hwaccm.s.PendingIO.s.Port.uPort = uPort;
1652 pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal = uAndVal;
1653 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize = cbSize;
1654 return;
1655}
1656
1657
1658/**
1659 * Save a pending IO write.
1660 *
1661 * @param pVCpu Pointer to the VMCPU.
1662 * @param GCPtrRip Address of the IO instruction.
 * @param GCPtrRipNext Address of the next instruction.
1663 * @param uPort Port address.
1664 * @param uAndVal AND mask for fetching the value from eax.
1665 * @param cbSize Write size.
1666 */
1667VMMR0DECL(void) HWACCMR0SavePendingIOPortWrite(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
1668{
1669 pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_PORT_WRITE;
1670 pVCpu->hwaccm.s.PendingIO.GCPtrRip = GCPtrRip;
1671 pVCpu->hwaccm.s.PendingIO.GCPtrRipNext = GCPtrRipNext;
1672 pVCpu->hwaccm.s.PendingIO.s.Port.uPort = uPort;
1673 pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal = uAndVal;
1674 pVCpu->hwaccm.s.PendingIO.s.Port.cbSize = cbSize;
1675 return;
1676}
1677

/**
 * Raw-mode switcher hook - disable VT-x if it's active *and* the current
 * switcher turns off paging.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   pfVTxDisabled   Where to store whether VT-x was disabled or not.
 */
VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, bool *pfVTxDisabled)
{
    Assert(!(ASMGetFlags() & X86_EFL_IF) || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    *pfVTxDisabled = false;

    if (   !g_HvmR0.fEnabled
        || !g_HvmR0.vmx.fSupported /* no such issues with AMD-V */
        || !g_HvmR0.fGlobalInit    /* Local init implies the CPU is currently not in VMX root mode. */)
        return VINF_SUCCESS;    /* nothing to do */

    switch (VMMGetSwitcher(pVM))
    {
        case VMMSWITCHER_32_TO_32:
        case VMMSWITCHER_PAE_TO_PAE:
            return VINF_SUCCESS;    /* safe switchers as they don't turn off paging */

        case VMMSWITCHER_32_TO_PAE:
        case VMMSWITCHER_PAE_TO_32: /* is this one actually used?? */
        case VMMSWITCHER_AMD64_TO_32:
        case VMMSWITCHER_AMD64_TO_PAE:
            break;                  /* unsafe switchers */

        default:
            AssertFailedReturn(VERR_HM_WRONG_SWITCHER);
    }

    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);

    *pfVTxDisabled = true;
    void    *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
    RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    return VMXR0DisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
}


/**
 * Raw-mode switcher hook - re-enable VT-x if it was active *and* the current
 * switcher turned off paging.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   fVTxDisabled    Whether VT-x was disabled or not.
 */
VMMR0DECL(int) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
{
    Assert(!(ASMGetFlags() & X86_EFL_IF));

    if (!fVTxDisabled)
        return VINF_SUCCESS;    /* nothing to do */

    Assert(g_HvmR0.fEnabled);
    Assert(g_HvmR0.vmx.fSupported);
    Assert(g_HvmR0.fGlobalInit);

    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);

    void    *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
    RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    return VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage);
}

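/*
 * A sketch, not in the original, of the intended bracketing of the two hooks
 * above around a world switch that may turn off paging; the switcher call in
 * the middle is elided.
 */
#if 0 /* illustration only */
    bool fVTxDisabled;
    int rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
    if (RT_SUCCESS(rc))
    {
        /* ... run the raw-mode switcher here ... */
        rc = HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);
    }
#endif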
#ifdef VBOX_STRICT

/**
 * Dumps a descriptor.
 *
 * @param   pDesc       Descriptor to dump.
 * @param   Sel         Selector number.
 * @param   pszMsg      Message to prepend the log entry with.
 */
VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
{
    /*
     * Make variable description string.
     */
    static struct
    {
        unsigned    cch;
        const char *psz;
    } const s_aTypes[32] =
    {
# define STRENTRY(str) { sizeof(str) - 1, str }

        /* system */
# if HC_ARCH_BITS == 64
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("Reserved1 "),                  /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("Reserved3 "),                  /* 0x03 */
        STRENTRY("Reserved4 "),                  /* 0x04 */
        STRENTRY("Reserved5 "),                  /* 0x05 */
        STRENTRY("Reserved6 "),                  /* 0x06 */
        STRENTRY("Reserved7 "),                  /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS64Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS64Busy "),                  /* 0x0b */
        STRENTRY("Call64 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int64 "),                      /* 0x0e */
        STRENTRY("Trap64 "),                     /* 0x0f */
# else
        STRENTRY("Reserved0 "),                  /* 0x00 */
        STRENTRY("TSS16Avail "),                 /* 0x01 */
        STRENTRY("LDT "),                        /* 0x02 */
        STRENTRY("TSS16Busy "),                  /* 0x03 */
        STRENTRY("Call16 "),                     /* 0x04 */
        STRENTRY("Task "),                       /* 0x05 */
        STRENTRY("Int16 "),                      /* 0x06 */
        STRENTRY("Trap16 "),                     /* 0x07 */
        STRENTRY("Reserved8 "),                  /* 0x08 */
        STRENTRY("TSS32Avail "),                 /* 0x09 */
        STRENTRY("ReservedA "),                  /* 0x0a */
        STRENTRY("TSS32Busy "),                  /* 0x0b */
        STRENTRY("Call32 "),                     /* 0x0c */
        STRENTRY("ReservedD "),                  /* 0x0d */
        STRENTRY("Int32 "),                      /* 0x0e */
        STRENTRY("Trap32 "),                     /* 0x0f */
# endif
        /* non system */
        STRENTRY("DataRO "),                     /* 0x10 */
        STRENTRY("DataRO Accessed "),            /* 0x11 */
        STRENTRY("DataRW "),                     /* 0x12 */
        STRENTRY("DataRW Accessed "),            /* 0x13 */
        STRENTRY("DataDownRO "),                 /* 0x14 */
        STRENTRY("DataDownRO Accessed "),        /* 0x15 */
        STRENTRY("DataDownRW "),                 /* 0x16 */
        STRENTRY("DataDownRW Accessed "),        /* 0x17 */
        STRENTRY("CodeEO "),                     /* 0x18 */
        STRENTRY("CodeEO Accessed "),            /* 0x19 */
        STRENTRY("CodeER "),                     /* 0x1a */
        STRENTRY("CodeER Accessed "),            /* 0x1b */
        STRENTRY("CodeConfEO "),                 /* 0x1c */
        STRENTRY("CodeConfEO Accessed "),        /* 0x1d */
        STRENTRY("CodeConfER "),                 /* 0x1e */
        STRENTRY("CodeConfER Accessed ")         /* 0x1f */
# undef STRENTRY
    };
# define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
    char        szMsg[128];
    char       *psz = &szMsg[0];
    unsigned    i = pDesc->Gen.u1DescType << 4 | pDesc->Gen.u4Type;
    memcpy(psz, s_aTypes[i].psz, s_aTypes[i].cch);
    psz += s_aTypes[i].cch;

    if (pDesc->Gen.u1Present)
        ADD_STR(psz, "Present ");
    else
        ADD_STR(psz, "Not-Present ");
# if HC_ARCH_BITS == 64
    if (pDesc->Gen.u1Long)
        ADD_STR(psz, "64-bit ");
    else
        ADD_STR(psz, "Comp ");
# else
    if (pDesc->Gen.u1Granularity)
        ADD_STR(psz, "Page ");
    if (pDesc->Gen.u1DefBig)
        ADD_STR(psz, "32-bit ");
    else
        ADD_STR(psz, "16-bit ");
# endif
# undef ADD_STR
    *psz = '\0';

    /*
     * Limit and base, then format the output.
     */
    uint32_t u32Limit = X86DESC_LIMIT(*pDesc);
    if (pDesc->Gen.u1Granularity)
        u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;

# if HC_ARCH_BITS == 64
    uint64_t u32Base = X86DESC64_BASE(*pDesc);

    Log(("%s %04x - %RX64 %RX64 - base=%RX64 limit=%08x dpl=%d %s\n", pszMsg,
         Sel, pDesc->au64[0], pDesc->au64[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
# else
    uint32_t u32Base = X86DESC_BASE(*pDesc);

    Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
         Sel, pDesc->au32[0], pDesc->au32[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
# endif
}

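/*
 * A sketch, not from this file, of dumping the descriptor for the current CS
 * selector. ASMGetGDTR, ASMGetCS and X86_SEL_SHIFT are real IPRT/x86.h
 * helpers; the fragment itself is hypothetical and ignores LDT selectors and
 * the GDT limit.
 */
# if 0 /* illustration only */
    RTGDTR      Gdtr;
    ASMGetGDTR(&Gdtr);
    PCX86DESCHC paGdt = (PCX86DESCHC)Gdtr.pGdt;
    RTSEL       SelCS = ASMGetCS();
    HWACCMR0DumpDescriptor(&paGdt[SelCS >> X86_SEL_SHIFT], SelCS, "CS: ");
# endif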

/**
 * Formats a full register dump.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the CPU context.
 */
VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    NOREF(pVM);

    /*
     * Format the flags.
     */
    static struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } const s_aFlags[] =
    {
        { "vip", NULL, X86_EFL_VIP },
        { "vif", NULL, X86_EFL_VIF },
        { "ac",  NULL, X86_EFL_AC },
        { "vm",  NULL, X86_EFL_VM },
        { "rf",  NULL, X86_EFL_RF },
        { "nt",  NULL, X86_EFL_NT },
        { "ov",  "nv", X86_EFL_OF },
        { "dn",  "up", X86_EFL_DF },
        { "ei",  "di", X86_EFL_IF },
        { "tf",  NULL, X86_EFL_TF },
        { "ng",  "pl", X86_EFL_SF },
        { "nz",  "zr", X86_EFL_ZF },
        { "ac",  "na", X86_EFL_AF },
        { "po",  "pe", X86_EFL_PF },
        { "cy",  "nc", X86_EFL_CF },
    };
    char szEFlags[80];
    char *psz = szEFlags;
    uint32_t efl = pCtx->eflags.u32;
    for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
    {
        const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';

    /*
     * Format the registers.
     */
    if (CPUMIsGuestIn64BitCode(pVCpu, CPUMCTX2CORE(pCtx)))
    {
        Log(("rax=%016RX64 rbx=%016RX64 rcx=%016RX64 rdx=%016RX64\n"
             "rsi=%016RX64 rdi=%016RX64 r8 =%016RX64 r9 =%016RX64\n"
             "r10=%016RX64 r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
             "r14=%016RX64 r15=%016RX64\n"
             "rip=%016RX64 rsp=%016RX64 rbp=%016RX64 iopl=%d %*s\n"
             "cs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "ds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "es={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "fs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "gs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "ss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
             "cr0=%016RX64 cr2=%016RX64 cr3=%016RX64 cr4=%016RX64\n"
             "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64 dr3=%016RX64\n"
             "dr4=%016RX64 dr5=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
             "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
             "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
             "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
             "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
             ,
             pCtx->rax, pCtx->rbx, pCtx->rcx, pCtx->rdx, pCtx->rsi, pCtx->rdi,
             pCtx->r8, pCtx->r9, pCtx->r10, pCtx->r11, pCtx->r12, pCtx->r13,
             pCtx->r14, pCtx->r15,
             pCtx->rip, pCtx->rsp, pCtx->rbp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
             (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u,
             (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u,
             (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u,
             (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u,
             (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u,
             (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u,
             pCtx->cr0, pCtx->cr2, pCtx->cr3, pCtx->cr4,
             pCtx->dr[0], pCtx->dr[1], pCtx->dr[2], pCtx->dr[3],
             pCtx->dr[4], pCtx->dr[5], pCtx->dr[6], pCtx->dr[7],
             pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
             (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
             (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
    }
    else
        Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
             "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
             "cs={%04x base=%016RX64 limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
             "ds={%04x base=%016RX64 limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
             "es={%04x base=%016RX64 limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
             "fs={%04x base=%016RX64 limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
             "gs={%04x base=%016RX64 limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
             "ss={%04x base=%016RX64 limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
             "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
             "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
             "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
             "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
             ,
             pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
             pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), 31, szEFlags,
             (RTSEL)pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, pCtx->csHid.Attr.u, pCtx->dr[0], pCtx->dr[1],
             (RTSEL)pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, pCtx->dsHid.Attr.u, pCtx->dr[2], pCtx->dr[3],
             (RTSEL)pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, pCtx->esHid.Attr.u, pCtx->dr[4], pCtx->dr[5],
             (RTSEL)pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, pCtx->fsHid.Attr.u, pCtx->dr[6], pCtx->dr[7],
             (RTSEL)pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, pCtx->gsHid.Attr.u, pCtx->cr0, pCtx->cr2,
             (RTSEL)pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, pCtx->ssHid.Attr.u, pCtx->cr3, pCtx->cr4,
             pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, efl,
             (RTSEL)pCtx->ldtr, pCtx->ldtrHid.u64Base, pCtx->ldtrHid.u32Limit, pCtx->ldtrHid.Attr.u,
             (RTSEL)pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));

    Log(("FPU:\n"
         "FCW=%04x FSW=%04x FTW=%02x\n"
         "FOP=%04x FPUIP=%08x CS=%04x Rsrvd1=%04x\n"
         "FPUDP=%04x DS=%04x Rsrvd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n"
         ,
         pCtx->fpu.FCW, pCtx->fpu.FSW, pCtx->fpu.FTW,
         pCtx->fpu.FOP, pCtx->fpu.FPUIP, pCtx->fpu.CS, pCtx->fpu.Rsrvd1,
         pCtx->fpu.FPUDP, pCtx->fpu.DS, pCtx->fpu.Rsrvd2,
         pCtx->fpu.MXCSR, pCtx->fpu.MXCSR_MASK));

    Log(("MSR:\n"
         "EFER         =%016RX64\n"
         "PAT          =%016RX64\n"
         "STAR         =%016RX64\n"
         "CSTAR        =%016RX64\n"
         "LSTAR        =%016RX64\n"
         "SFMASK       =%016RX64\n"
         "KERNELGSBASE =%016RX64\n",
         pCtx->msrEFER,
         pCtx->msrPAT,
         pCtx->msrSTAR,
         pCtx->msrCSTAR,
         pCtx->msrLSTAR,
         pCtx->msrSFMASK,
         pCtx->msrKERNELGSBASE));
}

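/*
 * A sketch, not part of the original, of invoking the dump above from a
 * strict build; CPUMQueryGuestCtxPtr is the real CPUM accessor for the guest
 * context, while the call site is hypothetical.
 */
# if 0 /* illustration only */
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    HWACCMDumpRegs(pVM, pVCpu, pCtx);
# endif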
#endif /* VBOX_STRICT */
