VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp@55876

Last change on this file since 55876 was 55863, checked in by vboxsync, 10 years ago

IPRT,SUPDrv,VMM: Revised the context switching hook interface. Do less work when enabling the hook (formerly 'registration'). Drop the reference counting (kept internally for solaris) as it complicates restrictions wrt destroying enabled hooks. Bumped support driver version.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 70.6 KB
1/* $Id: HMR0.cpp 55863 2015-05-14 18:29:34Z vboxsync $ */
2/** @file
3 * Hardware Assisted Virtualization Manager (HM) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <VBox/vmm/hm.h>
23#include <VBox/vmm/pgm.h>
24#include "HMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/hm_vmx.h>
27#include <VBox/vmm/hm_svm.h>
28#include <VBox/vmm/gim.h>
29#include <VBox/err.h>
30#include <VBox/log.h>
31#include <iprt/assert.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/cpuset.h>
35#include <iprt/mem.h>
36#include <iprt/memobj.h>
37#include <iprt/once.h>
38#include <iprt/param.h>
39#include <iprt/power.h>
40#include <iprt/string.h>
41#include <iprt/thread.h>
42#include <iprt/x86.h>
43#include "HMVMXR0.h"
44#include "HMSVMR0.h"
45
46
47/*******************************************************************************
48* Internal Functions *
49*******************************************************************************/
50static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
51static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
52static DECLCALLBACK(void) hmR0InitIntelCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
53static DECLCALLBACK(void) hmR0InitAmdCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
54static DECLCALLBACK(void) hmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
55static DECLCALLBACK(void) hmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData);
56
57
58/*******************************************************************************
59* Structures and Typedefs *
60*******************************************************************************/
61/**
62 * This is used to manage the status code of an RTMpOnAll call in HM.
63 */
64typedef struct HMR0FIRSTRC
65{
66 /** The status code. */
67 int32_t volatile rc;
68 /** The ID of the CPU reporting the first failure. */
69 RTCPUID volatile idCpu;
70} HMR0FIRSTRC;
71/** Pointer to a first return code structure. */
72typedef HMR0FIRSTRC *PHMR0FIRSTRC;
73
74
75/*******************************************************************************
76* Global Variables *
77*******************************************************************************/
78/**
79 * Global data.
80 */
81static struct
82{
83 /** Per CPU globals. */
84 HMGLOBALCPUINFO aCpuInfo[RTCPUSET_MAX_CPUS];
85
86 /** @name Ring-0 method table for AMD-V and VT-x specific operations.
87 * @{ */
88 DECLR0CALLBACKMEMBER(int, pfnEnterSession, (PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu));
89 DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit));
90 DECLR0CALLBACKMEMBER(int, pfnSaveHostState, (PVM pVM, PVMCPU pVCpu));
91 DECLR0CALLBACKMEMBER(int, pfnRunGuestCode, (PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
92 DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
93 bool fEnabledByHost, void *pvArg));
94 DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
95 DECLR0CALLBACKMEMBER(int, pfnInitVM, (PVM pVM));
96 DECLR0CALLBACKMEMBER(int, pfnTermVM, (PVM pVM));
97 DECLR0CALLBACKMEMBER(int, pfnSetupVM, (PVM pVM));
98 /** @} */
99
100 /** Maximum ASID allowed. */
101 uint32_t uMaxAsid;
102
103 /** VT-x data. */
104 struct
105 {
106 /** Set by us to indicate VMX is supported by the CPU. */
107 bool fSupported;
108 /** Whether we're using SUPR0EnableVTx or not. */
109 bool fUsingSUPR0EnableVTx;
110 /** Whether we're using the preemption timer or not. */
111 bool fUsePreemptTimer;
112 /** The shift mask employed by the VMX-Preemption timer. */
113 uint8_t cPreemptTimerShift;
114
115 /** Host CR4 value (set by ring-0 VMX init) */
116 /** @todo This isn't used for anything relevant. Remove later? */
117 uint64_t u64HostCr4;
118
119 /** Host EFER value (set by ring-0 VMX init) */
120 uint64_t u64HostEfer;
121
122 /** VMX MSR values */
123 VMXMSRS Msrs;
124
125 /* Last instruction error */
126 uint32_t ulLastInstrError;
127 } vmx;
128
129 /** AMD-V information. */
130 struct
131 {
132 /* HWCR MSR (for diagnostics) */
133 uint64_t u64MsrHwcr;
134
135 /** SVM revision. */
136 uint32_t u32Rev;
137
138 /** SVM feature bits from cpuid 0x8000000a */
139 uint32_t u32Features;
140
141 /** Set by us to indicate SVM is supported by the CPU. */
142 bool fSupported;
143 } svm;
144 /** Saved error from detection */
145 int32_t lLastError;
146
147 struct
148 {
149 uint32_t u32AMDFeatureECX;
150 uint32_t u32AMDFeatureEDX;
151 } cpuid;
152
153 /** If set, VT-x/AMD-V is enabled globally at init time, otherwise it's
154 * enabled and disabled each time it's used to execute guest code. */
155 bool fGlobalInit;
156 /** Indicates whether the host is suspending or not. We'll refuse a few
157 * actions when the host is being suspended to speed up the suspending and
158 * avoid trouble. */
159 volatile bool fSuspended;
160
161 /** Whether we've already initialized all CPUs.
162 * @remarks We could check the EnableAllCpusOnce state, but this is
163 * simpler and hopefully easier to understand. */
164 bool fEnabled;
165 /** Serialize initialization in HMR0EnableAllCpus. */
166 RTONCE EnableAllCpusOnce;
167} g_HvmR0;
168
169
170
171/**
172 * Initializes a first return code structure.
173 *
174 * @param pFirstRc The structure to init.
175 */
176static void hmR0FirstRcInit(PHMR0FIRSTRC pFirstRc)
177{
178 pFirstRc->rc = VINF_SUCCESS;
179 pFirstRc->idCpu = NIL_RTCPUID;
180}
181
182
183/**
184 * Tries to set the status code (success is ignored).
185 *
186 * @param pFirstRc The first return code structure.
187 * @param rc The status code.
188 */
189static void hmR0FirstRcSetStatus(PHMR0FIRSTRC pFirstRc, int rc)
190{
191 if ( RT_FAILURE(rc)
192 && ASMAtomicCmpXchgS32(&pFirstRc->rc, rc, VINF_SUCCESS))
193 pFirstRc->idCpu = RTMpCpuId();
194}
195
196
197/**
198 * Get the status code of a first return code structure.
199 *
200 * @returns The status code; VINF_SUCCESS or error status, no informational or
201 * warning errors.
202 * @param pFirstRc The first return code structure.
203 */
204static int hmR0FirstRcGetStatus(PHMR0FIRSTRC pFirstRc)
205{
206 return pFirstRc->rc;
207}
208
209
210#ifdef VBOX_STRICT
211/**
212 * Get the CPU ID on which the failure status code was reported.
213 *
214 * @returns The CPU ID, NIL_RTCPUID if no failure was reported.
215 * @param pFirstRc The first return code structure.
216 */
217static RTCPUID hmR0FirstRcGetCpuId(PHMR0FIRSTRC pFirstRc)
218{
219 return pFirstRc->idCpu;
220}
221#endif /* VBOX_STRICT */
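/*
 * Illustrative only, compiled out: a minimal sketch of how the first-RC
 * helpers above are combined with RTMpOnAll(), mirroring the pattern used by
 * hmR0InitIntel() and hmR0InitAmd() further down.  The worker and wrapper
 * names are made up for this example.
 */
#if 0
static DECLCALLBACK(void) hmR0ExampleWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
    NOREF(idCpu); NOREF(pvUser2);
    hmR0FirstRcSetStatus(pFirstRc, VINF_SUCCESS); /* each CPU reports its status; the first failure wins */
}

static int hmR0ExampleOnAllCpus(void)
{
    HMR0FIRSTRC FirstRc;
    hmR0FirstRcInit(&FirstRc);
    int rc = RTMpOnAll(hmR0ExampleWorker, &FirstRc, NULL);
    if (RT_SUCCESS(rc))
        rc = hmR0FirstRcGetStatus(&FirstRc); /* the first failure reported, if any */
    return rc;
}
#endif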
222
223
224/** @name Dummy callback handlers.
225 * @{ */
226
227static DECLCALLBACK(int) hmR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
228{
229 NOREF(pVM); NOREF(pVCpu); NOREF(pCpu);
230 return VINF_SUCCESS;
231}
232
233static DECLCALLBACK(void) hmR0DummyThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
234{
235 NOREF(enmEvent); NOREF(pVCpu); NOREF(fGlobalInit);
236}
237
238static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
239 bool fEnabledBySystem, void *pvArg)
240{
241 NOREF(pCpu); NOREF(pVM); NOREF(pvCpuPage); NOREF(HCPhysCpuPage); NOREF(fEnabledBySystem); NOREF(pvArg);
242 return VINF_SUCCESS;
243}
244
245static DECLCALLBACK(int) hmR0DummyDisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
246{
247 NOREF(pCpu); NOREF(pvCpuPage); NOREF(HCPhysCpuPage);
248 return VINF_SUCCESS;
249}
250
251static DECLCALLBACK(int) hmR0DummyInitVM(PVM pVM)
252{
253 NOREF(pVM);
254 return VINF_SUCCESS;
255}
256
257static DECLCALLBACK(int) hmR0DummyTermVM(PVM pVM)
258{
259 NOREF(pVM);
260 return VINF_SUCCESS;
261}
262
263static DECLCALLBACK(int) hmR0DummySetupVM(PVM pVM)
264{
265 NOREF(pVM);
266 return VINF_SUCCESS;
267}
268
269static DECLCALLBACK(int) hmR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
270{
271 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
272 return VINF_SUCCESS;
273}
274
275static DECLCALLBACK(int) hmR0DummySaveHostState(PVM pVM, PVMCPU pVCpu)
276{
277 NOREF(pVM); NOREF(pVCpu);
278 return VINF_SUCCESS;
279}
280
281/** @} */
282
283
284/**
285 * Checks if the CPU is subject to the "VMX-Preemption Timer Does Not Count
286 * Down at the Rate Specified" erratum.
287 *
288 * Errata names and related steppings:
289 * - BA86 - D0.
290 * - AAX65 - C2.
291 * - AAU65 - C2, K0.
292 * - AAO95 - B1.
293 * - AAT59 - C2.
294 * - AAK139 - D0.
295 * - AAM126 - C0, C1, D0.
296 * - AAN92 - B1.
297 * - AAJ124 - C0, D0.
298 *
299 * - AAP86 - B1.
300 *
301 * Steppings: B1, C0, C1, C2, D0, K0.
302 *
303 * @returns true if subject to it, false if not.
304 */
305static bool hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum(void)
306{
307 uint32_t u = ASMCpuId_EAX(1);
308 u &= ~(RT_BIT_32(14) | RT_BIT_32(15) | RT_BIT_32(28) | RT_BIT_32(29) | RT_BIT_32(30) | RT_BIT_32(31));
309 if ( u == UINT32_C(0x000206E6) /* 323344.pdf - BA86 - D0 - Intel Xeon Processor 7500 Series */
310 || u == UINT32_C(0x00020652) /* 323056.pdf - AAX65 - C2 - Intel Xeon Processor L3406 */
311 || u == UINT32_C(0x00020652) /* 322814.pdf - AAT59 - C2 - Intel CoreTM i7-600, i5-500, i5-400 and i3-300 Mobile Processor Series */
312 || u == UINT32_C(0x00020652) /* 322911.pdf - AAU65 - C2 - Intel CoreTM i5-600, i3-500 Desktop Processor Series and Intel Pentium Processor G6950 */
313 || u == UINT32_C(0x00020655) /* 322911.pdf - AAU65 - K0 - Intel CoreTM i5-600, i3-500 Desktop Processor Series and Intel Pentium Processor G6950 */
314 || u == UINT32_C(0x000106E5) /* 322373.pdf - AAO95 - B1 - Intel Xeon Processor 3400 Series */
315 || u == UINT32_C(0x000106E5) /* 322166.pdf - AAN92 - B1 - Intel CoreTM i7-800 and i5-700 Desktop Processor Series */
316 || u == UINT32_C(0x000106E5) /* 320767.pdf - AAP86 - B1 - Intel Core i7-900 Mobile Processor Extreme Edition Series, Intel Core i7-800 and i7-700 Mobile Processor Series */
317 || u == UINT32_C(0x000106A0) /* 321333.pdf - AAM126 - C0 - Intel Xeon Processor 3500 Series Specification */
318 || u == UINT32_C(0x000106A1) /* 321333.pdf - AAM126 - C1 - Intel Xeon Processor 3500 Series Specification */
319 || u == UINT32_C(0x000106A4) /* 320836.pdf - AAJ124 - C0 - Intel Core i7-900 Desktop Processor Extreme Edition Series and Intel Core i7-900 Desktop Processor Series */
320 || u == UINT32_C(0x000106A5) /* 321333.pdf - AAM126 - D0 - Intel Xeon Processor 3500 Series Specification */
321 || u == UINT32_C(0x000106A5) /* 321324.pdf - AAK139 - D0 - Intel Xeon Processor 5500 Series Specification */
322 || u == UINT32_C(0x000106A5) /* 320836.pdf - AAJ124 - D0 - Intel Core i7-900 Desktop Processor Extreme Edition Series and Intel Core i7-900 Desktop Processor Series */
323 )
324 return true;
325 return false;
326}
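/*
 * Illustrative only, compiled out: how the masked CPUID(1).EAX signatures
 * compared above are composed.  The bit layout is the standard x86 signature
 * (stepping 3:0, model 7:4, family 11:8, ext. model 19:16, ext. family 27:20);
 * the helper itself is hypothetical and not part of HM.
 */
#if 0
static uint32_t hmR0ExampleCpuSignature(uint8_t uExtFamily, uint8_t uExtModel,
                                        uint8_t uFamily, uint8_t uModel, uint8_t uStepping)
{
    return ((uint32_t)uExtFamily << 20)
         | ((uint32_t)uExtModel  << 16)
         | ((uint32_t)uFamily    <<  8)
         | ((uint32_t)uModel     <<  4)
         |  (uint32_t)uStepping;
}
/* E.g. ext. model 2, family 6, model 0xE, stepping 6 -> 0x000206E6 (the BA86/D0 entry above). */
#endif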
327
328
329/**
330 * Intel specific initialization code.
331 *
332 * @returns VBox status code (will only fail if out of memory).
333 */
334static int hmR0InitIntel(uint32_t u32FeaturesECX, uint32_t u32FeaturesEDX)
335{
336 /*
337 * Check that all the required VT-x features are present.
338 * We also assume all VT-x-enabled CPUs support fxsave/fxrstor.
339 */
340 if ( (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
341 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
342 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
343 )
344 {
345 /** @todo move this into a separate function. */
346 g_HvmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
347
348 /*
349 * First, try to use the native kernel API for controlling VT-x.
350 * (This is only supported by some Mac OS X kernels at the moment.)
351 */
352 int rc = g_HvmR0.lLastError = SUPR0EnableVTx(true /* fEnable */);
353 g_HvmR0.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
354 if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
355 {
356 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
357 if (RT_SUCCESS(rc))
358 {
359 g_HvmR0.vmx.fSupported = true;
360 rc = SUPR0EnableVTx(false /* fEnable */);
361 AssertLogRelRC(rc);
362 }
363 }
364 else
365 {
366 /* We need to check if VT-x has been properly initialized on all
367 CPUs. Some BIOSes do a lousy job. */
368 HMR0FIRSTRC FirstRc;
369 hmR0FirstRcInit(&FirstRc);
370 g_HvmR0.lLastError = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
371 if (RT_SUCCESS(g_HvmR0.lLastError))
372 g_HvmR0.lLastError = hmR0FirstRcGetStatus(&FirstRc);
373 }
374 if (RT_SUCCESS(g_HvmR0.lLastError))
375 {
376 /* Reread in case it was changed by hmR0InitIntelCpu(). */
377 g_HvmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
378
379 /*
380 * Read all relevant registers and MSRs.
381 */
382 g_HvmR0.vmx.u64HostCr4 = ASMGetCR4();
383 g_HvmR0.vmx.u64HostEfer = ASMRdMsr(MSR_K6_EFER);
384 g_HvmR0.vmx.Msrs.u64BasicInfo = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
385 g_HvmR0.vmx.Msrs.VmxPinCtls.u = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
386 g_HvmR0.vmx.Msrs.VmxProcCtls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
387 g_HvmR0.vmx.Msrs.VmxExit.u = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
388 g_HvmR0.vmx.Msrs.VmxEntry.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
389 g_HvmR0.vmx.Msrs.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
390 g_HvmR0.vmx.Msrs.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
391 g_HvmR0.vmx.Msrs.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
392 g_HvmR0.vmx.Msrs.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
393 g_HvmR0.vmx.Msrs.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
394 g_HvmR0.vmx.Msrs.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
395 /* VPID allows a 16-bit ASID. */
396 g_HvmR0.uMaxAsid = 0x10000; /* exclusive */
397
398 if (g_HvmR0.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
399 {
400 g_HvmR0.vmx.Msrs.VmxProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
401 if (g_HvmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
402 g_HvmR0.vmx.Msrs.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
403
404 if (g_HvmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC)
405 g_HvmR0.vmx.Msrs.u64Vmfunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
406 }
407
408 if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
409 {
410 /*
411 * Enter root mode
412 */
413 RTR0MEMOBJ hScatchMemObj;
414 rc = RTR0MemObjAllocCont(&hScatchMemObj, PAGE_SIZE, false /* fExecutable */);
415 if (RT_FAILURE(rc))
416 {
417 LogRel(("hmR0InitIntel: RTR0MemObjAllocCont(,PAGE_SIZE,false) -> %Rrc\n", rc));
418 return rc;
419 }
420
421 void *pvScatchPage = RTR0MemObjAddress(hScatchMemObj);
422 RTHCPHYS HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScatchMemObj, 0);
423 ASMMemZeroPage(pvScatchPage);
424
425 /* Set revision dword at the beginning of the structure. */
426 *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(g_HvmR0.vmx.Msrs.u64BasicInfo);
427
428 /* Make sure we don't get rescheduled to another cpu during this probe. */
429 RTCCUINTREG fFlags = ASMIntDisableFlags();
430
431 /*
432 * Check CR4.VMXE
433 */
434 g_HvmR0.vmx.u64HostCr4 = ASMGetCR4();
435 if (!(g_HvmR0.vmx.u64HostCr4 & X86_CR4_VMXE))
436 {
437 /* In theory this bit could be cleared behind our back, which would cause
438 #UD faults when we try to execute the VMX instructions... */
439 ASMSetCR4(g_HvmR0.vmx.u64HostCr4 | X86_CR4_VMXE);
440 }
441
442 /*
443 * The only way of checking if we're in VMX root mode or not is to try and enter it.
444 * There is no instruction or control bit that tells us if we're in VMX root mode.
445 * Therefore, try and enter VMX root mode here.
446 */
447 rc = VMXEnable(HCPhysScratchPage);
448 if (RT_SUCCESS(rc))
449 {
450 g_HvmR0.vmx.fSupported = true;
451 VMXDisable();
452 }
453 else
454 {
455 /*
456 * KVM leaves the CPU in VMX root mode. Not only is this not allowed,
457 * it will crash the host when we enter raw mode, because:
458 *
459 * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify
460 * this bit), and
461 * (b) turning off paging causes a #GP (unavoidable when switching
462 * from long mode to 32-bit mode, or from 32-bit to PAE mode).
463 *
464 * They should fix their code, but until they do we simply refuse to run.
465 */
466 g_HvmR0.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
467 Assert(g_HvmR0.vmx.fSupported == false);
468 }
469
470 /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set
471 if it wasn't set before (some software could incorrectly
472 think it's in VMX mode). */
473 ASMSetCR4(g_HvmR0.vmx.u64HostCr4);
474 ASMSetFlags(fFlags);
475
476 RTR0MemObjFree(hScatchMemObj, false);
477 }
478
479 if (g_HvmR0.vmx.fSupported)
480 {
481 rc = VMXR0GlobalInit();
482 if (RT_FAILURE(rc))
483 g_HvmR0.lLastError = rc;
484
485 /*
486 * Install the VT-x methods.
487 */
488 g_HvmR0.pfnEnterSession = VMXR0Enter;
489 g_HvmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback;
490 g_HvmR0.pfnSaveHostState = VMXR0SaveHostState;
491 g_HvmR0.pfnRunGuestCode = VMXR0RunGuestCode;
492 g_HvmR0.pfnEnableCpu = VMXR0EnableCpu;
493 g_HvmR0.pfnDisableCpu = VMXR0DisableCpu;
494 g_HvmR0.pfnInitVM = VMXR0InitVM;
495 g_HvmR0.pfnTermVM = VMXR0TermVM;
496 g_HvmR0.pfnSetupVM = VMXR0SetupVM;
497
498 /*
499 * Check for the VMX-Preemption Timer and adjust for the "VMX-Preemption
500 * Timer Does Not Count Down at the Rate Specified" erratum.
501 */
502 if (g_HvmR0.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
503 {
504 g_HvmR0.vmx.fUsePreemptTimer = true;
505 g_HvmR0.vmx.cPreemptTimerShift = MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(g_HvmR0.vmx.Msrs.u64Misc);
506 if (hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum())
507 g_HvmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
508 }
509 }
510 }
511#ifdef LOG_ENABLED
512 else
513 SUPR0Printf("hmR0InitIntelCpu failed with rc=%d\n", g_HvmR0.lLastError);
514#endif
515 }
516 else
517 g_HvmR0.lLastError = VERR_VMX_NO_VMX;
518 return VINF_SUCCESS;
519}
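/*
 * Illustrative only, compiled out: what cPreemptTimerShift captured above is
 * for.  The VMX-preemption timer counts down at the TSC rate divided by
 * 2^shift, so a deadline expressed as a TSC delta is converted to timer ticks
 * with a right shift.  This is a sketch of that conversion, not the code that
 * actually programs the VMCS field.
 */
#if 0
static uint32_t hmR0ExampleTscDeltaToPreemptTicks(uint64_t cTscTicks)
{
    uint64_t cTicks = cTscTicks >> g_HvmR0.vmx.cPreemptTimerShift;
    return cTicks > UINT32_MAX ? UINT32_MAX : (uint32_t)cTicks; /* the VMCS field is 32 bits */
}
#endif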
520
521
522/**
523 * AMD-specific initialization code.
524 *
525 * @returns VBox status code.
526 */
527static int hmR0InitAmd(uint32_t u32FeaturesEDX, uint32_t uMaxExtLeaf)
528{
529 /*
530 * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
531 * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
532 */
533 int rc;
534 if ( (g_HvmR0.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
535 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
536 && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
537 && ASMIsValidExtRange(uMaxExtLeaf)
538 && uMaxExtLeaf >= 0x8000000a
539 )
540 {
541 /* Call the global AMD-V initialization routine. */
542 rc = SVMR0GlobalInit();
543 if (RT_FAILURE(rc))
544 {
545 g_HvmR0.lLastError = rc;
546 return rc;
547 }
548
549 /*
550 * Install the AMD-V methods.
551 */
552 g_HvmR0.pfnEnterSession = SVMR0Enter;
553 g_HvmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback;
554 g_HvmR0.pfnSaveHostState = SVMR0SaveHostState;
555 g_HvmR0.pfnRunGuestCode = SVMR0RunGuestCode;
556 g_HvmR0.pfnEnableCpu = SVMR0EnableCpu;
557 g_HvmR0.pfnDisableCpu = SVMR0DisableCpu;
558 g_HvmR0.pfnInitVM = SVMR0InitVM;
559 g_HvmR0.pfnTermVM = SVMR0TermVM;
560 g_HvmR0.pfnSetupVM = SVMR0SetupVM;
561
562 /* Query AMD features. */
563 uint32_t u32Dummy;
564 ASMCpuId(0x8000000a, &g_HvmR0.svm.u32Rev, &g_HvmR0.uMaxAsid, &u32Dummy, &g_HvmR0.svm.u32Features);
565
566 /*
567 * We need to check if AMD-V has been properly initialized on all CPUs.
568 * Some BIOSes might do a poor job.
569 */
570 HMR0FIRSTRC FirstRc;
571 hmR0FirstRcInit(&FirstRc);
572 rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
573 AssertRC(rc);
574 if (RT_SUCCESS(rc))
575 rc = hmR0FirstRcGetStatus(&FirstRc);
576#ifndef DEBUG_bird
577 AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE,
578 ("hmR0InitAmdCpu failed for cpu %d with rc=%Rrc\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
579#endif
580 if (RT_SUCCESS(rc))
581 {
582 /* Read the HWCR MSR for diagnostics. */
583 g_HvmR0.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
584 g_HvmR0.svm.fSupported = true;
585 }
586 else
587 {
588 g_HvmR0.lLastError = rc;
589 if (rc == VERR_SVM_DISABLED || rc == VERR_SVM_IN_USE)
590 rc = VINF_SUCCESS; /* Don't fail if AMD-V is disabled or in use. */
591 }
592 }
593 else
594 {
595 rc = VINF_SUCCESS; /* Don't fail if AMD-V is not supported. See @bugref{6785}. */
596 g_HvmR0.lLastError = VERR_SVM_NO_SVM;
597 }
598 return rc;
599}
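/*
 * Illustrative only, compiled out: the layout of the CPUID 0x8000000a leaf
 * queried above -- EAX holds the SVM revision, EBX the number of ASIDs, and
 * EDX the SVM feature flags (bit 0 = nested paging).  The helper is
 * hypothetical; hmR0InitAmd() stores these values straight into g_HvmR0.
 */
#if 0
static void hmR0ExampleQuerySvmCaps(void)
{
    uint32_t uRev, cAsids, uDummy, fFeatures;
    ASMCpuId(0x8000000a, &uRev, &cAsids, &uDummy, &fFeatures);
    SUPR0Printf("SVM rev=%#x cAsids=%u fNestedPaging=%d\n",
                uRev, cAsids, RT_BOOL(fFeatures & RT_BIT_32(0)));
}
#endif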
600
601
602/**
603 * Does global Ring-0 HM initialization (at module init).
604 *
605 * @returns VBox status code.
606 */
607VMMR0_INT_DECL(int) HMR0Init(void)
608{
609 /*
610 * Initialize the globals.
611 */
612 g_HvmR0.fEnabled = false;
613 static RTONCE s_OnceInit = RTONCE_INITIALIZER;
614 g_HvmR0.EnableAllCpusOnce = s_OnceInit;
615 for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
616 {
617 g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
618 g_HvmR0.aCpuInfo[i].idCpu = NIL_RTCPUID;
619 }
620
621 /* Fill in all callbacks with placeholders. */
622 g_HvmR0.pfnEnterSession = hmR0DummyEnter;
623 g_HvmR0.pfnThreadCtxCallback = hmR0DummyThreadCtxCallback;
624 g_HvmR0.pfnSaveHostState = hmR0DummySaveHostState;
625 g_HvmR0.pfnRunGuestCode = hmR0DummyRunGuestCode;
626 g_HvmR0.pfnEnableCpu = hmR0DummyEnableCpu;
627 g_HvmR0.pfnDisableCpu = hmR0DummyDisableCpu;
628 g_HvmR0.pfnInitVM = hmR0DummyInitVM;
629 g_HvmR0.pfnTermVM = hmR0DummyTermVM;
630 g_HvmR0.pfnSetupVM = hmR0DummySetupVM;
631
632 /* Default is global VT-x/AMD-V init. */
633 g_HvmR0.fGlobalInit = true;
634
635 /*
636 * Make sure aCpuInfo is big enough for all the CPUs on this system.
637 */
638 if (RTMpGetArraySize() > RT_ELEMENTS(g_HvmR0.aCpuInfo))
639 {
640 LogRel(("HM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_HvmR0.aCpuInfo)));
641 return VERR_TOO_MANY_CPUS;
642 }
643
644 /*
645 * Check for VT-x and AMD-V capabilities.
646 */
647 int rc;
648 if (ASMHasCpuId())
649 {
650 /* Standard features. */
651 uint32_t uMaxLeaf, u32VendorEBX, u32VendorECX, u32VendorEDX;
652 ASMCpuId(0, &uMaxLeaf, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
653 if (ASMIsValidStdRange(uMaxLeaf))
654 {
655 uint32_t u32FeaturesECX, u32FeaturesEDX, u32Dummy;
656 ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);
657
658 /* Query AMD features. */
659 uint32_t uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
660 if (ASMIsValidExtRange(uMaxExtLeaf))
661 ASMCpuId(0x80000001, &u32Dummy, &u32Dummy,
662 &g_HvmR0.cpuid.u32AMDFeatureECX,
663 &g_HvmR0.cpuid.u32AMDFeatureEDX);
664 else
665 g_HvmR0.cpuid.u32AMDFeatureECX = g_HvmR0.cpuid.u32AMDFeatureEDX = 0;
666
667 /* Go to CPU specific initialization code. */
668 if ( ASMIsIntelCpuEx(u32VendorEBX, u32VendorECX, u32VendorEDX)
669 || ASMIsViaCentaurCpuEx(u32VendorEBX, u32VendorECX, u32VendorEDX))
670 {
671 rc = hmR0InitIntel(u32FeaturesECX, u32FeaturesEDX);
672 if (RT_FAILURE(rc))
673 return rc;
674 }
675 else if (ASMIsAmdCpuEx(u32VendorEBX, u32VendorECX, u32VendorEDX))
676 {
677 rc = hmR0InitAmd(u32FeaturesEDX, uMaxExtLeaf);
678 if (RT_FAILURE(rc))
679 return rc;
680 }
681 else
682 g_HvmR0.lLastError = VERR_HM_UNKNOWN_CPU;
683 }
684 else
685 g_HvmR0.lLastError = VERR_HM_UNKNOWN_CPU;
686 }
687 else
688 g_HvmR0.lLastError = VERR_HM_NO_CPUID;
689
690 /*
691 * Register notification callbacks that we can use to disable/enable CPUs
692 * when brought offline/online or suspending/resuming.
693 */
694 if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
695 {
696 rc = RTMpNotificationRegister(hmR0MpEventCallback, NULL);
697 AssertRC(rc);
698
699 rc = RTPowerNotificationRegister(hmR0PowerCallback, NULL);
700 AssertRC(rc);
701 }
702
703 /* We return success here because module init shall not fail if HM
704 fails to initialize. */
705 return VINF_SUCCESS;
706}
707
708
709/**
710 * Does global Ring-0 HM termination (at module termination).
711 *
712 * @returns VBox status code.
713 */
714VMMR0_INT_DECL(int) HMR0Term(void)
715{
716 int rc;
717 if ( g_HvmR0.vmx.fSupported
718 && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
719 {
720 /*
721 * Simple if the host OS manages VT-x.
722 */
723 Assert(g_HvmR0.fGlobalInit);
724 rc = SUPR0EnableVTx(false /* fEnable */);
725
726 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo); iCpu++)
727 {
728 g_HvmR0.aCpuInfo[iCpu].fConfigured = false;
729 Assert(g_HvmR0.aCpuInfo[iCpu].hMemObj == NIL_RTR0MEMOBJ);
730 }
731 }
732 else
733 {
734 Assert(!g_HvmR0.vmx.fUsingSUPR0EnableVTx);
735 if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
736 {
737 /* Doesn't really matter if this fails. */
738 rc = RTMpNotificationDeregister(hmR0MpEventCallback, NULL); AssertRC(rc);
739 rc = RTPowerNotificationDeregister(hmR0PowerCallback, NULL); AssertRC(rc);
740 }
741 else
742 rc = VINF_SUCCESS;
743
744 /*
745 * Disable VT-x/AMD-V on all CPUs if we enabled it before.
746 */
747 if (g_HvmR0.fGlobalInit)
748 {
749 HMR0FIRSTRC FirstRc;
750 hmR0FirstRcInit(&FirstRc);
751 rc = RTMpOnAll(hmR0DisableCpuCallback, NULL /* pvUser 1 */, &FirstRc);
752 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
753 if (RT_SUCCESS(rc))
754 rc = hmR0FirstRcGetStatus(&FirstRc);
755 }
756
757 /*
758 * Free the per-cpu pages used for VT-x and AMD-V.
759 */
760 for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
761 {
762 if (g_HvmR0.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
763 {
764 RTR0MemObjFree(g_HvmR0.aCpuInfo[i].hMemObj, false);
765 g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
766 }
767 }
768 }
769
770 /** @todo This needs cleaning up. There's no matching
771 * hmR0TermIntel()/hmR0TermAmd() and all the VT-x/AMD-V specific bits
772 * should move into their respective modules. */
773 /* Finally, call global VT-x/AMD-V termination. */
774 if (g_HvmR0.vmx.fSupported)
775 VMXR0GlobalTerm();
776 else if (g_HvmR0.svm.fSupported)
777 SVMR0GlobalTerm();
778
779 return rc;
780}
781
782
783/**
784 * Worker function used by hmR0PowerCallback() and HMR0Init() to initialize VT-x
785 * on a CPU.
786 *
787 * @param idCpu The identifier for the CPU the function is called on.
788 * @param pvUser1 Pointer to the first RC structure.
789 * @param pvUser2 Ignored.
790 */
791static DECLCALLBACK(void) hmR0InitIntelCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
792{
793 /** @todo Unify code with SUPR0QueryVTCaps(). */
794 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
795 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
796 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
797 NOREF(idCpu); NOREF(pvUser2);
798
799 int rc = SUPR0GetVmxUsability(NULL /* pfIsSmxModeAmbiguous */);
800 hmR0FirstRcSetStatus(pFirstRc, rc);
801}
802
803
804/**
805 * Worker function used by hmR0PowerCallback() and HMR0Init() to initialize AMD-V
806 * on a CPU.
807 *
808 * @param idCpu The identifier for the CPU the function is called on.
809 * @param pvUser1 Pointer to the first RC structure.
810 * @param pvUser2 Ignored.
811 */
812static DECLCALLBACK(void) hmR0InitAmdCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
813{
814 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
815 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
816 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
817 NOREF(idCpu); NOREF(pvUser2);
818
819 int rc = SUPR0GetSvmUsability(true /* fInitSvm */);
820 hmR0FirstRcSetStatus(pFirstRc, rc);
821}
822
823
824/**
825 * Enable VT-x or AMD-V on the current CPU
826 *
827 * @returns VBox status code.
828 * @param pVM Pointer to the VM (can be NULL).
829 * @param idCpu The identifier for the CPU the function is called on.
830 *
831 * @remarks May be called with interrupts disabled!
832 */
833static int hmR0EnableCpu(PVM pVM, RTCPUID idCpu)
834{
835 PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
836
837 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
838 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
839 Assert(!pCpu->fConfigured);
840 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
841
842 pCpu->idCpu = idCpu;
843 /* Do NOT reset cTlbFlushes here, see @bugref{6255}. */
844
845 int rc;
846 if (g_HvmR0.vmx.fSupported && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
847 rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HvmR0.vmx.Msrs);
848 else
849 {
850 AssertLogRelMsgReturn(pCpu->hMemObj != NIL_RTR0MEMOBJ, ("hmR0EnableCpu failed idCpu=%u.\n", idCpu), VERR_HM_IPE_1);
851 void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
852 RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0 /* iPage */);
853
854 if (g_HvmR0.vmx.fSupported)
855 rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs);
856 else
857 rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, NULL /* pvArg */);
858 }
859 if (RT_SUCCESS(rc))
860 pCpu->fConfigured = true;
861
862 return rc;
863}
864
865
866/**
867 * Worker function passed to RTMpOnAll() that is to be called on all CPUs.
868 *
869 * @param idCpu The identifier for the CPU the function is called on.
870 * @param pvUser1 Opaque pointer to the VM (can be NULL!).
871 * @param pvUser2 Opaque pointer to the first RC structure (PHMR0FIRSTRC).
872 */
873static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
874{
875 PVM pVM = (PVM)pvUser1; /* can be NULL! */
876 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2;
877 AssertReturnVoid(g_HvmR0.fGlobalInit);
878 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
879 hmR0FirstRcSetStatus(pFirstRc, hmR0EnableCpu(pVM, idCpu));
880}
881
882
883/**
884 * RTOnce callback employed by HMR0EnableAllCpus.
885 *
886 * @returns VBox status code.
887 * @param pvUser Pointer to the VM.
888 * @param pvUserIgnore NULL, ignored.
889 */
890static DECLCALLBACK(int32_t) hmR0EnableAllCpuOnce(void *pvUser)
891{
892 PVM pVM = (PVM)pvUser;
893
894 /*
895 * Indicate that we've initialized.
896 *
897 * Note! There is a potential race between this function and the suspend
898 * notification. Kind of unlikely though, so ignored for now.
899 */
900 AssertReturn(!g_HvmR0.fEnabled, VERR_HM_ALREADY_ENABLED_IPE);
901 ASMAtomicWriteBool(&g_HvmR0.fEnabled, true);
902
903 /*
904 * The global init variable is set by the first VM.
905 */
906 g_HvmR0.fGlobalInit = pVM->hm.s.fGlobalInit;
907
908#ifdef VBOX_STRICT
909 for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
910 {
911 Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
912 Assert(!g_HvmR0.aCpuInfo[i].fConfigured);
913 Assert(!g_HvmR0.aCpuInfo[i].cTlbFlushes);
914 Assert(!g_HvmR0.aCpuInfo[i].uCurrentAsid);
915 }
916#endif
917
918 int rc;
919 if ( g_HvmR0.vmx.fSupported
920 && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
921 {
922 /*
923 * Global VT-x initialization API (only darwin for now).
924 */
925 rc = SUPR0EnableVTx(true /* fEnable */);
926 if (RT_SUCCESS(rc))
927 {
928 /* If the host provides a VT-x init API, then we'll rely on that for global init. */
929 g_HvmR0.fGlobalInit = pVM->hm.s.fGlobalInit = true;
930 }
931 else
932 AssertMsgFailed(("hmR0EnableAllCpuOnce/SUPR0EnableVTx: rc=%Rrc\n", rc));
933 }
934 else
935 {
936 /*
937 * We're doing the job ourselves.
938 */
939 /* Allocate one page per cpu for the global VT-x and AMD-V pages */
940 for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
941 {
942 Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
943
944 if (RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(i)))
945 {
946 rc = RTR0MemObjAllocCont(&g_HvmR0.aCpuInfo[i].hMemObj, PAGE_SIZE, false /* executable R0 mapping */);
947 AssertLogRelRCReturn(rc, rc);
948
949 void *pvR0 = RTR0MemObjAddress(g_HvmR0.aCpuInfo[i].hMemObj); Assert(pvR0);
950 ASMMemZeroPage(pvR0);
951 }
952 }
953
954 rc = VINF_SUCCESS;
955 }
956
957 if ( RT_SUCCESS(rc)
958 && g_HvmR0.fGlobalInit)
959 {
960 /* First time, so initialize each cpu/core. */
961 HMR0FIRSTRC FirstRc;
962 hmR0FirstRcInit(&FirstRc);
963 rc = RTMpOnAll(hmR0EnableCpuCallback, (void *)pVM, &FirstRc);
964 if (RT_SUCCESS(rc))
965 rc = hmR0FirstRcGetStatus(&FirstRc);
966 }
967
968 return rc;
969}
970
971
972/**
973 * Sets up HM on all cpus.
974 *
975 * @returns VBox status code.
976 * @param pVM Pointer to the VM.
977 */
978VMMR0_INT_DECL(int) HMR0EnableAllCpus(PVM pVM)
979{
980 /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
981 if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
982 return VERR_HM_SUSPEND_PENDING;
983
984 return RTOnce(&g_HvmR0.EnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM);
985}
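/*
 * Illustrative only, compiled out: the RTOnce pattern HMR0EnableAllCpus uses
 * above.  The callback runs at most once per RTONCE instance; concurrent
 * callers wait for the first invocation and then all receive its status code.
 * The names here are made up for this example.
 */
#if 0
static RTONCE g_ExampleOnce = RTONCE_INITIALIZER;

static DECLCALLBACK(int32_t) exampleOnceCallback(void *pvUser)
{
    NOREF(pvUser);
    /* One-time initialization would go here. */
    return VINF_SUCCESS;
}

static int exampleInit(void *pvUser)
{
    return RTOnce(&g_ExampleOnce, exampleOnceCallback, pvUser);
}
#endif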
986
987
988/**
989 * Disable VT-x or AMD-V on the current CPU.
990 *
991 * @returns VBox status code.
992 * @param idCpu The identifier for the CPU this function is called on.
993 *
994 * @remarks Must be called with preemption disabled.
995 */
996static int hmR0DisableCpu(RTCPUID idCpu)
997{
998 PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
999
1000 Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
1001 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1002 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
1003 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
1004 Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ);
1005 AssertRelease(idCpu == RTMpCpuId());
1006
1007 if (pCpu->hMemObj == NIL_RTR0MEMOBJ)
1008 return pCpu->fConfigured ? VERR_NO_MEMORY : VINF_SUCCESS /* not initialized. */;
1009
1010 int rc;
1011 if (pCpu->fConfigured)
1012 {
1013 void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
1014 RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
1015
1016 rc = g_HvmR0.pfnDisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
1017 AssertRCReturn(rc, rc);
1018
1019 pCpu->fConfigured = false;
1020 pCpu->idCpu = NIL_RTCPUID;
1021 }
1022 else
1023 rc = VINF_SUCCESS; /* nothing to do */
1024 return rc;
1025}
1026
1027
1028/**
1029 * Worker function passed to RTMpOnAll() that is to be called on the target
1030 * CPUs.
1031 *
1032 * @param idCpu The identifier for the CPU the function is called on.
1033 * @param pvUser1 The 1st user argument.
1034 * @param pvUser2 Opaque pointer to the FirstRc.
1035 */
1036static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1037{
1038 PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2; NOREF(pvUser1);
1039 AssertReturnVoid(g_HvmR0.fGlobalInit);
1040 hmR0FirstRcSetStatus(pFirstRc, hmR0DisableCpu(idCpu));
1041}
1042
1043
1044/**
1045 * Worker function passed to RTMpOnSpecific() that is to be called on the target
1046 * CPU.
1047 *
1048 * @param idCpu The identifier for the CPU the function is called on.
1049 * @param pvUser1 Null, not used.
1050 * @param pvUser2 Null, not used.
1051 */
1052static DECLCALLBACK(void) hmR0DisableCpuOnSpecificCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1053{
1054 NOREF(pvUser1);
1055 NOREF(pvUser2);
1056 hmR0DisableCpu(idCpu);
1057}
1058
1059
1060/**
1061 * Callback function invoked when a cpu goes online or offline.
1062 *
1063 * @param enmEvent The Mp event.
1064 * @param idCpu The identifier for the CPU the function is called on.
1065 * @param pvData Opaque data (PVM pointer).
1066 */
1067static DECLCALLBACK(void) hmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData)
1068{
1069 NOREF(pvData);
1070
1071 /*
1072 * We only care about uninitializing a CPU that is going offline. When a
1073 * CPU comes online, the initialization is done lazily in HMR0Enter().
1074 */
1075 switch (enmEvent)
1076 {
1077 case RTMPEVENT_OFFLINE:
1078 {
1079 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1080 RTThreadPreemptDisable(&PreemptState);
1081 if (idCpu == RTMpCpuId())
1082 {
1083 int rc = hmR0DisableCpu(idCpu);
1084 AssertRC(rc);
1085 RTThreadPreemptRestore(&PreemptState);
1086 }
1087 else
1088 {
1089 RTThreadPreemptRestore(&PreemptState);
1090 RTMpOnSpecific(idCpu, hmR0DisableCpuOnSpecificCallback, NULL /* pvUser1 */, NULL /* pvUser2 */);
1091 }
1092 break;
1093 }
1094
1095 default:
1096 break;
1097 }
1098}
1099
1100
1101/**
1102 * Called whenever a system power state change occurs.
1103 *
1104 * @param enmEvent The Power event.
1105 * @param pvUser User argument.
1106 */
1107static DECLCALLBACK(void) hmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser)
1108{
1109 NOREF(pvUser);
1110 Assert(!g_HvmR0.vmx.fSupported || !g_HvmR0.vmx.fUsingSUPR0EnableVTx);
1111
1112#ifdef LOG_ENABLED
1113 if (enmEvent == RTPOWEREVENT_SUSPEND)
1114 SUPR0Printf("hmR0PowerCallback RTPOWEREVENT_SUSPEND\n");
1115 else
1116 SUPR0Printf("hmR0PowerCallback RTPOWEREVENT_RESUME\n");
1117#endif
1118
1119 if (enmEvent == RTPOWEREVENT_SUSPEND)
1120 ASMAtomicWriteBool(&g_HvmR0.fSuspended, true);
1121
1122 if (g_HvmR0.fEnabled)
1123 {
1124 int rc;
1125 HMR0FIRSTRC FirstRc;
1126 hmR0FirstRcInit(&FirstRc);
1127
1128 if (enmEvent == RTPOWEREVENT_SUSPEND)
1129 {
1130 if (g_HvmR0.fGlobalInit)
1131 {
1132 /* Turn off VT-x or AMD-V on all CPUs. */
1133 rc = RTMpOnAll(hmR0DisableCpuCallback, NULL /* pvUser 1 */, &FirstRc);
1134 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
1135 }
1136 /* else nothing to do here for the local init case */
1137 }
1138 else
1139 {
1140 /* Reinit the CPUs from scratch as the suspend state might have
1141 messed with the MSRs. (lousy BIOSes as usual) */
1142 if (g_HvmR0.vmx.fSupported)
1143 rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
1144 else
1145 rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
1146 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
1147 if (RT_SUCCESS(rc))
1148 rc = hmR0FirstRcGetStatus(&FirstRc);
1149#ifdef LOG_ENABLED
1150 if (RT_FAILURE(rc))
1151 SUPR0Printf("hmR0PowerCallback hmR0InitXxxCpu failed with %Rrc\n", rc);
1152#endif
1153 if (g_HvmR0.fGlobalInit)
1154 {
1155 /* Turn VT-x or AMD-V back on for all CPUs. */
1156 rc = RTMpOnAll(hmR0EnableCpuCallback, NULL /* pVM */, &FirstRc /* output ignored */);
1157 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
1158 }
1159 /* else nothing to do here for the local init case */
1160 }
1161 }
1162
1163 if (enmEvent == RTPOWEREVENT_RESUME)
1164 ASMAtomicWriteBool(&g_HvmR0.fSuspended, false);
1165}
1166
1167
1168/**
1169 * Does ring-0 per-VM HM initialization.
1170 *
1171 * This will copy the HM globals into the VM structure and call the CPU-specific
1172 * init routine, which will allocate resources for each virtual CPU and such.
1173 *
1174 * @returns VBox status code.
1175 * @param pVM Pointer to the VM.
1176 *
1177 * @remarks This is called after HMR3Init(), see vmR3CreateU() and
1178 * vmR3InitRing3().
1179 */
1180VMMR0_INT_DECL(int) HMR0InitVM(PVM pVM)
1181{
1182 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1183
1184#ifdef LOG_ENABLED
1185 SUPR0Printf("HMR0InitVM: %p\n", pVM);
1186#endif
1187
1188 /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
1189 if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
1190 return VERR_HM_SUSPEND_PENDING;
1191
1192 /*
1193 * Copy globals to the VM structure.
1194 */
1195 pVM->hm.s.vmx.fSupported = g_HvmR0.vmx.fSupported;
1196 pVM->hm.s.svm.fSupported = g_HvmR0.svm.fSupported;
1197
1198 pVM->hm.s.vmx.fUsePreemptTimer &= g_HvmR0.vmx.fUsePreemptTimer; /* Can be overridden by CFGM. See HMR3Init(). */
1199 pVM->hm.s.vmx.cPreemptTimerShift = g_HvmR0.vmx.cPreemptTimerShift;
1200 pVM->hm.s.vmx.u64HostCr4 = g_HvmR0.vmx.u64HostCr4;
1201 pVM->hm.s.vmx.u64HostEfer = g_HvmR0.vmx.u64HostEfer;
1202 pVM->hm.s.vmx.Msrs = g_HvmR0.vmx.Msrs;
1203 pVM->hm.s.svm.u64MsrHwcr = g_HvmR0.svm.u64MsrHwcr;
1204 pVM->hm.s.svm.u32Rev = g_HvmR0.svm.u32Rev;
1205 pVM->hm.s.svm.u32Features = g_HvmR0.svm.u32Features;
1206 pVM->hm.s.cpuid.u32AMDFeatureECX = g_HvmR0.cpuid.u32AMDFeatureECX;
1207 pVM->hm.s.cpuid.u32AMDFeatureEDX = g_HvmR0.cpuid.u32AMDFeatureEDX;
1208 pVM->hm.s.lLastError = g_HvmR0.lLastError;
1209 pVM->hm.s.uMaxAsid = g_HvmR0.uMaxAsid;
1210
1211 if (!pVM->hm.s.cMaxResumeLoops) /* allow ring-3 overrides */
1212 {
1213 pVM->hm.s.cMaxResumeLoops = 1024;
1214 if (RTThreadPreemptIsPendingTrusty())
1215 pVM->hm.s.cMaxResumeLoops = 8192;
1216 }
1217
1218 /*
1219 * Initialize some per-VCPU fields.
1220 */
1221 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1222 {
1223 PVMCPU pVCpu = &pVM->aCpus[i];
1224 pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
1225 pVCpu->hm.s.idLastCpu = NIL_RTCPUID;
1226 pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu);
1227
1228 /* We'll always increment this the first time (host uses ASID 0). */
1229 AssertReturn(!pVCpu->hm.s.uCurrentAsid, VERR_HM_IPE_3);
1230 }
1231
1232 pVM->hm.s.uHostKernelFeatures = SUPR0GetKernelFeatures();
1233
1234 /*
1235 * Call the hardware specific initialization method.
1236 */
1237 return g_HvmR0.pfnInitVM(pVM);
1238}
1239
1240
1241/**
1242 * Does ring-0 per VM HM termination.
1243 *
1244 * @returns VBox status code.
1245 * @param pVM Pointer to the VM.
1246 */
1247VMMR0_INT_DECL(int) HMR0TermVM(PVM pVM)
1248{
1249 Log(("HMR0TermVM: %p\n", pVM));
1250 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1251
1252 /*
1253 * Call the hardware specific method.
1254 *
1255 * Note! We might be preparing for a suspend, so the pfnTermVM() functions should probably not
1256 * mess with VT-x/AMD-V features on the CPU; currently all they do is free memory, so this is safe.
1257 */
1258 return g_HvmR0.pfnTermVM(pVM);
1259}
1260
1261
1262/**
1263 * Sets up a VT-x or AMD-V session.
1264 *
1265 * This is mostly about setting up the hardware VM state.
1266 *
1267 * @returns VBox status code.
1268 * @param pVM Pointer to the VM.
1269 */
1270VMMR0_INT_DECL(int) HMR0SetupVM(PVM pVM)
1271{
1272 Log(("HMR0SetupVM: %p\n", pVM));
1273 AssertReturn(pVM, VERR_INVALID_PARAMETER);
1274
1275 /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
1276 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
1277
1278 /* On first entry we'll sync everything. */
1279 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1280 HMCPU_CF_RESET_TO(&pVM->aCpus[i], HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);
1281
1282 /*
1283 * Call the hardware specific setup VM method. This requires the CPU to be
1284 * enabled for AMD-V/VT-x and preemption to be prevented.
1285 */
1286 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1287 RTThreadPreemptDisable(&PreemptState);
1288 RTCPUID idCpu = RTMpCpuId();
1289
1290 /* Enable VT-x or AMD-V if local init is required. */
1291 int rc;
1292 if (!g_HvmR0.fGlobalInit)
1293 {
1294 rc = hmR0EnableCpu(pVM, idCpu);
1295 if (RT_FAILURE(rc))
1296 {
1297 RTThreadPreemptRestore(&PreemptState);
1298 return rc;
1299 }
1300 }
1301
1302 /* Setup VT-x or AMD-V. */
1303 rc = g_HvmR0.pfnSetupVM(pVM);
1304
1305 /* Disable VT-x or AMD-V if local init was done before. */
1306 if (!g_HvmR0.fGlobalInit)
1307 {
1308 int rc2 = hmR0DisableCpu(idCpu);
1309 AssertRC(rc2);
1310 }
1311
1312 RTThreadPreemptRestore(&PreemptState);
1313 return rc;
1314}
1315
1316
1317/**
1318 * Turns on HM on the CPU if necessary and initializes the bare minimum state
1319 * required for entering HM context.
1320 *
1321 * @returns VBox status code.
1322 * @param pVCpu Pointer to the VMCPU.
1323 *
1324 * @remarks No-long-jump zone!!!
1325 */
1326VMMR0_INT_DECL(int) HMR0EnterCpu(PVMCPU pVCpu)
1327{
1328 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1329
1330 int rc = VINF_SUCCESS;
1331 RTCPUID idCpu = RTMpCpuId();
1332 PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
1333 AssertPtr(pCpu);
1334
1335 /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
1336 if (!pCpu->fConfigured)
1337 rc = hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
1338
1339 /* Reload host-state (back from ring-3/migrated CPUs) and shared guest/host bits. */
1340 HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE);
1341
1342 Assert(pCpu->idCpu == idCpu && pCpu->idCpu != NIL_RTCPUID);
1343 pVCpu->hm.s.idEnteredCpu = idCpu;
1344 return rc;
1345}
1346
1347
1348/**
1349 * Enters the VT-x or AMD-V session.
1350 *
1351 * @returns VBox status code.
1352 * @param pVM Pointer to the VM.
1353 * @param pVCpu Pointer to the VMCPU.
1354 *
1355 * @remarks This is called with preemption disabled.
1356 */
1357VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
1358{
1359 /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
1360 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
1361 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1362
1363 /* Load the bare minimum state required for entering HM. */
1364 int rc = HMR0EnterCpu(pVCpu);
1365 AssertRCReturn(rc, rc);
1366
1367#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1368 AssertReturn(!VMMR0ThreadCtxHookIsEnabled(pVCpu), VERR_HM_IPE_5);
1369 bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
1370#endif
1371
1372 RTCPUID idCpu = RTMpCpuId();
1373 PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
1374 Assert(pCpu);
1375 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
1376
1377 rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
1378 AssertMsgRCReturn(rc, ("pfnEnterSession failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
1379
1380 /* Load the host-state as we may be resuming code after a longjmp and quite
1381 possibly now be scheduled on a different CPU. */
1382 rc = g_HvmR0.pfnSaveHostState(pVM, pVCpu);
1383 AssertMsgRCReturn(rc, ("pfnSaveHostState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
1384
1385#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1386 if (fStartedSet)
1387 PGMRZDynMapReleaseAutoSet(pVCpu);
1388#endif
1389
1390 /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
1391 if (RT_FAILURE(rc))
1392 pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
1393 return rc;
1394}
1395
1396
1397/**
1398 * Deinitializes the bare minimum state used for HM context and if necessary
1399 * disable HM on the CPU.
1400 *
1401 * @returns VBox status code.
1402 * @param pVCpu Pointer to the VMCPU.
1403 *
1404 * @remarks No-long-jump zone!!!
1405 */
1406VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPU pVCpu)
1407{
1408 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1409 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_HM_WRONG_CPU);
1410
1411 RTCPUID idCpu = RTMpCpuId();
1412 PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
1413
1414 if ( !g_HvmR0.fGlobalInit
1415 && pCpu->fConfigured)
1416 {
1417 int rc = hmR0DisableCpu(idCpu);
1418 AssertRCReturn(rc, rc);
1419 Assert(!pCpu->fConfigured);
1420 Assert(pCpu->idCpu == NIL_RTCPUID);
1421
1422 /* For obtaining a non-zero ASID/VPID on next re-entry. */
1423 pVCpu->hm.s.idLastCpu = NIL_RTCPUID;
1424 }
1425
1426 /* Clear it while leaving HM context, hmPokeCpuForTlbFlush() relies on this. */
1427 pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
1428
1429 return VINF_SUCCESS;
1430}
1431
1432
1433/**
1434 * Thread-context hook for HM.
1435 *
1436 * @param enmEvent The thread-context event.
1437 * @param pvUser Opaque pointer to the VMCPU.
1438 */
1439VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
1440{
1441 PVMCPU pVCpu = (PVMCPU)pvUser;
1442 Assert(pVCpu);
1443 Assert(g_HvmR0.pfnThreadCtxCallback);
1444
1445 g_HvmR0.pfnThreadCtxCallback(enmEvent, pVCpu, g_HvmR0.fGlobalInit);
1446}
1447
1448
1449/**
1450 * Runs guest code in a hardware accelerated VM.
1451 *
1452 * @returns VBox status code.
1453 * @param pVM Pointer to the VM.
1454 * @param pVCpu Pointer to the VMCPU.
1455 *
1456 * @remarks Can be called with preemption enabled if thread-context hooks are
1457 * used!!!
1458 */
1459VMMR0_INT_DECL(int) HMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
1460{
1461#ifdef VBOX_STRICT
1462 /* With thread-context hooks we would be running this code with preemption enabled. */
1463 if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
1464 {
1465 PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[RTMpCpuId()];
1466 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
1467 Assert(pCpu->fConfigured);
1468 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
1469 }
1470#endif
1471
1472#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1473 AssertReturn(!VMMR0ThreadCtxHookIsEnabled(pVCpu), VERR_HM_IPE_4);
1474 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1475 PGMRZDynMapStartAutoSet(pVCpu);
1476#endif
1477
1478 int rc = g_HvmR0.pfnRunGuestCode(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));
1479
1480#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1481 PGMRZDynMapReleaseAutoSet(pVCpu);
1482#endif
1483 return rc;
1484}
1485
1486#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1487
1488/**
1489 * Save guest FPU/XMM state (64-bit guest mode & 32-bit host only).
1490 *
1491 * @returns VBox status code.
1492 * @param pVM Pointer to the VM.
1493 * @param pVCpu Pointer to the VMCPU.
1494 * @param pCtx Pointer to the guest CPU context.
1495 */
1496VMMR0_INT_DECL(int) HMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1497{
1498 STAM_COUNTER_INC(&pVCpu->hm.s.StatFpu64SwitchBack);
1499 if (pVM->hm.s.vmx.fSupported)
1500 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL);
1501 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL);
1502}
1503
1504
1505/**
1506 * Save guest debug state (64-bit guest mode & 32-bit host only).
1507 *
1508 * @returns VBox status code.
1509 * @param pVM Pointer to the VM.
1510 * @param pVCpu Pointer to the VMCPU.
1511 * @param pCtx Pointer to the guest CPU context.
1512 */
1513VMMR0_INT_DECL(int) HMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1514{
1515 STAM_COUNTER_INC(&pVCpu->hm.s.StatDebug64SwitchBack);
1516 if (pVM->hm.s.vmx.fSupported)
1517 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL);
1518 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL);
1519}
1520
1521
1522/**
1523 * Test the 32->64-bit switcher.
1524 *
1525 * @returns VBox status code.
1526 * @param pVM Pointer to the VM.
1527 */
1528VMMR0_INT_DECL(int) HMR0TestSwitcher3264(PVM pVM)
1529{
1530 PVMCPU pVCpu = &pVM->aCpus[0];
1531 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1532 uint32_t aParam[5] = {0, 1, 2, 3, 4};
1533 int rc;
1534
1535 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
1536 if (pVM->hm.s.vmx.fSupported)
1537 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCTestSwitcher64, 5, &aParam[0]);
1538 else
1539 rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCTestSwitcher64, 5, &aParam[0]);
1540 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
1541
1542 return rc;
1543}
1544
1545#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
1546
1547/**
1548 * Returns suspend status of the host.
1549 *
1550 * @returns Suspend pending or not.
1551 */
1552VMMR0_INT_DECL(bool) HMR0SuspendPending(void)
1553{
1554 return ASMAtomicReadBool(&g_HvmR0.fSuspended);
1555}
1556
1557
1558/**
1559 * Returns the cpu structure for the current cpu.
1560 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
1561 *
1562 * @returns The cpu structure pointer.
1563 */
1564VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpu(void)
1565{
1566 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1567 RTCPUID idCpu = RTMpCpuId();
1568 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
1569 return &g_HvmR0.aCpuInfo[idCpu];
1570}
1571
1572
1573/**
1574 * Returns the cpu structure for the given cpu.
1575 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
1576 *
1577 * @returns The cpu structure pointer.
1578 * @param idCpu The id of the host cpu.
1579 */
1580VMMR0DECL(PHMGLOBALCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu)
1581{
1582 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
1583 return &g_HvmR0.aCpuInfo[idCpu];
1584}
1585
1586
1587/**
1588 * Save a pending IO read.
1589 *
1590 * @param pVCpu Pointer to the VMCPU.
1591 * @param GCPtrRip Address of IO instruction.
1592 * @param GCPtrRipNext Address of the next instruction.
1593 * @param uPort Port address.
1594 * @param uAndVal AND mask for saving the result in eax.
1595 * @param cbSize Read size.
1596 */
1597VMMR0_INT_DECL(void) HMR0SavePendingIOPortRead(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext,
1598 unsigned uPort, unsigned uAndVal, unsigned cbSize)
1599{
1600 pVCpu->hm.s.PendingIO.enmType = HMPENDINGIO_PORT_READ;
1601 pVCpu->hm.s.PendingIO.GCPtrRip = GCPtrRip;
1602 pVCpu->hm.s.PendingIO.GCPtrRipNext = GCPtrRipNext;
1603 pVCpu->hm.s.PendingIO.s.Port.uPort = uPort;
1604 pVCpu->hm.s.PendingIO.s.Port.uAndVal = uAndVal;
1605 pVCpu->hm.s.PendingIO.s.Port.cbSize = cbSize;
1606 return;
1607}
1608
1609
1610/**
1611 * Save a pending IO write.
1612 *
1613 * @param pVCpu Pointer to the VMCPU.
1614 * @param GCPtrRip Address of IO instruction.
1615 * @param uPort Port address.
1616 * @param uAndVal AND mask for fetching the result from eax.
1617 * @param cbSize Write size.
1618 */
1619VMMR0_INT_DECL(void) HMR0SavePendingIOPortWrite(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext,
1620 unsigned uPort, unsigned uAndVal, unsigned cbSize)
1621{
1622 pVCpu->hm.s.PendingIO.enmType = HMPENDINGIO_PORT_WRITE;
1623 pVCpu->hm.s.PendingIO.GCPtrRip = GCPtrRip;
1624 pVCpu->hm.s.PendingIO.GCPtrRipNext = GCPtrRipNext;
1625 pVCpu->hm.s.PendingIO.s.Port.uPort = uPort;
1626 pVCpu->hm.s.PendingIO.s.Port.uAndVal = uAndVal;
1627 pVCpu->hm.s.PendingIO.s.Port.cbSize = cbSize;
1628 return;
1629}
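/*
 * Illustrative only, compiled out: how an exit handler might record a port
 * read that has to be completed in ring-3, using the helper above.  The RIP
 * advance, port, mask and size values are made up for this example.
 */
#if 0
static void hmR0ExampleRecordPendingIn(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr)
{
    /* IN AL, 60h -- a one byte read whose result is masked into AL. */
    HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr,
                              0x60 /* uPort */, 0xff /* uAndVal */, 1 /* cbSize */);
}
#endif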
1630
1631
1632/**
1633 * Raw-mode switcher hook - disable VT-x if it's active *and* the current
1634 * switcher turns off paging.
1635 *
1636 * @returns VBox status code.
1637 * @param pVM Pointer to the VM.
1638 * @param enmSwitcher The switcher we're about to use.
1639 * @param pfVTxDisabled Where to store whether VT-x was disabled or not.
1640 */
1641VMMR0_INT_DECL(int) HMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled)
1642{
1643 NOREF(pVM);
1644
1645 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1646
1647 *pfVTxDisabled = false;
1648
1649 /* No such issues with AMD-V */
1650 if (!g_HvmR0.vmx.fSupported)
1651 return VINF_SUCCESS;
1652
1653 /* Check if the switching we're up to is safe. */
1654 switch (enmSwitcher)
1655 {
1656 case VMMSWITCHER_32_TO_32:
1657 case VMMSWITCHER_PAE_TO_PAE:
1658 return VINF_SUCCESS; /* safe switchers as they don't turn off paging */
1659
1660 case VMMSWITCHER_32_TO_PAE:
1661 case VMMSWITCHER_PAE_TO_32: /* is this one actually used?? */
1662 case VMMSWITCHER_AMD64_TO_32:
1663 case VMMSWITCHER_AMD64_TO_PAE:
1664 break; /* unsafe switchers */
1665
1666 default:
1667 AssertFailedReturn(VERR_HM_WRONG_SWITCHER);
1668 }
1669
1670 /* When using SUPR0EnableVTx we must let the host suspend and resume VT-x,
1671 regardless of whether we're currently using VT-x or not. */
1672 if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
1673 {
1674 *pfVTxDisabled = SUPR0SuspendVTxOnCpu();
1675 return VINF_SUCCESS;
1676 }
1677
1678 /** @todo Check if this code is presumptive wrt other VT-x users on the
1679 * system... */
1680
1681 /* Nothing to do if we haven't enabled VT-x. */
1682 if (!g_HvmR0.fEnabled)
1683 return VINF_SUCCESS;
1684
1685 /* Local init implies the CPU is currently not in VMX root mode. */
1686 if (!g_HvmR0.fGlobalInit)
1687 return VINF_SUCCESS;
1688
1689 /* Ok, disable VT-x. */
1690 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
1691 AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);
1692
1693 *pfVTxDisabled = true;
1694 void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
1695 RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
1696 return VMXR0DisableCpu(pCpu, pvCpuPage, HCPhysCpuPage);
1697}
1698
1699
1700/**
1701 * Raw-mode switcher hook - re-enable VT-x if it was active *and* the current
1702 * switcher turned off paging.
1703 *
1704 * @param pVM Pointer to the VM.
1705 * @param fVTxDisabled Whether VT-x was disabled or not.
1706 */
1707VMMR0_INT_DECL(void) HMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
1708{
1709 Assert(!ASMIntAreEnabled());
1710
1711 if (!fVTxDisabled)
1712 return; /* nothing to do */
1713
1714 Assert(g_HvmR0.vmx.fSupported);
1715 if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
1716 SUPR0ResumeVTxOnCpu(fVTxDisabled);
1717 else
1718 {
1719 Assert(g_HvmR0.fEnabled);
1720 Assert(g_HvmR0.fGlobalInit);
1721
1722 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
1723 AssertReturnVoid(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ);
1724
1725 void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
1726 RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
1727 VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs);
1728 }
1729}
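/*
 * Illustrative sketch (not part of the original file): the intended pairing of
 * HMR0EnterSwitcher and HMR0LeaveSwitcher around a raw-mode world switch.  The
 * pfnDoWorldSwitch callback stands in for the actual switcher invocation; the
 * preemption/interrupt handling mirrors the assertions in the two functions.
 */
#if 0 /* example only, never compiled */
static int hmR0ExampleUseSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, int (*pfnDoWorldSwitch)(PVM pVM))
{
    /* HMR0EnterSwitcher asserts preemption is disabled and HMR0LeaveSwitcher
       asserts interrupts are off, so take care of both around the switch. */
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);
    RTCCUINTREG const fSavedEFlags = ASMIntDisableFlags();

    bool fVTxDisabled = false;
    int rc = HMR0EnterSwitcher(pVM, enmSwitcher, &fVTxDisabled);
    if (RT_SUCCESS(rc))
    {
        rc = pfnDoWorldSwitch(pVM);             /* hypothetical world-switch call */
        HMR0LeaveSwitcher(pVM, fVTxDisabled);   /* re-enters VMX root mode only if it was left above */
    }

    ASMSetFlags(fSavedEFlags);
    RTThreadPreemptRestore(&PreemptState);
    return rc;
}
#endif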
1730
1731#ifdef VBOX_STRICT
1732
1733/**
1734 * Dumps a descriptor.
1735 *
1736 * @param pDesc Descriptor to dump.
1737 * @param Sel Selector number.
1738 * @param pszMsg Message to prepend the log entry with.
1739 */
1740VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
1741{
1742 /*
1743 * Make variable description string.
1744 */
1745 static struct
1746 {
1747 unsigned cch;
1748 const char *psz;
1749 } const s_aTypes[32] =
1750 {
1751# define STRENTRY(str) { sizeof(str) - 1, str }
1752
1753 /* system */
1754# if HC_ARCH_BITS == 64
1755 STRENTRY("Reserved0 "), /* 0x00 */
1756 STRENTRY("Reserved1 "), /* 0x01 */
1757 STRENTRY("LDT "), /* 0x02 */
1758 STRENTRY("Reserved3 "), /* 0x03 */
1759 STRENTRY("Reserved4 "), /* 0x04 */
1760 STRENTRY("Reserved5 "), /* 0x05 */
1761 STRENTRY("Reserved6 "), /* 0x06 */
1762 STRENTRY("Reserved7 "), /* 0x07 */
1763 STRENTRY("Reserved8 "), /* 0x08 */
1764 STRENTRY("TSS64Avail "), /* 0x09 */
1765 STRENTRY("ReservedA "), /* 0x0a */
1766 STRENTRY("TSS64Busy "), /* 0x0b */
1767 STRENTRY("Call64 "), /* 0x0c */
1768 STRENTRY("ReservedD "), /* 0x0d */
1769 STRENTRY("Int64 "), /* 0x0e */
1770 STRENTRY("Trap64 "), /* 0x0f */
1771# else
1772 STRENTRY("Reserved0 "), /* 0x00 */
1773 STRENTRY("TSS16Avail "), /* 0x01 */
1774 STRENTRY("LDT "), /* 0x02 */
1775 STRENTRY("TSS16Busy "), /* 0x03 */
1776 STRENTRY("Call16 "), /* 0x04 */
1777 STRENTRY("Task "), /* 0x05 */
1778 STRENTRY("Int16 "), /* 0x06 */
1779 STRENTRY("Trap16 "), /* 0x07 */
1780 STRENTRY("Reserved8 "), /* 0x08 */
1781 STRENTRY("TSS32Avail "), /* 0x09 */
1782 STRENTRY("ReservedA "), /* 0x0a */
1783 STRENTRY("TSS32Busy "), /* 0x0b */
1784 STRENTRY("Call32 "), /* 0x0c */
1785 STRENTRY("ReservedD "), /* 0x0d */
1786 STRENTRY("Int32 "), /* 0x0e */
1787 STRENTRY("Trap32 "), /* 0x0f */
1788# endif
1789 /* non system */
1790 STRENTRY("DataRO "), /* 0x10 */
1791 STRENTRY("DataRO Accessed "), /* 0x11 */
1792 STRENTRY("DataRW "), /* 0x12 */
1793 STRENTRY("DataRW Accessed "), /* 0x13 */
1794 STRENTRY("DataDownRO "), /* 0x14 */
1795 STRENTRY("DataDownRO Accessed "), /* 0x15 */
1796 STRENTRY("DataDownRW "), /* 0x16 */
1797 STRENTRY("DataDownRW Accessed "), /* 0x17 */
1798 STRENTRY("CodeEO "), /* 0x18 */
1799 STRENTRY("CodeEO Accessed "), /* 0x19 */
1800 STRENTRY("CodeER "), /* 0x1a */
1801 STRENTRY("CodeER Accessed "), /* 0x1b */
1802 STRENTRY("CodeConfEO "), /* 0x1c */
1803 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
1804 STRENTRY("CodeConfER "), /* 0x1e */
1805 STRENTRY("CodeConfER Accessed ") /* 0x1f */
1806# undef STRENTRY
1807 };
1808# define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
1809 char szMsg[128];
1810 char *psz = &szMsg[0];
1811 unsigned i = pDesc->Gen.u1DescType << 4 | pDesc->Gen.u4Type;
1812 memcpy(psz, s_aTypes[i].psz, s_aTypes[i].cch);
1813 psz += s_aTypes[i].cch;
1814
1815 if (pDesc->Gen.u1Present)
1816 ADD_STR(psz, "Present ");
1817 else
1818 ADD_STR(psz, "Not-Present ");
1819# if HC_ARCH_BITS == 64
1820 if (pDesc->Gen.u1Long)
1821 ADD_STR(psz, "64-bit ");
1822 else
1823 ADD_STR(psz, "Comp ");
1824# else
1825 if (pDesc->Gen.u1Granularity)
1826 ADD_STR(psz, "Page ");
1827 if (pDesc->Gen.u1DefBig)
1828 ADD_STR(psz, "32-bit ");
1829 else
1830 ADD_STR(psz, "16-bit ");
1831# endif
1832# undef ADD_STR
1833 *psz = '\0';
1834
1835 /*
1836 * Limit and Base and format the output.
1837 */
1838 uint32_t u32Limit = X86DESC_LIMIT_G(pDesc);
1839
1840# if HC_ARCH_BITS == 64
1841 uint64_t u64Base = X86DESC64_BASE(pDesc);
1842 Log(("%s %04x - %RX64 %RX64 - base=%RX64 limit=%08x dpl=%d %s\n", pszMsg,
1843 Sel, pDesc->au64[0], pDesc->au64[1], u64Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
1844# else
1845 uint32_t u32Base = X86DESC_BASE(pDesc);
1846 Log(("%s %04x - %08x %08x - base=%08x limit=%08x dpl=%d %s\n", pszMsg,
1847 Sel, pDesc->au32[0], pDesc->au32[1], u32Base, u32Limit, pDesc->Gen.u2Dpl, szMsg));
1848# endif
1849}
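/*
 * Illustrative sketch (not part of the original file): dumping the descriptor
 * that the host's current TR selector refers to.  It assumes the IPRT GDTR/TR
 * helpers from the headers included above; the log prefix is an arbitrary
 * example string.
 */
#if 0 /* example only, never compiled */
static void hmR0ExampleDumpHostTss(void)
{
    RTGDTR Gdtr;
    ASMGetGDTR(&Gdtr);                  /* host GDT base and limit */
    RTSEL const SelTR = ASMGetTR();     /* current task register selector */

    /* Locate the descriptor inside the GDT and log it. */
    PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (SelTR & X86_SEL_MASK));
    HMR0DumpDescriptor(pDesc, SelTR, "host TR: ");
}
#endif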
1850
1851
1852/**
1853 * Formats a full register dump.
1854 *
1855 * @param pVM Pointer to the VM.
1856 * @param pVCpu Pointer to the VMCPU.
1857 * @param pCtx Pointer to the CPU context.
1858 */
1859VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1860{
1861 NOREF(pVM);
1862
1863 /*
1864 * Format the flags.
1865 */
1866 static struct
1867 {
1868 const char *pszSet; const char *pszClear; uint32_t fFlag;
1869 } const s_aFlags[] =
1870 {
1871 { "vip", NULL, X86_EFL_VIP },
1872 { "vif", NULL, X86_EFL_VIF },
1873 { "ac", NULL, X86_EFL_AC },
1874 { "vm", NULL, X86_EFL_VM },
1875 { "rf", NULL, X86_EFL_RF },
1876 { "nt", NULL, X86_EFL_NT },
1877 { "ov", "nv", X86_EFL_OF },
1878 { "dn", "up", X86_EFL_DF },
1879 { "ei", "di", X86_EFL_IF },
1880 { "tf", NULL, X86_EFL_TF },
1881 { "nt", "pl", X86_EFL_SF },
1882 { "nz", "zr", X86_EFL_ZF },
1883 { "ac", "na", X86_EFL_AF },
1884 { "po", "pe", X86_EFL_PF },
1885 { "cy", "nc", X86_EFL_CF },
1886 };
1887 char szEFlags[80];
1888 char *psz = szEFlags;
1889 uint32_t uEFlags = pCtx->eflags.u32;
1890 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1891 {
1892 const char *pszAdd = s_aFlags[i].fFlag & uEFlags ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
1893 if (pszAdd)
1894 {
1895 strcpy(psz, pszAdd);
1896 psz += strlen(pszAdd);
1897 *psz++ = ' ';
1898 }
1899 }
1900 psz[-1] = '\0';
1901
1902
1903 /*
1904 * Format the registers.
1905 */
1906 if (CPUMIsGuestIn64BitCode(pVCpu))
1907 {
1908 Log(("rax=%016RX64 rbx=%016RX64 rcx=%016RX64 rdx=%016RX64\n"
1909 "rsi=%016RX64 rdi=%016RX64 r8 =%016RX64 r9 =%016RX64\n"
1910 "r10=%016RX64 r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1911 "r14=%016RX64 r15=%016RX64\n"
1912 "rip=%016RX64 rsp=%016RX64 rbp=%016RX64 iopl=%d %*s\n"
1913 "cs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1914 "ds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1915 "es={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1916 "fs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1917 "gs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1918 "ss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1919 "cr0=%016RX64 cr2=%016RX64 cr3=%016RX64 cr4=%016RX64\n"
1920 "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64 dr3=%016RX64\n"
1921 "dr4=%016RX64 dr5=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
1922 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1923 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1924 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1925 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1926 ,
1927 pCtx->rax, pCtx->rbx, pCtx->rcx, pCtx->rdx, pCtx->rsi, pCtx->rdi,
1928 pCtx->r8, pCtx->r9, pCtx->r10, pCtx->r11, pCtx->r12, pCtx->r13,
1929 pCtx->r14, pCtx->r15,
1930 pCtx->rip, pCtx->rsp, pCtx->rbp, X86_EFL_GET_IOPL(uEFlags), 31, szEFlags,
1931 pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u,
1932 pCtx->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u,
1933 pCtx->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u,
1934 pCtx->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u,
1935 pCtx->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u,
1936 pCtx->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u,
1937 pCtx->cr0, pCtx->cr2, pCtx->cr3, pCtx->cr4,
1938 pCtx->dr[0], pCtx->dr[1], pCtx->dr[2], pCtx->dr[3],
1939 pCtx->dr[4], pCtx->dr[5], pCtx->dr[6], pCtx->dr[7],
1940 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, uEFlags,
1941 pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
1942 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
1943 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1944 }
1945 else
1946 Log(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
1947 "eip=%08x esp=%08x ebp=%08x iopl=%d %*s\n"
1948 "cs={%04x base=%016RX64 limit=%08x flags=%08x} dr0=%08RX64 dr1=%08RX64\n"
1949 "ds={%04x base=%016RX64 limit=%08x flags=%08x} dr2=%08RX64 dr3=%08RX64\n"
1950 "es={%04x base=%016RX64 limit=%08x flags=%08x} dr4=%08RX64 dr5=%08RX64\n"
1951 "fs={%04x base=%016RX64 limit=%08x flags=%08x} dr6=%08RX64 dr7=%08RX64\n"
1952 "gs={%04x base=%016RX64 limit=%08x flags=%08x} cr0=%08RX64 cr2=%08RX64\n"
1953 "ss={%04x base=%016RX64 limit=%08x flags=%08x} cr3=%08RX64 cr4=%08RX64\n"
1954 "gdtr=%016RX64:%04x idtr=%016RX64:%04x eflags=%08x\n"
1955 "ldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1956 "tr ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1957 "SysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1958 ,
1959 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
1960 pCtx->eip, pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(uEFlags), 31, szEFlags,
1961 pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u, pCtx->dr[0], pCtx->dr[1],
1962 pCtx->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u, pCtx->dr[2], pCtx->dr[3],
1963 pCtx->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u, pCtx->dr[4], pCtx->dr[5],
1964 pCtx->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u, pCtx->dr[6], pCtx->dr[7],
1965 pCtx->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u, pCtx->cr0, pCtx->cr2,
1966 pCtx->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u, pCtx->cr3, pCtx->cr4,
1967 pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, uEFlags,
1968 pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
1969 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
1970 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp));
1971
1972 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
1973 Log(("FPU:\n"
1974 "FCW=%04x FSW=%04x FTW=%02x\n"
1975 "FOP=%04x FPUIP=%08x CS=%04x Rsrvd1=%04x\n"
1976 "FPUDP=%04x DS=%04x Rsvrd2=%04x MXCSR=%08x MXCSR_MASK=%08x\n"
1977 ,
1978 pFpuCtx->FCW, pFpuCtx->FSW, pFpuCtx->FTW,
1979 pFpuCtx->FOP, pFpuCtx->FPUIP, pFpuCtx->CS, pFpuCtx->Rsrvd1,
1980 pFpuCtx->FPUDP, pFpuCtx->DS, pFpuCtx->Rsrvd2,
1981 pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK));
1982
1983 Log(("MSR:\n"
1984 "EFER =%016RX64\n"
1985 "PAT =%016RX64\n"
1986 "STAR =%016RX64\n"
1987 "CSTAR =%016RX64\n"
1988 "LSTAR =%016RX64\n"
1989 "SFMASK =%016RX64\n"
1990 "KERNELGSBASE =%016RX64\n",
1991 pCtx->msrEFER,
1992 pCtx->msrPAT,
1993 pCtx->msrSTAR,
1994 pCtx->msrCSTAR,
1995 pCtx->msrLSTAR,
1996 pCtx->msrSFMASK,
1997 pCtx->msrKERNELGSBASE));
1998}
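/*
 * Illustrative sketch (not part of the original file): how a strict-build
 * failure path might log the full guest state before asserting.  The trigger
 * condition and message are made-up; HMDumpRegs itself is the function above.
 */
#if 0 /* example only, never compiled */
static void hmR0ExampleLogStateOnUnexpectedExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t uExitReason)
{
    /* Dump everything we know about the guest before complaining. */
    HMDumpRegs(pVM, pVCpu, pCtx);
    AssertMsgFailed(("Unexpected exit reason %#x\n", uExitReason));
}
#endif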
1999
2000#endif /* VBOX_STRICT */
2001