VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp@ 53349

Last change on this file since 53349 was 52419, checked in by vboxsync, 10 years ago

VMM: Fix restoring 32-bit guest FPU state on 64-bit capable VMs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 34.9 KB
1/* $Id: CPUMR0.cpp 52419 2014-08-19 16:12:46Z vboxsync $ */
2/** @file
3 * CPUM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include "CPUMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/err.h>
27#include <VBox/log.h>
28#include <VBox/vmm/hm.h>
29#include <iprt/assert.h>
30#include <iprt/asm-amd64-x86.h>
31#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
32# include <iprt/mem.h>
33# include <iprt/memobj.h>
34# include <VBox/apic.h>
35#endif
36#include <iprt/x86.h>
37
38
39/*******************************************************************************
40* Structures and Typedefs *
41*******************************************************************************/
42#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
43/**
44 * Local APIC mappings.
45 */
46typedef struct CPUMHOSTLAPIC
47{
48 /** Indicates that the entry is in use and has valid data. */
49 bool fEnabled;
50 /** Whether it's operating in X2APIC mode (EXTD). */
51 bool fX2Apic;
52 /** The APIC version number. */
53 uint32_t uVersion;
54 /** Has APIC_REG_LVT_THMR. Not used. */
55 uint32_t fHasThermal;
56 /** The physical address of the APIC registers. */
57 RTHCPHYS PhysBase;
58 /** The memory object for the physical address (entered via RTR0MemObjEnterPhys). */
59 RTR0MEMOBJ hMemObj;
60 /** The mapping object for hMemObj. */
61 RTR0MEMOBJ hMapObj;
62 /** The mapping address of the APIC registers.
63 * @remarks Different CPUs may use the same physical address to map their
64 * APICs, so this pointer is only valid when on the CPU owning the
65 * APIC. */
66 void *pv;
67} CPUMHOSTLAPIC;
68#endif
69
70
71/*******************************************************************************
72* Global Variables *
73*******************************************************************************/
74#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
75static CPUMHOSTLAPIC g_aLApics[RTCPUSET_MAX_CPUS];
76#endif
77
78/**
79 * CPUID bits to unify among all cores.
80 */
81static struct
82{
83 uint32_t uLeaf; /**< Leaf to check. */
84 uint32_t ecx; /**< which bits in ecx to unify between CPUs. */
85 uint32_t edx; /**< which bits in edx to unify between CPUs. */
86}
87const g_aCpuidUnifyBits[] =
88{
89 {
90 0x00000001,
91 X86_CPUID_FEATURE_ECX_CX16 | X86_CPUID_FEATURE_ECX_MONITOR,
92 X86_CPUID_FEATURE_EDX_CX8
93 }
94};
95
96
97
98/*******************************************************************************
99* Internal Functions *
100*******************************************************************************/
101#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
102static int cpumR0MapLocalApics(void);
103static void cpumR0UnmapLocalApics(void);
104#endif
105static int cpumR0SaveHostDebugState(PVMCPU pVCpu);
106
107
108/**
109 * Does the Ring-0 CPU initialization once during module load.
110 * XXX Host-CPU hot-plugging?
111 */
112VMMR0_INT_DECL(int) CPUMR0ModuleInit(void)
113{
114 int rc = VINF_SUCCESS;
115#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
116 rc = cpumR0MapLocalApics();
117#endif
118 return rc;
119}
120
121
122/**
123 * Terminate the module.
124 */
125VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void)
126{
127#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
128 cpumR0UnmapLocalApics();
129#endif
130 return VINF_SUCCESS;
131}
132
133
134/**
135 *
136 *
137 * Check the CPUID features of this particular CPU and disable relevant features
138 * for the guest which do not exist on this CPU. We have seen systems where the
139 * X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
140 * @bugref{5436}.
141 *
142 * @note This function might be called simultaneously on more than one CPU!
143 *
144 * @param idCpu The identifier for the CPU the function is called on.
145 * @param pvUser1 Pointer to the VM structure.
146 * @param pvUser2 Ignored.
147 */
148static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
149{
150 PVM pVM = (PVM)pvUser1;
151 PCPUM pCPUM = &pVM->cpum.s;
152
153 NOREF(idCpu); NOREF(pvUser2);
154 for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
155 {
156 /* Note! Cannot use cpumCpuIdGetLeaf from here because we're not
157 necessarily in the VM process context. So, we're using the
158 legacy arrays as temporary storage. */
159
160 uint32_t uLeaf = g_aCpuidUnifyBits[i].uLeaf;
161 PCPUMCPUID pLegacyLeaf;
162 if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
163 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
164 else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
165 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
166 else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
167 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];
168 else
169 continue;
170
171 uint32_t eax, ebx, ecx, edx;
172 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx);
173
174 ASMAtomicAndU32(&pLegacyLeaf->ecx, ecx | ~g_aCpuidUnifyBits[i].ecx);
175 ASMAtomicAndU32(&pLegacyLeaf->edx, edx | ~g_aCpuidUnifyBits[i].edx);
176 }
177}
178
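/*
 * Illustrative sketch (not part of CPUMR0.cpp): what the atomic AND in
 * cpumR0CheckCpuid above does to the unified bits.  A feature bit covered by
 * the unify mask survives only if every CPU reports it; bits outside the mask
 * are left untouched.  The function and variable names here are hypothetical.
 */
static void cpumR0UnifyBitsSketch(void)
{
    uint32_t const fUnifyMask  = X86_CPUID_FEATURE_ECX_CX16 | X86_CPUID_FEATURE_ECX_MONITOR;
    uint32_t       fGuestEcx   = fUnifyMask;                      /* start with both bits set */
    uint32_t const afCpuEcx[2] =
    {
        fUnifyMask,                                               /* CPU 0 reports CX16 and MONITOR */
        X86_CPUID_FEATURE_ECX_CX16                                /* CPU 1 reports only CX16 */
    };
    for (unsigned i = 0; i < RT_ELEMENTS(afCpuEcx); i++)
        fGuestEcx &= afCpuEcx[i] | ~fUnifyMask;                   /* same operation as the ASMAtomicAndU32 above */
    /* fGuestEcx now has CX16 set and MONITOR cleared. */
    NOREF(fGuestEcx);
}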
179
180/**
181 * Does Ring-0 CPUM initialization.
182 *
183 * This is mainly to check that the Host CPU mode is compatible
184 * with VBox.
185 *
186 * @returns VBox status code.
187 * @param pVM Pointer to the VM.
188 */
189VMMR0_INT_DECL(int) CPUMR0InitVM(PVM pVM)
190{
191 LogFlow(("CPUMR0Init: %p\n", pVM));
192
193 /*
194 * Check CR0 & CR4 flags.
195 */
196 uint32_t u32CR0 = ASMGetCR0();
197 if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
198 {
199 Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
200 return VERR_UNSUPPORTED_CPU_MODE;
201 }
202
203 /*
204 * Check for sysenter and syscall usage.
205 */
206 if (ASMHasCpuId())
207 {
208 /*
209 * SYSENTER/SYSEXIT
210 *
211 * Intel docs claim you should test both the flag and family, model &
212 * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
213 * but don't support it. AMD CPUs may support this feature in legacy
214 * mode, but it is not available in long mode. Since we switch to 32-bit
215 * mode when entering raw-mode context the feature would become
216 * accessible again on AMD CPUs, so we have to check regardless of
217 * host bitness.
218 */
219 uint32_t u32CpuVersion;
220 uint32_t u32Dummy;
221 uint32_t fFeatures;
222 ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
223 uint32_t const u32Family = u32CpuVersion >> 8;
224 uint32_t const u32Model = (u32CpuVersion >> 4) & 0xF;
225 uint32_t const u32Stepping = u32CpuVersion & 0xF;
226 if ( (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
227 && ( u32Family != 6 /* (> pentium pro) */
228 || u32Model >= 3
229 || u32Stepping >= 3
230 || !ASMIsIntelCpu())
231 )
232 {
233 /*
234 * Read the MSR and see if it's in use or not.
235 */
236 uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
237 if (u32)
238 {
239 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSENTER;
240 Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
241 }
242 }
243
244 /*
245 * SYSCALL/SYSRET
246 *
247 * This feature is indicated by the SEP bit returned in EDX by CPUID
248 * function 0x80000001. Intel CPUs only support this feature in
249 * long mode. Since we're not running 64-bit guests in raw-mode there
250 * are no issues with 32-bit intel hosts.
251 */
252 uint32_t cExt = 0;
253 ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
254 if (ASMIsValidExtRange(cExt))
255 {
256 uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
257 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
258 {
259#ifdef RT_ARCH_X86
260# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
261 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
262# else
263 if (!ASMIsIntelCpu())
264# endif
265#endif
266 {
267 uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
268 if (fEfer & MSR_K6_EFER_SCE)
269 {
270 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSCALL;
271 Log(("CPUMR0Init: host uses syscall\n"));
272 }
273 }
274 }
275 }
276
277 /*
278 * Unify/cross check some CPUID feature bits on all available CPU cores
279 * and threads. We've seen CPUs where the monitor support differed.
280 *
281 * Because the hyper heap isn't always mapped into ring-0, we cannot
282 * access it from a RTMpOnAll callback. We use the legacy CPUID arrays
283 * as temp ring-0 accessible memory instead, ASSUMING that they're all
284 * up to date when we get here.
285 */
286 RTMpOnAll(cpumR0CheckCpuid, pVM, NULL);
287
288 for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
289 {
290 uint32_t uLeaf = g_aCpuidUnifyBits[i].uLeaf;
291 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, uLeaf, 0);
292 if (pLeaf)
293 {
294 PCPUMCPUID pLegacyLeaf;
295 if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
296 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
297 else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
298 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
299 else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
300 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];
301 else
302 continue;
303
304 pLeaf->uEcx = pLegacyLeaf->ecx;
305 pLeaf->uEdx = pLegacyLeaf->edx;
306 }
307 }
308
309 }
310
311
312 /*
313 * Check if debug registers are armed.
314 * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
315 */
316 uint32_t u32DR7 = ASMGetDR7();
317 if (u32DR7 & X86_DR7_ENABLED_MASK)
318 {
319 for (VMCPUID i = 0; i < pVM->cCpus; i++)
320 pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
321 Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
322 }
323
324 return VINF_SUCCESS;
325}
326
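/*
 * Illustrative sketch (not part of CPUMR0.cpp): the full family/model/stepping
 * decoding of CPUID leaf 1 EAX, for comparison with the simplified extraction
 * in CPUMR0InitVM above, which only needs to spot family 6 / early Pentium Pro
 * steppings.  The function name is hypothetical.
 */
static void cpumR0DecodeFmsSketch(uint32_t uVersionEax)
{
    uint32_t uFamily   = (uVersionEax >> 8) & 0xf;
    uint32_t uModel    = (uVersionEax >> 4) & 0xf;
    uint32_t uStepping =  uVersionEax       & 0xf;
    if (uFamily == 0xf)
        uFamily += (uVersionEax >> 20) & 0xff;                    /* extended family */
    if (uFamily == 0x6 || uFamily >= 0xf)
        uModel |= ((uVersionEax >> 16) & 0xf) << 4;               /* extended model */
    NOREF(uFamily); NOREF(uModel); NOREF(uStepping);
}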
327
328/**
329 * Trap handler for device-not-available fault (#NM).
330 * Device not available, FP or (F)WAIT instruction.
331 *
332 * @returns VBox status code.
333 * @retval VINF_SUCCESS if the guest FPU state is loaded.
334 * @retval VINF_EM_RAW_GUEST_TRAP if it is a guest trap.
335 *
336 * @param pVM Pointer to the VM.
337 * @param pVCpu Pointer to the VMCPU.
338 * @param pCtx Pointer to the guest-CPU context.
339 */
340VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
341{
342 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
343 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
344
345 /* If the FPU state has already been loaded, then it's a guest trap. */
346 if (CPUMIsGuestFPUStateActive(pVCpu))
347 {
348 Assert( ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS))
349 || ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM)));
350 return VINF_EM_RAW_GUEST_TRAP;
351 }
352
353 /*
354 * There are two basic actions:
355 * 1. Save host fpu and restore guest fpu.
356 * 2. Generate guest trap.
357 *
358 * When entering the hypervisor we'll always enable MP (for proper wait
359 * trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
360 * is taken from the guest OS in order to get proper SSE handling.
361 *
362 *
363 * Actions taken depending on the guest CR0 flags:
364 *
365 * 3 2 1
366 * TS | EM | MP | FPUInstr | WAIT :: VMM Action
367 * ------------------------------------------------------------------------
368 * 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC.
369 * 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC.
370 * 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC.
371 * 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC.
372 * 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
373 * 1 | 0 | 1 | #NM | #NM :: Go to guest taking trap there.
374 * 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
375 * 1 | 1 | 1 | #NM | #NM :: Go to guest taking trap there.
376 */
377
378 switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
379 {
380 case X86_CR0_MP | X86_CR0_TS:
381 case X86_CR0_MP | X86_CR0_TS | X86_CR0_EM:
382 return VINF_EM_RAW_GUEST_TRAP;
383 default:
384 break;
385 }
386
387 return CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
388}
389
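/*
 * Illustrative sketch (not part of CPUMR0.cpp): the decision encoded by the
 * table and switch in CPUMR0Trap07Handler above, reduced to a predicate.
 * An #NM is reflected to the guest exactly when both TS and MP are set in the
 * guest CR0 (EM does not change that outcome); otherwise the host FPU state
 * is saved and the guest FPU state is loaded.  The function name is
 * hypothetical.
 */
static bool cpumR0IsNmForGuestSketch(uint64_t uGuestCr0)
{
    return (uGuestCr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS);
}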
390
391/**
392 * Saves the host-FPU/XMM state and loads the guest-FPU state into the CPU.
393 *
394 * @returns VBox status code.
395 *
396 * @param pVM Pointer to the VM.
397 * @param pVCpu Pointer to the VMCPU.
398 * @param pCtx Pointer to the guest-CPU context.
399 */
400VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
401{
402
403 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
404#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
405 if (CPUMIsGuestInLongModeEx(pCtx))
406 {
407 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
408
409 /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
410 cpumR0SaveHostFPUState(&pVCpu->cpum.s);
411
412 /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
413 pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
414 }
415 else
416#endif
417 {
418 NOREF(pCtx);
419 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
420 /** @todo Move the FFXSR handling down into
421 * cpumR0SaveHostRestoreGuestFPUState to optimize the
422 * VBOX_WITH_KERNEL_USING_XMM handling. */
423 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
424 uint64_t uHostEfer = 0;
425 bool fRestoreEfer = false;
426 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
427 {
428 /** @todo r=ramshankar: Can't we use a cached value here
429 * instead of reading the MSR? host EFER doesn't usually
430 * change. */
431 uHostEfer = ASMRdMsr(MSR_K6_EFER);
432 if (uHostEfer & MSR_K6_EFER_FFXSR)
433 {
434 ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
435 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
436 fRestoreEfer = true;
437 }
438 }
439
440 /* Do the job and record that we've switched FPU state. */
441 cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
442
443 /* Restore EFER. */
444 if (fRestoreEfer)
445 ASMWrMsr(MSR_K6_EFER, uHostEfer);
446 }
447
448 Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
449 return VINF_SUCCESS;
450}
451
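/*
 * Illustrative sketch (not part of CPUMR0.cpp): the EFER.FFXSR handling used
 * in CPUMR0LoadGuestFPU above.  With FFXSR set, a 64-bit fxsave/fxrstor skips
 * the XMM registers, so the bit is cleared around the FPU state switch and
 * restored afterwards.  The function name is hypothetical and the actual
 * state switch is only indicated by a comment.
 */
static void cpumR0FfxsrDanceSketch(void)
{
    uint64_t const uHostEfer   = ASMRdMsr(MSR_K6_EFER);
    bool const     fClearFfxsr = RT_BOOL(uHostEfer & MSR_K6_EFER_FFXSR);
    if (fClearFfxsr)
        ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);

    /* ... fxsave/fxrstor based FPU state switch would go here ... */

    if (fClearFfxsr)
        ASMWrMsr(MSR_K6_EFER, uHostEfer);
}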
452
453/**
454 * Save guest FPU/XMM state
455 *
456 * @returns VBox status code.
457 * @param pVM Pointer to the VM.
458 * @param pVCpu Pointer to the VMCPU.
459 * @param pCtx Pointer to the guest CPU context.
460 */
461VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
462{
463 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
464 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
465 AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
466 NOREF(pVM); NOREF(pCtx);
467
468#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
469 if (CPUMIsGuestInLongModeEx(pCtx))
470 {
471 if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
472 {
473 HMR0SaveFPUState(pVM, pVCpu, pCtx);
474 cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
475 }
476 /* else nothing to do; we didn't perform a world switch */
477 }
478 else
479#endif
480 {
481#ifdef VBOX_WITH_KERNEL_USING_XMM
482 /*
483 * The assembly wrapper has already saved the guest XMM registers into
484 * the CPU context, so stash a copy before the full FPU state save
485 * overwrites them, and put them back afterwards.
486 */
487 /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
488 * I'm not able to test such an optimization tonight.
489 * We could just do all this in assembly. */
490 uint128_t aGuestXmmRegs[16];
491 memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
492#endif
493
494 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
495 uint64_t uHostEfer = 0;
496 bool fRestoreEfer = false;
497 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
498 {
499 uHostEfer = ASMRdMsr(MSR_K6_EFER);
500 if (uHostEfer & MSR_K6_EFER_FFXSR)
501 {
502 ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
503 fRestoreEfer = true;
504 }
505 }
506
507 cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
508
509 /* Restore EFER MSR */
510 if (fRestoreEfer)
511 ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_FFXSR);
512
513#ifdef VBOX_WITH_KERNEL_USING_XMM
514 memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
515#endif
516 }
517
518 pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE);
519 return VINF_SUCCESS;
520}
521
522
523/**
524 * Saves the host debug state, setting CPUM_USED_DEBUG_REGS_HOST and loading
525 * DR7 with safe values.
526 *
527 * @returns VBox status code.
528 * @param pVCpu Pointer to the VMCPU.
529 */
530static int cpumR0SaveHostDebugState(PVMCPU pVCpu)
531{
532 /*
533 * Save the host state.
534 */
535#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
536 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
537 cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
538#else
539 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
540 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
541 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
542 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
543#endif
544 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
545 /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
546 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
547
548 /* Preemption paranoia. */
549 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);
550
551 /*
552 * Make sure DR7 is harmless or else we could trigger breakpoints when
553 * loading guest or hypervisor DRx values later.
554 */
555 if (pVCpu->cpum.s.Host.dr7 != X86_DR7_INIT_VAL)
556 ASMSetDR7(X86_DR7_INIT_VAL);
557
558 return VINF_SUCCESS;
559}
560
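/*
 * Illustrative sketch (not part of CPUMR0.cpp): why X86_DR7_INIT_VAL is
 * considered harmless above.  It is assumed here to have all Ln/Gn breakpoint
 * enable bits (X86_DR7_ENABLED_MASK) clear, so loading DR0-DR3 afterwards
 * cannot arm a stray breakpoint.  The function name is hypothetical.
 */
static void cpumR0Dr7SanitySketch(void)
{
    AssertCompile(!(X86_DR7_INIT_VAL & X86_DR7_ENABLED_MASK));
}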
561
562/**
563 * Saves the guest DRx state residing in host registers and restore the host
564 * register values.
565 *
566 * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
567 * since it's assumed that we're shadowing the guest DRx register values
568 * accurately when using the combined hypervisor debug register values
569 * (CPUMR0LoadHyperDebugState).
570 *
571 * @returns true if either guest or hypervisor debug registers were loaded.
572 * @param pVCpu The cross context CPU structure for the calling EMT.
573 * @param fDr6 Whether to include DR6 or not.
574 * @thread EMT(pVCpu)
575 */
576VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
577{
578 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
579 bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
580
581 /*
582 * Do we need to save the guest DRx registers loaded into host registers?
583 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
584 */
585 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
586 {
587#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
588 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
589 {
590 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
591 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
592 if (!fDr6)
593 pVCpu->cpum.s.Guest.dr[6] = uDr6;
594 }
595 else
596#endif
597 {
598#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
599 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
600#else
601 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
602 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
603 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
604 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
605#endif
606 if (fDr6)
607 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
608 }
609 }
610 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~( CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
611 | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));
612
613 /*
614 * Restore the host's debug state. DR0-3, DR6 and only then DR7!
615 */
616 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
617 {
618 /* A bit of paranoia first... */
619 uint64_t uCurDR7 = ASMGetDR7();
620 if (uCurDR7 != X86_DR7_INIT_VAL)
621 ASMSetDR7(X86_DR7_INIT_VAL);
622
623#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
624 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
625 cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
626#else
627 ASMSetDR0(pVCpu->cpum.s.Host.dr0);
628 ASMSetDR1(pVCpu->cpum.s.Host.dr1);
629 ASMSetDR2(pVCpu->cpum.s.Host.dr2);
630 ASMSetDR3(pVCpu->cpum.s.Host.dr3);
631#endif
632 /** @todo consider only updating if they differ, esp. DR6. Need to figure out
633 * how expensive DRx reads are compared to DRx writes. */
634 ASMSetDR6(pVCpu->cpum.s.Host.dr6);
635 ASMSetDR7(pVCpu->cpum.s.Host.dr7);
636
637 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);
638 }
639
640 return fDrXLoaded;
641}
642
643
644/**
645 * Saves the guest DRx state if it resides in host registers.
646 *
647 * This does NOT clear any use flags, so the host registers remain loaded with
648 * the guest DRx state upon return. The purpose is only to make sure the values
649 * in the CPU context structure are up to date.
650 *
651 * @returns true if the host registers contain guest values, false if not.
652 * @param pVCpu The cross context CPU structure for the calling EMT.
653 * @param fDr6 Whether to include DR6 or not.
654 * @thread EMT(pVCpu)
655 */
656VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
657{
658 /*
659 * Do we need to save the guest DRx registers loaded into host registers?
660 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
661 */
662 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
663 {
664#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
665 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
666 {
667 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
668 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
669 if (!fDr6)
670 pVCpu->cpum.s.Guest.dr[6] = uDr6;
671 }
672 else
673#endif
674 {
675#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
676 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
677#else
678 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
679 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
680 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
681 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
682#endif
683 if (fDr6)
684 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
685 }
686 return true;
687 }
688 return false;
689}
690
691
692/**
693 * Lazily sync in the guest debug state.
694 *
695 * @param pVCpu The cross context CPU structure for the calling EMT.
696 * @param fDr6 Whether to include DR6 or not.
697 * @thread EMT(pVCpu)
698 */
699VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
700{
701 /*
702 * Save the host state and disarm all host BPs.
703 */
704 cpumR0SaveHostDebugState(pVCpu);
705 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
706
707 /*
708 * Activate the guest state DR0-3.
709 * DR7 and DR6 (if fDr6 is true) are left to the caller.
710 */
711#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
712 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
713 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
714 else
715#endif
716 {
717#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
718 cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
719#else
720 ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
721 ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
722 ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
723 ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
724#endif
725 if (fDr6)
726 ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
727
728 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
729 }
730}
731
732
733/**
734 * Lazily sync in the hypervisor debug state.
735 *
737 * @param pVCpu The cross context CPU structure for the calling EMT.
738 * @param fDr6 Whether to include DR6 or not.
739 * @thread EMT(pVCpu)
740 */
741VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
742{
743 /*
744 * Save the host state and disarm all host BPs.
745 */
746 cpumR0SaveHostDebugState(pVCpu);
747 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
748
749 /*
750 * Make sure the hypervisor values are up to date.
751 */
752 CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */, true);
753
754 /*
755 * Activate the hypervisor state DR0-3.
756 * DR7 and DR6 (if fDr6 is true) are left to the caller.
757 */
758#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
759 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
760 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
761 else
762#endif
763 {
764#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
765 cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
766#else
767 ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
768 ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
769 ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
770 ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
771#endif
772 if (fDr6)
773 ASMSetDR6(X86_DR6_INIT_VAL);
774
775 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
776 }
777}
778
779#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
780
781/**
782 * Per-CPU callback that probes the CPU for APIC support.
783 *
784 * @param idCpu The identifier for the CPU the function is called on.
785 * @param pvUser1 Ignored.
786 * @param pvUser2 Ignored.
787 */
788static DECLCALLBACK(void) cpumR0MapLocalApicCpuProber(RTCPUID idCpu, void *pvUser1, void *pvUser2)
789{
790 NOREF(pvUser1); NOREF(pvUser2);
791 int iCpu = RTMpCpuIdToSetIndex(idCpu);
792 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
793
794 /*
795 * Check for APIC support.
796 */
797 uint32_t uMaxLeaf, u32EBX, u32ECX, u32EDX;
798 ASMCpuId(0, &uMaxLeaf, &u32EBX, &u32ECX, &u32EDX);
799 if ( ( ASMIsIntelCpuEx(u32EBX, u32ECX, u32EDX)
800 || ASMIsAmdCpuEx(u32EBX, u32ECX, u32EDX)
801 || ASMIsViaCentaurCpuEx(u32EBX, u32ECX, u32EDX))
802 && ASMIsValidStdRange(uMaxLeaf))
803 {
804 uint32_t uDummy;
805 ASMCpuId(1, &uDummy, &u32EBX, &u32ECX, &u32EDX);
806 if ( (u32EDX & X86_CPUID_FEATURE_EDX_APIC)
807 && (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
808 {
809 /*
810 * Safe to access the MSR. Read it and calc the BASE (a little complicated).
811 */
812 uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);
813 uint64_t u64Mask = MSR_IA32_APICBASE_BASE_MIN;
814
815 /* See the Intel manual (Local APIC Status and Location): the default MAXPHYADDR is 36 bits. */
816 uint32_t uMaxExtLeaf;
817 ASMCpuId(0x80000000, &uMaxExtLeaf, &u32EBX, &u32ECX, &u32EDX);
818 if ( uMaxExtLeaf >= UINT32_C(0x80000008)
819 && ASMIsValidExtRange(uMaxExtLeaf))
820 {
821 uint32_t u32PhysBits;
822 ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX);
823 u32PhysBits &= 0xff;
824 u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000);
825 }
826
827 AssertCompile(sizeof(g_aLApics[iCpu].PhysBase) == sizeof(u64ApicBase));
828 g_aLApics[iCpu].PhysBase = u64ApicBase & u64Mask;
829 g_aLApics[iCpu].fEnabled = RT_BOOL(u64ApicBase & MSR_IA32_APICBASE_EN);
830 g_aLApics[iCpu].fX2Apic = (u64ApicBase & (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN))
831 == (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN);
832 }
833 }
834}
835
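/*
 * Illustrative sketch (not part of CPUMR0.cpp): the physical address mask
 * computed in cpumR0MapLocalApicCpuProber above.  With the default 36-bit
 * MAXPHYADDR the mask covers bits 12..35; with, say, 40 physical address
 * bits it covers bits 12..39.  The function name is hypothetical and
 * cPhysBits is assumed to be below 64.
 */
static uint64_t cpumR0ApicBaseMaskSketch(uint32_t cPhysBits)
{
    return ((UINT64_C(1) << cPhysBits) - 1) & UINT64_C(0xfffffffffffff000);
}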
836
837
838/**
839 * Per-CPU callback that verifies our APIC expectations.
840 *
841 * @param idCpu The identifier for the CPU the function is called on.
842 * @param pvUser1 Ignored.
843 * @param pvUser2 Ignored.
844 */
845static DECLCALLBACK(void) cpumR0MapLocalApicCpuChecker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
846{
847 NOREF(pvUser1); NOREF(pvUser2);
848
849 int iCpu = RTMpCpuIdToSetIndex(idCpu);
850 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
851 if (!g_aLApics[iCpu].fEnabled)
852 return;
853
854 /*
855 * 0x0X 82489 external APIC
856 * 0x1X Local APIC
857 * 0x2X..0xFF reserved
858 */
859 uint32_t uApicVersion;
860 if (g_aLApics[iCpu].fX2Apic)
861 uApicVersion = ApicX2RegRead32(APIC_REG_VERSION);
862 else
863 uApicVersion = ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_VERSION);
864 if ((APIC_REG_VERSION_GET_VER(uApicVersion) & 0xF0) == 0x10)
865 {
866 g_aLApics[iCpu].uVersion = uApicVersion;
867 g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(uApicVersion) >= 5;
868
869#if 0 /* enable if you need it. */
870 if (g_aLApics[iCpu].fX2Apic)
871 SUPR0Printf("CPUM: X2APIC %02u - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
872 iCpu, uApicVersion,
873 ApicX2RegRead32(APIC_REG_LVT_LINT0), ApicX2RegRead32(APIC_REG_LVT_LINT1),
874 ApicX2RegRead32(APIC_REG_LVT_PC), ApicX2RegRead32(APIC_REG_LVT_THMR) );
875 else
876 SUPR0Printf("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
877 iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, uApicVersion,
878 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT0), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT1),
879 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_PC), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_THMR) );
880#endif
881 }
882 else
883 {
884 g_aLApics[iCpu].fEnabled = false;
885 g_aLApics[iCpu].fX2Apic = false;
886 SUPR0Printf("VBox/CPUM: Unsupported APIC version %#x (iCpu=%d)\n", uApicVersion, iCpu);
887 }
888}
889
890
891/**
892 * Map the MMIO page of each local APIC in the system.
893 */
894static int cpumR0MapLocalApics(void)
895{
896 /*
897 * Check that we'll always stay within the array bounds.
898 */
899 if (RTMpGetArraySize() > RT_ELEMENTS(g_aLApics))
900 {
901 LogRel(("CPUM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aLApics)));
902 return VERR_TOO_MANY_CPUS;
903 }
904
905 /*
906 * Create mappings for all online CPUs we think have legacy APICs.
907 */
908 int rc = RTMpOnAll(cpumR0MapLocalApicCpuProber, NULL, NULL);
909
910 for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
911 {
912 if (g_aLApics[iCpu].fEnabled && !g_aLApics[iCpu].fX2Apic)
913 {
914 rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
915 PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO);
916 if (RT_SUCCESS(rc))
917 {
918 rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj, (void *)-1,
919 PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
920 if (RT_SUCCESS(rc))
921 {
922 g_aLApics[iCpu].pv = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
923 continue;
924 }
925 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
926 }
927 g_aLApics[iCpu].fEnabled = false;
928 }
929 g_aLApics[iCpu].pv = NULL;
930 }
931
932 /*
933 * Check the APICs.
934 */
935 if (RT_SUCCESS(rc))
936 rc = RTMpOnAll(cpumR0MapLocalApicCpuChecker, NULL, NULL);
937
938 if (RT_FAILURE(rc))
939 {
940 cpumR0UnmapLocalApics();
941 return rc;
942 }
943
944#ifdef LOG_ENABLED
945 /*
946 * Log the result (pretty useless, requires enabling CPUM in VBoxDrv
947 * and !VBOX_WITH_R0_LOGGING).
948 */
949 if (LogIsEnabled())
950 {
951 uint32_t cEnabled = 0;
952 uint32_t cX2Apics = 0;
953 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
954 if (g_aLApics[iCpu].fEnabled)
955 {
956 cEnabled++;
957 cX2Apics += g_aLApics[iCpu].fX2Apic;
958 }
959 Log(("CPUM: %u APICs, %u X2APICs\n", cEnabled, cX2Apics));
960 }
961#endif
962
963 return VINF_SUCCESS;
964}
965
966
967/**
968 * Unmap the Local APIC of all host CPUs.
969 */
970static void cpumR0UnmapLocalApics(void)
971{
972 for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;)
973 {
974 if (g_aLApics[iCpu].pv)
975 {
976 RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
977 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
978 g_aLApics[iCpu].hMapObj = NIL_RTR0MEMOBJ;
979 g_aLApics[iCpu].hMemObj = NIL_RTR0MEMOBJ;
980 g_aLApics[iCpu].fEnabled = false;
981 g_aLApics[iCpu].fX2Apic = false;
982 g_aLApics[iCpu].pv = NULL;
983 }
984 }
985}
986
987
988/**
989 * Updates CPUMCPU::pvApicBase and CPUMCPU::fX2Apic prior to world switch.
990 *
991 * Writes the Local APIC mapping address of the current host CPU to CPUMCPU so
992 * the world switchers can access the APIC registers for the purpose of
993 * disabling and re-enabling the NMIs. Must be called with disabled preemption
994 * or disabled interrupts!
995 *
996 * @param pVCpu Pointer to the cross context CPU structure of the
997 * calling EMT.
998 * @param idHostCpu The ID of the current host CPU.
999 */
1000VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, RTCPUID idHostCpu)
1001{
1002 int idxCpu = RTMpCpuIdToSetIndex(idHostCpu);
1003 pVCpu->cpum.s.pvApicBase = g_aLApics[idxCpu].pv;
1004 pVCpu->cpum.s.fX2Apic = g_aLApics[idxCpu].fX2Apic;
1005// Log6(("CPUMR0SetLApic: pvApicBase=%p fX2Apic=%d\n", g_aLApics[idxCpu].pv, g_aLApics[idxCpu].fX2Apic));
1006}
1007
1008#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */
1009