VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp@49680

Last change on this file since 49680 was 49623, checked in by vboxsync, 11 years ago

VMM: Warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 32.8 KB
1/* $Id: CPUMR0.cpp 49623 2013-11-22 12:26:29Z vboxsync $ */
2/** @file
3 * CPUM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include "CPUMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/err.h>
27#include <VBox/log.h>
28#include <VBox/vmm/hm.h>
29#include <iprt/assert.h>
30#include <iprt/asm-amd64-x86.h>
31#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
32# include <iprt/mem.h>
33# include <iprt/memobj.h>
34# include <VBox/apic.h>
35#endif
36#include <iprt/x86.h>
37
38
39/*******************************************************************************
40* Structures and Typedefs *
41*******************************************************************************/
42#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
43/**
44 * Local APIC mappings.
45 */
46typedef struct CPUMHOSTLAPIC
47{
48 /** Indicates that the entry is in use and has valid data. */
49 bool fEnabled;
50 /** Whether it's operating in X2APIC mode (EXTD). */
51 bool fX2Apic;
52 /** The APIC version number. */
53 uint32_t uVersion;
54 /** Has APIC_REG_LVT_THMR. Not used. */
55 uint32_t fHasThermal;
56 /** The physical address of the APIC registers. */
57 RTHCPHYS PhysBase;
58 /** The memory object covering the physical address (RTR0MemObjEnterPhys). */
59 RTR0MEMOBJ hMemObj;
60 /** The mapping object for hMemObj. */
61 RTR0MEMOBJ hMapObj;
62 /** The mapping address of the APIC registers.
63 * @remarks Different CPUs may use the same physical address to map their
64 * APICs, so this pointer is only valid when on the CPU owning the
65 * APIC. */
66 void *pv;
67} CPUMHOSTLAPIC;
68#endif
69
70
71/*******************************************************************************
72* Global Variables *
73*******************************************************************************/
74#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
75static CPUMHOSTLAPIC g_aLApics[RTCPUSET_MAX_CPUS];
76#endif
77
78
79/*******************************************************************************
80* Internal Functions *
81*******************************************************************************/
82#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
83static int cpumR0MapLocalApics(void);
84static void cpumR0UnmapLocalApics(void);
85#endif
86static int cpumR0SaveHostDebugState(PVMCPU pVCpu);
87
88
89/**
90 * Does the Ring-0 CPU initialization once during module load.
91 * XXX Host-CPU hot-plugging?
92 */
93VMMR0_INT_DECL(int) CPUMR0ModuleInit(void)
94{
95 int rc = VINF_SUCCESS;
96#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
97 rc = cpumR0MapLocalApics();
98#endif
99 return rc;
100}
101
102
103/**
104 * Terminate the module.
105 */
106VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void)
107{
108#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
109 cpumR0UnmapLocalApics();
110#endif
111 return VINF_SUCCESS;
112}
113
114
115/**
116 * Check the CPUID features of this particular CPU and disable, for the guest,
117 * the relevant features that do not exist on this CPU. We have seen systems where
118 * the X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
119 * @bugref{5436}.
120 *
121 * @note This function might be called simultaneously on more than one CPU!
122 *
123 * @param idCpu The identifier for the CPU the function is called on.
124 * @param pvUser1 Pointer to the VM structure.
125 * @param pvUser2 Ignored.
126 */
127static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
128{
129 NOREF(idCpu); NOREF(pvUser2);
130
131 struct
132 {
133 uint32_t uLeave; /* leaf to check */
134 uint32_t ecx; /* which bits in ecx to unify between CPUs */
135 uint32_t edx; /* which bits in edx to unify between CPUs */
136 } aCpuidUnify[]
137 =
138 {
139 { 0x00000001, X86_CPUID_FEATURE_ECX_CX16
140 | X86_CPUID_FEATURE_ECX_MONITOR,
141 X86_CPUID_FEATURE_EDX_CX8 }
142 };
143 PVM pVM = (PVM)pvUser1;
144 PCPUM pCPUM = &pVM->cpum.s;
145 for (uint32_t i = 0; i < RT_ELEMENTS(aCpuidUnify); i++)
146 {
147 uint32_t uLeave = aCpuidUnify[i].uLeave;
148 uint32_t eax, ebx, ecx, edx;
149
150 ASMCpuId_Idx_ECX(uLeave, 0, &eax, &ebx, &ecx, &edx);
151 PCPUMCPUID paLeaves;
152 if (uLeave < 0x80000000)
153 paLeaves = &pCPUM->aGuestCpuIdStd[uLeave - 0x00000000];
154 else if (uLeave < 0xc0000000)
155 paLeaves = &pCPUM->aGuestCpuIdExt[uLeave - 0x80000000];
156 else
157 paLeaves = &pCPUM->aGuestCpuIdCentaur[uLeave - 0xc0000000];
158 /* unify important bits */
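 /* ANDing with (hostValue | ~mask) leaves bits outside the mask untouched,
    while a masked bit survives only if this host CPU reports it too. Run on
    every CPU via RTMpOnAll, this leaves the guest only the features that all
    host CPUs have in common. */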
159 ASMAtomicAndU32(&paLeaves->ecx, ecx | ~aCpuidUnify[i].ecx);
160 ASMAtomicAndU32(&paLeaves->edx, edx | ~aCpuidUnify[i].edx);
161 }
162}
163
164
165/**
166 * Does Ring-0 CPUM initialization.
167 *
168 * This is mainly to check that the Host CPU mode is compatible
169 * with VBox.
170 *
171 * @returns VBox status code.
172 * @param pVM Pointer to the VM.
173 */
174VMMR0_INT_DECL(int) CPUMR0InitVM(PVM pVM)
175{
176 LogFlow(("CPUMR0Init: %p\n", pVM));
177
178 /*
179 * Check CR0 & CR4 flags.
180 */
181 uint32_t u32CR0 = ASMGetCR0();
182 if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
183 {
184 Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
185 return VERR_UNSUPPORTED_CPU_MODE;
186 }
187
188 /*
189 * Check for sysenter and syscall usage.
190 */
191 if (ASMHasCpuId())
192 {
193 /*
194 * SYSENTER/SYSEXIT
195 *
196 * Intel docs claim you should test both the flag and family, model &
197 * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
198 * but don't support it. AMD CPUs may support this feature in legacy
199 * mode, but they've banned it from long mode. Since we switch to 32-bit
200 * mode when entering raw-mode context the feature would become
201 * accessible again on AMD CPUs, so we have to check regardless of
202 * host bitness.
203 */
204 uint32_t u32CpuVersion;
205 uint32_t u32Dummy;
206 uint32_t fFeatures;
207 ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
208 uint32_t const u32Family = u32CpuVersion >> 8;
209 uint32_t const u32Model = (u32CpuVersion >> 4) & 0xF;
210 uint32_t const u32Stepping = u32CpuVersion & 0xF;
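 /* CPUID leaf 1 EAX: stepping in bits 3:0, model in bits 7:4, family in
    bits 11:8. The coarse decode above is only used to filter out early P6
    parts that set the SEP flag without actually supporting SYSENTER. */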
211 if ( (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
212 && ( u32Family != 6 /* (> pentium pro) */
213 || u32Model >= 3
214 || u32Stepping >= 3
215 || !ASMIsIntelCpu())
216 )
217 {
218 /*
219 * Read the MSR and see if it's in use or not.
220 */
221 uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
222 if (u32)
223 {
224 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSENTER;
225 Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
226 }
227 }
228
229 /*
230 * SYSCALL/SYSRET
231 *
232 * This feature is indicated by the SEP bit returned in EDX by CPUID
233 * function 0x80000001. Intel CPUs only support this feature in
234 * long mode. Since we're not running 64-bit guests in raw-mode there
235 * are no issues with 32-bit Intel hosts.
236 */
237 uint32_t cExt = 0;
238 ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
239 if (ASMIsValidExtRange(cExt))
240 {
241 uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
242 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
243 {
244#ifdef RT_ARCH_X86
245# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
246 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
247# else
248 if (!ASMIsIntelCpu())
249# endif
250#endif
251 {
252 uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
253 if (fEfer & MSR_K6_EFER_SCE)
254 {
255 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSCALL;
256 Log(("CPUMR0Init: host uses syscall\n"));
257 }
258 }
259 }
260 }
261
262 RTMpOnAll(cpumR0CheckCpuid, pVM, NULL);
263 }
264
265
266 /*
267 * Check if debug registers are armed.
268 * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
269 */
270 uint32_t u32DR7 = ASMGetDR7();
271 if (u32DR7 & X86_DR7_ENABLED_MASK)
272 {
273 for (VMCPUID i = 0; i < pVM->cCpus; i++)
274 pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
275 Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
276 }
277
278 return VINF_SUCCESS;
279}
280
281
282/**
283 * Trap handler for device-not-available fault (#NM).
284 * Device not available, FP or (F)WAIT instruction.
285 *
286 * @returns VBox status code.
287 * @retval VINF_SUCCESS if the guest FPU state is loaded.
288 * @retval VINF_EM_RAW_GUEST_TRAP if it is a guest trap.
289 *
290 * @param pVM Pointer to the VM.
291 * @param pVCpu Pointer to the VMCPU.
292 * @param pCtx Pointer to the guest-CPU context.
293 */
294VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
295{
296 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
297 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
298
299 /* If the FPU state has already been loaded, then it's a guest trap. */
300 if (CPUMIsGuestFPUStateActive(pVCpu))
301 {
302 Assert( ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS))
303 || ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM)));
304 return VINF_EM_RAW_GUEST_TRAP;
305 }
306
307 /*
308 * There are two basic actions:
309 * 1. Save host fpu and restore guest fpu.
310 * 2. Generate guest trap.
311 *
312 * When entering the hypervisor we'll always enable MP (for proper wait
313 * trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
314 * is taken from the guest OS in order to get proper SSE handling.
315 *
316 *
317 * Actions taken depending on the guest CR0 flags:
318 *
319 * 3 2 1
320 * TS | EM | MP | FPUInstr | WAIT :: VMM Action
321 * ------------------------------------------------------------------------
322 * 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC.
323 * 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC.
324 * 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC.
325 * 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC.
326 * 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
327 * 1 | 0 | 1 | #NM | #NM :: Go to guest taking trap there.
328 * 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
329 * 1 | 1 | 1 | #NM | #NM :: Go to guest taking trap there.
330 */
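 /* Example: a guest doing its own lazy FPU switching typically has CR0.MP
    and CR0.TS set while a task that hasn't touched the FPU is running; those
    are exactly the two cases below where the #NM is reflected to the guest.
    Every other combination falls through to loading the guest FPU state. */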
331
332 switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
333 {
334 case X86_CR0_MP | X86_CR0_TS:
335 case X86_CR0_MP | X86_CR0_TS | X86_CR0_EM:
336 return VINF_EM_RAW_GUEST_TRAP;
337 default:
338 break;
339 }
340
341 return CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
342}
343
344
345/**
346 * Saves the host-FPU/XMM state and loads the guest-FPU state into the CPU.
347 *
348 * @returns VBox status code.
349 *
350 * @param pVM Pointer to the VM.
351 * @param pVCpu Pointer to the VMCPU.
352 * @param pCtx Pointer to the guest-CPU context.
353 */
354VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
355{
356
357 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
358#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
359 if (CPUMIsGuestInLongModeEx(pCtx))
360 {
361 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
362
363 /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
364 cpumR0SaveHostFPUState(&pVCpu->cpum.s);
365
366 /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
367 pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
368 }
369 else
370#endif
371 {
372 NOREF(pCtx);
373 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
374 /** @todo Move the FFXSR handling down into
375 * cpumR0SaveHostRestoreGuestFPUState to optimize the
376 * VBOX_WITH_KERNEL_USING_XMM handling. */
377 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
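 /* EFER.FFXSR makes FXSAVE/FXRSTOR skip the XMM registers when executed in
    64-bit mode, which would leave part of the guest state behind. */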
378 uint64_t uHostEfer = 0;
379 bool fRestoreEfer = false;
380 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
381 {
382 uHostEfer = ASMRdMsr(MSR_K6_EFER);
383 if (uHostEfer & MSR_K6_EFER_FFXSR)
384 {
385 ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
386 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
387 fRestoreEfer = true;
388 }
389 }
390
391 /* Do the job and record that we've switched FPU state. */
392 cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
393
394 /* Restore EFER. */
395 if (fRestoreEfer)
396 ASMWrMsr(MSR_K6_EFER, uHostEfer);
397 }
398
399 Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
400 return VINF_SUCCESS;
401}
402
403
404/**
405 * Saves the guest FPU/XMM state.
406 *
407 * @returns VBox status code.
408 * @param pVM Pointer to the VM.
409 * @param pVCpu Pointer to the VMCPU.
410 * @param pCtx Pointer to the guest CPU context.
411 */
412VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
413{
414 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
415 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
416 AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
417 NOREF(pVM); NOREF(pCtx);
418
419#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
420 if (CPUMIsGuestInLongModeEx(pCtx))
421 {
422 if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
423 {
424 HMR0SaveFPUState(pVM, pVCpu, pCtx);
425 cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
426 }
427 /* else nothing to do; we didn't perform a world switch */
428 }
429 else
430#endif
431 {
432#ifdef VBOX_WITH_KERNEL_USING_XMM
433 /*
434 * We've already saved the XMM registers in the assembly wrapper, so
435 * we have to stash them here before the full fxsave below overwrites
436 * them and put them back afterwards.
437 */
438 /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
439 * I'm not able to test such an optimization tonight.
440 * We could just do all this in assembly. */
441 uint128_t aGuestXmmRegs[16];
442 memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
443#endif
444
445 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
446 uint64_t uHostEfer = 0;
447 bool fRestoreEfer = false;
448 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
449 {
450 uHostEfer = ASMRdMsr(MSR_K6_EFER);
451 if (uHostEfer & MSR_K6_EFER_FFXSR)
452 {
453 ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
454 fRestoreEfer = true;
455 }
456 }
457
458 cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
459
460 /* Restore EFER MSR */
461 if (fRestoreEfer)
462 ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_FFXSR);
463
464#ifdef VBOX_WITH_KERNEL_USING_XMM
465 memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
466#endif
467 }
468
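 /* Drop all lazy-FPU bookkeeping in one go; the next #NM or FPU load starts
    from a clean slate. */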
469 pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE);
470 return VINF_SUCCESS;
471}
472
473
474/**
475 * Saves the host debug state, setting CPUM_USED_HOST_DEBUG_STATE and loading
476 * DR7 with safe values.
477 *
478 * @returns VBox status code.
479 * @param pVCpu Pointer to the VMCPU.
480 */
481static int cpumR0SaveHostDebugState(PVMCPU pVCpu)
482{
483 /*
484 * Save the host state.
485 */
486#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
487 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
488 cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
489#else
490 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
491 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
492 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
493 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
494#endif
495 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
496 /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
497 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
498
499 /* Preemption paranoia. */
500 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);
501
502 /*
503 * Make sure DR7 is harmless or else we could trigger breakpoints when
504 * loading guest or hypervisor DRx values later.
505 */
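 /* X86_DR7_INIT_VAL is 0x400: every breakpoint disabled, only the
    always-one bit 10 set. */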
506 if (pVCpu->cpum.s.Host.dr7 != X86_DR7_INIT_VAL)
507 ASMSetDR7(X86_DR7_INIT_VAL);
508
509 return VINF_SUCCESS;
510}
511
512
513/**
514 * Saves the guest DRx state residing in host registers and restore the host
515 * register values.
516 *
517 * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
518 * since it's assumed that we're shadowing the guest DRx register values
519 * accurately when using the combined hypervisor debug register values
520 * (CPUMR0LoadHyperDebugState).
521 *
522 * @returns true if either guest or hypervisor debug registers were loaded.
523 * @param pVCpu The cross context CPU structure for the calling EMT.
524 * @param fDr6 Whether to include DR6 or not.
525 * @thread EMT(pVCpu)
526 */
527VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
528{
529 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
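 /* Snapshot the flags before they are cleared below, so the caller still
    learns whether guest or hypervisor values had been loaded. */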
530 bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
531
532 /*
533 * Do we need to save the guest DRx registers loaded into host registers?
534 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
535 */
536 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
537 {
538#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
539 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
540 {
541 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
542 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
543 if (!fDr6)
544 pVCpu->cpum.s.Guest.dr[6] = uDr6;
545 }
546 else
547#endif
548 {
549#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
550 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
551#else
552 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
553 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
554 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
555 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
556#endif
557 if (fDr6)
558 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
559 }
560 }
561 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~( CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
562 | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));
563
564 /*
565 * Restore the host's debug state. DR0-3, DR6 and only then DR7!
566 */
567 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
568 {
569 /* A bit of paranoia first... */
570 uint64_t uCurDR7 = ASMGetDR7();
571 if (uCurDR7 != X86_DR7_INIT_VAL)
572 ASMSetDR7(X86_DR7_INIT_VAL);
573
574#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
575 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
576 cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
577#else
578 ASMSetDR0(pVCpu->cpum.s.Host.dr0);
579 ASMSetDR1(pVCpu->cpum.s.Host.dr1);
580 ASMSetDR2(pVCpu->cpum.s.Host.dr2);
581 ASMSetDR3(pVCpu->cpum.s.Host.dr3);
582#endif
583 /** @todo consider only updating if they differ, esp. DR6. Need to figure how
584 * expensive DRx reads are over DRx writes. */
585 ASMSetDR6(pVCpu->cpum.s.Host.dr6);
586 ASMSetDR7(pVCpu->cpum.s.Host.dr7);
587
588 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);
589 }
590
591 return fDrXLoaded;
592}
593
594
595/**
596 * Saves the guest DRx state if it resides in host registers.
597 *
598 * This does NOT clear any use flags, so the host registers remain loaded with
599 * the guest DRx state upon return. The purpose is only to make sure the values
600 * in the CPU context structure are up to date.
601 *
602 * @returns true if the host registers contain guest values, false if not.
603 * @param pVCpu The cross context CPU structure for the calling EMT.
604 * @param fDr6 Whether to include DR6 or not.
605 * @thread EMT(pVCpu)
606 */
607VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
608{
609 /*
610 * Do we need to save the guest DRx registers loaded into host registers?
611 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
612 */
613 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
614 {
615#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
616 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
617 {
618 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
619 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
620 if (!fDr6)
621 pVCpu->cpum.s.Guest.dr[6] = uDr6;
622 }
623 else
624#endif
625 {
626#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
627 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
628#else
629 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
630 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
631 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
632 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
633#endif
634 if (fDr6)
635 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
636 }
637 return true;
638 }
639 return false;
640}
641
642
643/**
644 * Lazily sync in the debug state.
645 *
646 * @param pVCpu The cross context CPU structure for the calling EMT.
647 * @param fDr6 Whether to include DR6 or not.
648 * @thread EMT(pVCpu)
649 */
650VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
651{
652 /*
653 * Save the host state and disarm all host BPs.
654 */
655 cpumR0SaveHostDebugState(pVCpu);
656 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
657
658 /*
659 * Activate the guest state DR0-3.
660 * DR7 and DR6 (if fDr6 is true) are left to the caller.
661 */
662#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
663 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
664 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
665 else
666#endif
667 {
668#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
669 cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
670#else
671 ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
672 ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
673 ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
674 ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
675#endif
676 if (fDr6)
677 ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
678
679 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
680 }
681}
682
683
684/**
685 * Lazily sync in the hypervisor debug state.
686 *
688 * @param pVCpu The cross context CPU structure for the calling EMT.
689 * @param fDr6 Whether to include DR6 or not.
690 * @thread EMT(pVCpu)
691 */
692VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
693{
694 /*
695 * Save the host state and disarm all host BPs.
696 */
697 cpumR0SaveHostDebugState(pVCpu);
698 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
699
700 /*
701 * Make sure the hypervisor values are up to date.
702 */
703 CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */, true);
704
705 /*
706 * Activate the guest state DR0-3.
707 * DR7 and DR6 (if fDr6 is true) are left to the caller.
708 */
709#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
710 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
711 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
712 else
713#endif
714 {
715#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
716 cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
717#else
718 ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
719 ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
720 ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
721 ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
722#endif
723 if (fDr6)
724 ASMSetDR6(X86_DR6_INIT_VAL);
725
726 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
727 }
728}
729
730#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
731
732/**
733 * Per-CPU callback that probes the CPU for APIC support.
734 *
735 * @param idCpu The identifier for the CPU the function is called on.
736 * @param pvUser1 Ignored.
737 * @param pvUser2 Ignored.
738 */
739static DECLCALLBACK(void) cpumR0MapLocalApicCpuProber(RTCPUID idCpu, void *pvUser1, void *pvUser2)
740{
741 NOREF(pvUser1); NOREF(pvUser2);
742 int iCpu = RTMpCpuIdToSetIndex(idCpu);
743 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
744
745 /*
746 * Check for APIC support.
747 */
748 uint32_t uMaxLeaf, u32EBX, u32ECX, u32EDX;
749 ASMCpuId(0, &uMaxLeaf, &u32EBX, &u32ECX, &u32EDX);
750 if ( ( ASMIsIntelCpuEx(u32EBX, u32ECX, u32EDX)
751 || ASMIsAmdCpuEx(u32EBX, u32ECX, u32EDX)
752 || ASMIsViaCentaurCpuEx(u32EBX, u32ECX, u32EDX))
753 && ASMIsValidStdRange(uMaxLeaf))
754 {
755 uint32_t uDummy;
756 ASMCpuId(1, &uDummy, &u32EBX, &u32ECX, &u32EDX);
757 if ( (u32EDX & X86_CPUID_FEATURE_EDX_APIC)
758 && (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
759 {
760 /*
761 * Safe to access the MSR. Read it and calc the BASE (a little complicated).
762 */
763 uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);
764 uint64_t u64Mask = MSR_IA32_APICBASE_BASE_MIN;
765
766 /* see Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36 */
767 uint32_t uMaxExtLeaf;
768 ASMCpuId(0x80000000, &uMaxExtLeaf, &u32EBX, &u32ECX, &u32EDX);
769 if ( uMaxExtLeaf >= UINT32_C(0x80000008)
770 && ASMIsValidExtRange(uMaxExtLeaf))
771 {
772 uint32_t u32PhysBits;
773 ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX);
774 u32PhysBits &= 0xff;
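 /* With e.g. MAXPHYADDR = 36 the mask below becomes 0x0000000ffffff000,
    i.e. bits 12..35 of the APIC base MSR. */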
775 u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000);
776 }
777
778 AssertCompile(sizeof(g_aLApics[iCpu].PhysBase) == sizeof(u64ApicBase));
779 g_aLApics[iCpu].PhysBase = u64ApicBase & u64Mask;
780 g_aLApics[iCpu].fEnabled = RT_BOOL(u64ApicBase & MSR_IA32_APICBASE_EN);
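 /* x2APIC mode is only reported when both EN and EXTD are set; EXTD
    without EN is an invalid combination, hence the double check below. */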
781 g_aLApics[iCpu].fX2Apic = (u64ApicBase & (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN))
782 == (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN);
783 }
784 }
785}
786
787
788
789/**
790 * Per-CPU callback that verifies our APIC expectations.
791 *
792 * @param idCpu The identifier for the CPU the function is called on.
793 * @param pvUser1 Ignored.
794 * @param pvUser2 Ignored.
795 */
796static DECLCALLBACK(void) cpumR0MapLocalApicCpuChecker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
797{
798 NOREF(pvUser1); NOREF(pvUser2);
799
800 int iCpu = RTMpCpuIdToSetIndex(idCpu);
801 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
802 if (!g_aLApics[iCpu].fEnabled)
803 return;
804
805 /*
806 * 0x0X 82489 external APIC
807 * 0x1X Local APIC
808 * 0x2X..0xFF reserved
809 */
810 uint32_t uApicVersion;
811 if (g_aLApics[iCpu].fX2Apic)
812 uApicVersion = ApicX2RegRead32(APIC_REG_VERSION);
813 else
814 uApicVersion = ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_VERSION);
815 if ((APIC_REG_VERSION_GET_VER(uApicVersion) & 0xF0) == 0x10)
816 {
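 /* Record the version; the thermal-sensor LVT exists when the max LVT
    entry (version register bits 23:16) is at least 5. */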
817 g_aLApics[iCpu].uVersion = uApicVersion;
818 g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(uApicVersion) >= 5;
819
820#if 0 /* enable if you need it. */
821 if (g_aLApics[iCpu].fX2Apic)
822 SUPR0Printf("CPUM: X2APIC %02u - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
823 iCpu, uApicVersion,
824 ApicX2RegRead32(APIC_REG_LVT_LINT0), ApicX2RegRead32(APIC_REG_LVT_LINT1),
825 ApicX2RegRead32(APIC_REG_LVT_PC), ApicX2RegRead32(APIC_REG_LVT_THMR) );
826 else
827 SUPR0Printf("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
828 iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, uApicVersion,
829 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT0), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT1),
830 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_PC), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_THMR) );
831#endif
832 }
833 else
834 {
835 g_aLApics[iCpu].fEnabled = false;
836 g_aLApics[iCpu].fX2Apic = false;
837 SUPR0Printf("VBox/CPUM: Unsupported APIC version %#x (iCpu=%d)\n", uApicVersion, iCpu);
838 }
839}
840
841
842/**
843 * Map the MMIO page of each local APIC in the system.
844 */
845static int cpumR0MapLocalApics(void)
846{
847 /*
848 * Check that we'll always stay within the array bounds.
849 */
850 if (RTMpGetArraySize() > RT_ELEMENTS(g_aLApics))
851 {
852 LogRel(("CPUM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aLApics)));
853 return VERR_TOO_MANY_CPUS;
854 }
855
856 /*
857 * Create mappings for all online CPUs we think have legacy APICs.
858 */
859 int rc = RTMpOnAll(cpumR0MapLocalApicCpuProber, NULL, NULL);
860
861 for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
862 {
863 if (g_aLApics[iCpu].fEnabled && !g_aLApics[iCpu].fX2Apic)
864 {
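 /* Two-step mapping: enter the physical APIC page as a ring-0 memory
    object, then map it into kernel space. Passing (void *)-1 lets IPRT
    pick any kernel address. */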
865 rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
866 PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO);
867 if (RT_SUCCESS(rc))
868 {
869 rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj, (void *)-1,
870 PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
871 if (RT_SUCCESS(rc))
872 {
873 g_aLApics[iCpu].pv = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
874 continue;
875 }
876 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
877 }
878 g_aLApics[iCpu].fEnabled = false;
879 }
880 g_aLApics[iCpu].pv = NULL;
881 }
882
883 /*
884 * Check the APICs.
885 */
886 if (RT_SUCCESS(rc))
887 rc = RTMpOnAll(cpumR0MapLocalApicCpuChecker, NULL, NULL);
888
889 if (RT_FAILURE(rc))
890 {
891 cpumR0UnmapLocalApics();
892 return rc;
893 }
894
895#ifdef LOG_ENABLED
896 /*
897 * Log the result (pretty useless, requires enabling CPUM in VBoxDrv
898 * and !VBOX_WITH_R0_LOGGING).
899 */
900 if (LogIsEnabled())
901 {
902 uint32_t cEnabled = 0;
903 uint32_t cX2Apics = 0;
904 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
905 if (g_aLApics[iCpu].fEnabled)
906 {
907 cEnabled++;
908 cX2Apics += g_aLApics[iCpu].fX2Apic;
909 }
910 Log(("CPUM: %u APICs, %u X2APICs\n", cEnabled, cX2Apics));
911 }
912#endif
913
914 return VINF_SUCCESS;
915}
916
917
918/**
919 * Unmap the Local APIC of all host CPUs.
920 */
921static void cpumR0UnmapLocalApics(void)
922{
923 for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;)
924 {
925 if (g_aLApics[iCpu].pv)
926 {
927 RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
928 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
929 g_aLApics[iCpu].hMapObj = NIL_RTR0MEMOBJ;
930 g_aLApics[iCpu].hMemObj = NIL_RTR0MEMOBJ;
931 g_aLApics[iCpu].fEnabled = false;
932 g_aLApics[iCpu].fX2Apic = false;
933 g_aLApics[iCpu].pv = NULL;
934 }
935 }
936}
937
938
939/**
940 * Updates CPUMCPU::pvApicBase and CPUMCPU::fX2Apic prior to world switch.
941 *
942 * Writes the Local APIC mapping address of the current host CPU to CPUMCPU so
943 * the world switchers can access the APIC registers for the purpose of
944 * disabling and re-enabling the NMIs. Must be called with preemption or
945 * interrupts disabled!
946 *
947 * @param pVCpu Pointer to the cross context CPU structure of the
948 * calling EMT.
949 * @param idHostCpu The ID of the current host CPU.
950 */
951VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, RTCPUID idHostCpu)
952{
953 int idxCpu = RTMpCpuIdToSetIndex(idHostCpu);
954 pVCpu->cpum.s.pvApicBase = g_aLApics[idxCpu].pv;
955 pVCpu->cpum.s.fX2Apic = g_aLApics[idxCpu].fX2Apic;
956// Log6(("CPUMR0SetLApic: pvApicBase=%p fX2Apic=%d\n", g_aLApics[idxCpu].pv, g_aLApics[idxCpu].fX2Apic));
957}
958
959#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */
960