/* $Id: CPUMR0.cpp 47715 2013-08-14 00:11:35Z vboxsync $ */
/** @file
 * CPUM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/vmm/hm.h>
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
# include <iprt/mem.h>
# include <iprt/memobj.h>
# include <VBox/apic.h>
#endif
#include <iprt/x86.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
/**
 * Local APIC mappings.
 */
typedef struct CPUMHOSTLAPIC
{
    /** Indicates that the entry is in use and has valid data. */
    bool        fEnabled;
    /** Has APIC_REG_LVT_THMR. Not used. */
    uint32_t    fHasThermal;
    /** The physical address of the APIC registers. */
    RTHCPHYS    PhysBase;
    /** The memory object for the physical address (RTR0MemObjEnterPhys). */
    RTR0MEMOBJ  hMemObj;
    /** The mapping object for hMemObj. */
    RTR0MEMOBJ  hMapObj;
    /** The mapping address of the APIC registers.
     * @remarks Different CPUs may use the same physical address to map their
     *          APICs, so this pointer is only valid when on the CPU owning the
     *          APIC. */
    void       *pv;
} CPUMHOSTLAPIC;
#endif


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
static CPUMHOSTLAPIC g_aLApics[RTCPUSET_MAX_CPUS];
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
static int  cpumR0MapLocalApics(void);
static void cpumR0UnmapLocalApics(void);
#endif
static int  cpumR0SaveHostDebugState(PVMCPU pVCpu);


/**
 * Does the Ring-0 CPU initialization once during module load.
 * XXX Host-CPU hot-plugging?
 */
VMMR0_INT_DECL(int) CPUMR0ModuleInit(void)
{
    int rc = VINF_SUCCESS;
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    rc = cpumR0MapLocalApics();
#endif
    return rc;
}


/**
 * Terminate the module.
 */
VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void)
{
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    cpumR0UnmapLocalApics();
#endif
    return VINF_SUCCESS;
}


/**
 * Check the CPUID features of this particular CPU and disable relevant features
 * for the guest which do not exist on this CPU. We have seen systems where the
 * X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
 * @bugref{5436}.
 *
 * @note This function might be called simultaneously on more than one CPU!
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Pointer to the VM structure.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    struct
    {
        uint32_t uLeave; /* leaf to check */
        uint32_t ecx;    /* which bits in ecx to unify between CPUs */
        uint32_t edx;    /* which bits in edx to unify between CPUs */
    } aCpuidUnify[]
    =
    {
        { 0x00000001, X86_CPUID_FEATURE_ECX_CX16
                    | X86_CPUID_FEATURE_ECX_MONITOR,
                      X86_CPUID_FEATURE_EDX_CX8 }
    };
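    /* For each leaf below we AND the stored guest CPUID value with
       (bits-present-on-this-CPU | ~unify-mask): bits outside the unify mask
       pass through untouched, while bits inside it survive only if every
       CPU running this callback reports them, i.e. we keep the lowest
       common denominator of all host CPUs. */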
    PVM     pVM   = (PVM)pvUser1;
    PCPUM   pCPUM = &pVM->cpum.s;
    for (uint32_t i = 0; i < RT_ELEMENTS(aCpuidUnify); i++)
    {
        uint32_t uLeave = aCpuidUnify[i].uLeave;
        uint32_t eax, ebx, ecx, edx;

        ASMCpuId_Idx_ECX(uLeave, 0, &eax, &ebx, &ecx, &edx);
        PCPUMCPUID paLeaves;
        if (uLeave < 0x80000000)
            paLeaves = &pCPUM->aGuestCpuIdStd[uLeave - 0x00000000];
        else if (uLeave < 0xc0000000)
            paLeaves = &pCPUM->aGuestCpuIdExt[uLeave - 0x80000000];
        else
            paLeaves = &pCPUM->aGuestCpuIdCentaur[uLeave - 0xc0000000];
        /* unify important bits */
        ASMAtomicAndU32(&paLeaves->ecx, ecx | ~aCpuidUnify[i].ecx);
        ASMAtomicAndU32(&paLeaves->edx, edx | ~aCpuidUnify[i].edx);
    }
}


/**
 * Does Ring-0 CPUM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VBox.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMMR0_INT_DECL(int) CPUMR0Init(PVM pVM)
{
    LogFlow(("CPUMR0Init: %p\n", pVM));

    /*
     * Check CR0 & CR4 flags.
     */
    uint32_t u32CR0 = ASMGetCR0();
    if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
    {
        Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
        return VERR_UNSUPPORTED_CPU_MODE;
    }

    /*
     * Check for sysenter and syscall usage.
     */
    if (ASMHasCpuId())
    {
        /*
         * SYSENTER/SYSEXIT
         *
         * Intel docs claim you should test both the flag and family, model &
         * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
         * but don't support it. AMD CPUs may support this feature in legacy
         * mode, but they've banned it from long mode. Since we switch to 32-bit
         * mode when entering raw-mode context the feature would become
         * accessible again on AMD CPUs, so we have to check regardless of
         * host bitness.
         */
        uint32_t u32CpuVersion;
        uint32_t u32Dummy;
        uint32_t fFeatures;
        ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
        uint32_t u32Family   = u32CpuVersion >> 8;
        uint32_t u32Model    = (u32CpuVersion >> 4) & 0xF;
        uint32_t u32Stepping = u32CpuVersion & 0xF;
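        /* The condition below rejects only the problematic combination: an
           Intel CPU of family 6, model < 3, stepping < 3, i.e. the early P6
           parts that set the SEP flag without actually supporting SYSENTER. */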
        if (    (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
            &&  (   u32Family   != 6    /* (> pentium pro) */
                 || u32Model    >= 3
                 || u32Stepping >= 3
                 || !ASMIsIntelCpu())
           )
        {
            /*
             * Read the MSR and see if it's in use or not.
             */
            uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
            if (u32)
            {
                pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSENTER;
                Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
            }
        }

        /*
         * SYSCALL/SYSRET
         *
         * This feature is indicated by the SYSCALL bit returned in EDX by
         * CPUID function 0x80000001. Intel CPUs only support this feature in
         * long mode. Since we're not running 64-bit guests in raw-mode there
         * are no issues with 32-bit Intel hosts.
         */
        uint32_t cExt = 0;
        ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
        if (    cExt >= 0x80000001
            &&  cExt <= 0x8000ffff)
        {
            uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
            if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
            {
#ifdef RT_ARCH_X86
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
                if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
# else
                if (!ASMIsIntelCpu())
# endif
#endif
                {
                    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
                    if (fEfer & MSR_K6_EFER_SCE)
                    {
                        pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSCALL;
                        Log(("CPUMR0Init: host uses syscall\n"));
                    }
                }
            }
        }

        RTMpOnAll(cpumR0CheckCpuid, pVM, NULL);
    }


    /*
     * Check if debug registers are armed.
     * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
     */
    uint32_t u32DR7 = ASMGetDR7();
    if (u32DR7 & X86_DR7_ENABLED_MASK)
    {
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
            pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
        Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
    }

    return VINF_SUCCESS;
}

/**
 * Lazily syncs in the FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest CPU context.
 */
VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    Assert(ASMGetCR4() & X86_CR4_OSFSXR);

    /* If the FPU state has already been loaded, then it's a guest trap. */
    if (CPUMIsGuestFPUStateActive(pVCpu))
    {
        Assert(    ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM))
               ||  ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * There are two basic actions:
     *   1. Save host fpu and restore guest fpu.
     *   2. Generate guest trap.
     *
     * When entering the hypervisor we'll always enable MP (for proper wait
     * trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
     * is taken from the guest OS in order to get proper SSE handling.
     *
     *
     * Actions taken depending on the guest CR0 flags:
     *
     *    3    2    1
     *   TS | EM | MP | FPUInstr | WAIT :: VMM Action
     * ------------------------------------------------------------------------
     *    0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
     *    0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
     *    0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC.
     *    0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
     *    1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
     *    1 |  0 |  1 | #NM      | #NM  :: Go to guest taking trap there.
     *    1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
     *    1 |  1 |  1 | #NM      | #NM  :: Go to guest taking trap there.
     */

    switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
    {
        case X86_CR0_MP | X86_CR0_TS:
        case X86_CR0_MP | X86_CR0_TS | X86_CR0_EM:
            return VINF_EM_RAW_GUEST_TRAP;
        default:
            break;
    }
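    /* Only the TS+MP combinations (the 1|x|1 rows above) are forwarded to the
       guest: with both TS and MP set, FPU instructions and WAIT/FWAIT all
       raise #NM, which the guest OS expects to handle itself. */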

#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));

        /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
        cpumR0SaveHostFPUState(&pVCpu->cpum.s);

        /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
        pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
    }
    else
#endif
    {
#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
# if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
        /** @todo Move the FFXR handling down into
         *        cpumR0SaveHostRestoreGuestFPUState to optimize the
         *        VBOX_WITH_KERNEL_USING_XMM handling. */
        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        uint64_t SavedEFER = 0;
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            SavedEFER = ASMRdMsr(MSR_K6_EFER);
            if (SavedEFER & MSR_K6_EFER_FFXSR)
            {
                ASMWrMsr(MSR_K6_EFER, SavedEFER & ~MSR_K6_EFER_FFXSR);
                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
            }
        }

        /* Do the job and record that we've switched FPU state. */
        cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);

        /* Restore EFER. */
        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
            ASMWrMsr(MSR_K6_EFER, SavedEFER);

# else
        uint64_t oldMsrEFERHost = 0;
        uint32_t oldCR0 = ASMGetCR0();

        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            /** @todo Do we really need to read this every time?? The host could change this on the fly though.
             *  bird: what about starting by skipping the ASMWrMsr below if we didn't
             *        change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
            if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
            {
                ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
            }
        }

        /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
        int rc = CPUMHandleLazyFPU(pVCpu);
        AssertRC(rc);
        Assert(CPUMIsGuestFPUStateActive(pVCpu));

        /* Restore EFER MSR */
        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);

        /* CPUMHandleLazyFPU could have changed CR0; restore it. */
        ASMSetCR0(oldCR0);
# endif

#else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */

        /*
         * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
         */
        pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
            pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();

        cpumR0LoadFPU(pCtx);

        /*
         * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
         *
         * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstor: the XMM state isn't saved/restored.
         */
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            /** @todo Do we really need to read this every time?? The host could change this on the fly though. */
            uint64_t msrEFERHost = ASMRdMsr(MSR_K6_EFER);

            if (msrEFERHost & MSR_K6_EFER_FFXSR)
            {
                /* fxrstor doesn't restore the XMM state! */
                cpumR0LoadXMM(pCtx);
                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
            }
        }

#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    }

    Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
    return VINF_SUCCESS;
}


/**
 * Saves the guest FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest CPU context.
 */
VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
    Assert(ASMGetCR4() & X86_CR4_OSFSXR);
    AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
    NOREF(pCtx);

#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
        {
            HMR0SaveFPUState(pVM, pVCpu, pCtx);
            cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
        }
        /* else nothing to do; we didn't perform a world switch */
    }
    else
#endif
    {
#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
# ifdef VBOX_WITH_KERNEL_USING_XMM
        /*
         * We've already saved the XMM registers in the assembly wrapper, so
         * we have to save them before saving the entire FPU state and put them
         * back afterwards.
         */
        /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
         *        I'm not able to test such an optimization tonight.
         *        We could just do all this in assembly. */
        uint128_t aGuestXmmRegs[16];
        memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
# endif

        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        uint64_t oldMsrEFERHost = 0;
        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
        {
            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
        }
        cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);

        /* Restore EFER MSR */
        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);

# ifdef VBOX_WITH_KERNEL_USING_XMM
        memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
# endif

#else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
# ifdef VBOX_WITH_KERNEL_USING_XMM
#  error "Fix all the NM_TRAPS_IN_KERNEL_MODE code path. I'm not going to fix unused code now."
# endif
        cpumR0SaveFPU(pCtx);
        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
        {
            /* fxsave doesn't save the XMM state! */
            cpumR0SaveXMM(pCtx);
        }

        /*
         * Restore the original FPU control word and MXCSR.
         * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
         */
        cpumR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
        if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
            cpumR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
    }

    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE);
    return VINF_SUCCESS;
}

/**
 * Saves the host debug state, setting CPUM_USED_DEBUG_REGS_HOST and loading
 * DR7 with safe values.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static int cpumR0SaveHostDebugState(PVMCPU pVCpu)
{
    /*
     * Save the host state.
     */
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
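    /* The compile-time check below guarantees dr0..dr3 are laid out as a
       contiguous uint64_t array, which is what cpumR0SaveDRx relies on when
       storing all four registers through a single base pointer. */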
    AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
    cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
#else
    pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
    pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
    pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
    pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
#endif
    pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
    /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
    pVCpu->cpum.s.Host.dr7 = ASMGetDR7();

    /* Preemption paranoia. */
    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);

    /*
     * Make sure DR7 is harmless or else we could trigger breakpoints when
     * loading guest or hypervisor DRx values later.
     */
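    /* X86_DR7_INIT_VAL has all four breakpoint enables clear, so writing it
       disarms every host breakpoint regardless of what DR0-3 contain. */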
    if (pVCpu->cpum.s.Host.dr7 != X86_DR7_INIT_VAL)
        ASMSetDR7(X86_DR7_INIT_VAL);

    return VINF_SUCCESS;
}

/**
 * Saves the guest DRx state residing in host registers and restores the host
 * register values.
 *
 * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
 * since it's assumed that we're shadowing the guest DRx register values
 * accurately when using the combined hypervisor debug register values
 * (CPUMR0LoadHyperDebugState).
 *
 * @returns true if either guest or hypervisor debug registers were loaded.
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   fDr6        Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
{
    bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
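    /* Snapshot the flags up front: they are cleared below, but the return
       value must report whether guest/hyper registers were loaded on entry. */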

    /*
     * Do we need to save the guest DRx registers loaded into host registers?
     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
     */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
    {
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        {
            uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
            HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
            if (!fDr6)
                pVCpu->cpum.s.Guest.dr[6] = uDr6;
        }
        else
#endif
        {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
#else
            pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
            pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
            pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
            pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
#endif
            if (fDr6)
                pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
        }
    }
    ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~(  CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
                                                | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));

    /*
     * Restore the host's debug state. DR0-3, DR6 and only then DR7!
     */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
    {
        /* A bit of paranoia first... */
        uint64_t uCurDR7 = ASMGetDR7();
        if (uCurDR7 != X86_DR7_INIT_VAL)
            ASMSetDR7(X86_DR7_INIT_VAL);

#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
        cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
#else
        ASMSetDR0(pVCpu->cpum.s.Host.dr0);
        ASMSetDR1(pVCpu->cpum.s.Host.dr1);
        ASMSetDR2(pVCpu->cpum.s.Host.dr2);
        ASMSetDR3(pVCpu->cpum.s.Host.dr3);
#endif
        /** @todo consider only updating if they differ, esp. DR6. Need to figure how
         *        expensive DRx reads are over DRx writes. */
        ASMSetDR6(pVCpu->cpum.s.Host.dr6);
        ASMSetDR7(pVCpu->cpum.s.Host.dr7);

        ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);
    }

    return fDrXLoaded;
}

/**
 * Saves the guest DRx state if it resides in host registers.
 *
 * This does NOT clear any use flags, so the host registers remain loaded with
 * the guest DRx state upon return. The purpose is only to make sure the values
 * in the CPU context structure are up to date.
 *
 * @returns true if the host registers contain guest values, false if not.
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   fDr6        Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
{
    /*
     * Do we need to save the guest DRx registers loaded into host registers?
     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
     */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
    {
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        {
            uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
            HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
            if (!fDr6)
                pVCpu->cpum.s.Guest.dr[6] = uDr6;
        }
        else
#endif
        {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
#else
            pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
            pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
            pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
            pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
#endif
            if (fDr6)
                pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
        }
        return true;
    }
    return false;
}


/**
 * Lazily sync in the debug state.
 *
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   fDr6        Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
{
    /*
     * Save the host state and disarm all host BPs.
     */
    cpumR0SaveHostDebugState(pVCpu);
    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);

    /*
     * Activate the guest state DR0-3.
     * DR7 and DR6 (if fDr6 is true) are left to the caller.
     */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
#else
        ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
        ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
        ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
        ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
#endif
        if (fDr6)
            ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);

        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
    }
}

/**
 * Lazily sync in the hypervisor debug state.
 *
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   fDr6        Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
{
    /*
     * Save the host state and disarm all host BPs.
     */
    cpumR0SaveHostDebugState(pVCpu);
    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);

    /*
     * Make sure the hypervisor values are up to date.
     */
    CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */, true);

    /*
     * Activate the hypervisor state DR0-3.
     * DR7 and DR6 (if fDr6 is true) are left to the caller.
     */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
#else
        ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
        ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
        ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
        ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
#endif
        if (fDr6)
            ASMSetDR6(X86_DR6_INIT_VAL);

        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
    }
}

#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI

/**
 * Worker for cpumR0MapLocalApics. Check each CPU for a present Local APIC.
 * Play safe and treat each CPU separately.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Ignored.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) cpumR0MapLocalApicWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(pvUser1); NOREF(pvUser2);
    int iCpu = RTMpCpuIdToSetIndex(idCpu);
    AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));

    uint32_t uMaxLeaf, u32EBX, u32ECX, u32EDX;
    ASMCpuId(0, &uMaxLeaf, &u32EBX, &u32ECX, &u32EDX);
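    /* Leaf 0 returns the highest standard CPUID leaf in EAX and the vendor
       string in EBX:EDX:ECX; the vendor checks below key off those three
       registers. */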
    if (   (   ASMIsIntelCpuEx(u32EBX, u32ECX, u32EDX)
            || ASMIsAmdCpuEx(u32EBX, u32ECX, u32EDX)
            || ASMIsViaCentaurCpuEx(u32EBX, u32ECX, u32EDX))
        && ASMIsValidStdRange(uMaxLeaf))
    {
        uint32_t uDummy;
        ASMCpuId(1, &uDummy, &u32EBX, &u32ECX, &u32EDX);
        if (   (u32EDX & X86_CPUID_FEATURE_EDX_APIC)
            && (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
        {
            uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);
            uint64_t u64Mask     = UINT64_C(0x0000000ffffff000);

            /* see Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36 */
            uint32_t uMaxExtLeaf;
            ASMCpuId(0x80000000, &uMaxExtLeaf, &u32EBX, &u32ECX, &u32EDX);
            if (   uMaxExtLeaf >= UINT32_C(0x80000008)
                && ASMIsValidExtRange(uMaxExtLeaf))
            {
                uint32_t u32PhysBits;
                ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX);
                u32PhysBits &= 0xff;
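                /* CPUID 0x80000008 reports the physical address width in
                   EAX[7:0]; the mask below keeps bits 12 through width-1,
                   i.e. the page-aligned APIC base within addressable range. */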
                u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000);
            }

            uint64_t const u64PhysBase = u64ApicBase & u64Mask;
            g_aLApics[iCpu].PhysBase   = (RTHCPHYS)u64PhysBase;
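            /* The comparison below double-checks that the cast to RTHCPHYS
               did not truncate the physical address; the entry is enabled
               only if the value survived intact. */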
            g_aLApics[iCpu].fEnabled   = g_aLApics[iCpu].PhysBase == u64PhysBase;
        }
    }
}


/**
 * Map the MMIO page of each local APIC in the system.
 */
static int cpumR0MapLocalApics(void)
{
    /*
     * Check that we'll always stay within the array bounds.
     */
    if (RTMpGetArraySize() > RT_ELEMENTS(g_aLApics))
    {
        LogRel(("CPUM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aLApics)));
        return VERR_TOO_MANY_CPUS;
    }

    /*
     * Create mappings for all online CPUs we think have APICs.
     */
    /** @todo r=bird: This code is not adequately handling CPUs that are
     *        offline or unplugged at init time and later brought into action. */
    int rc = RTMpOnAll(cpumR0MapLocalApicWorker, NULL, NULL);

    for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
    {
        if (g_aLApics[iCpu].fEnabled)
        {
            rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
                                     PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO);
            if (RT_SUCCESS(rc))
            {
                rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj, (void *)-1,
                                         PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (RT_SUCCESS(rc))
                {
                    void *pvApicBase = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);

                    /*
                     * 0x0X       82489 external APIC
                     * 0x1X       Local APIC
                     * 0x2X..0xFF reserved
                     */
                    /** @todo r=bird: The local APIC is usually at the same address for all CPUs,
                     *        and therefore inaccessible by the other CPUs. */
                    uint32_t ApicVersion = ApicRegRead(pvApicBase, APIC_REG_VERSION);
                    if ((APIC_REG_VERSION_GET_VER(ApicVersion) & 0xF0) == 0x10)
                    {
                        g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(ApicVersion) >= 5;
                        g_aLApics[iCpu].pv          = pvApicBase;
                        Log(("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#x, lint0=%#x lint1=%#x pc=%#x thmr=%#x\n",
                             iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, ApicVersion,
                             ApicRegRead(pvApicBase, APIC_REG_LVT_LINT0),
                             ApicRegRead(pvApicBase, APIC_REG_LVT_LINT1),
                             ApicRegRead(pvApicBase, APIC_REG_LVT_PC),
                             ApicRegRead(pvApicBase, APIC_REG_LVT_THMR)
                           ));
                        continue;
                    }

                    RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
                }
                RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
            }
            g_aLApics[iCpu].fEnabled = false;
        }
    }
    if (RT_FAILURE(rc))
    {
        cpumR0UnmapLocalApics();
        return rc;
    }

    return VINF_SUCCESS;
}


/**
 * Unmap the Local APIC of all host CPUs.
 */
static void cpumR0UnmapLocalApics(void)
{
    for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;)
    {
        if (g_aLApics[iCpu].pv)
        {
            RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
            RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
            g_aLApics[iCpu].hMapObj  = NIL_RTR0MEMOBJ;
            g_aLApics[iCpu].hMemObj  = NIL_RTR0MEMOBJ;
            g_aLApics[iCpu].fEnabled = false;
            g_aLApics[iCpu].pv       = NULL;
        }
    }
}

/**
 * Write the Local APIC mapping address of the current host CPU to CPUM to be
 * able to access the APIC registers in the raw mode switcher for disabling/
 * re-enabling the NMI. Must be called with preemption or interrupts disabled!
 *
 * @param   pVM         Pointer to the VM.
 * @param   idHostCpu   The ID of the current host CPU.
 */
VMMR0_INT_DECL(void) CPUMR0SetLApic(PVM pVM, RTCPUID idHostCpu)
{
    pVM->cpum.s.pvApicBase = g_aLApics[RTMpCpuIdToSetIndex(idHostCpu)].pv;
}

#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */