VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp @ 48998

Last change on this file since 48998 was 48683, checked in by vboxsync on 2013-09-25

VMM/CPUMR0: const.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 35.9 KB
Line 
1/* $Id: CPUMR0.cpp 48683 2013-09-25 13:41:57Z vboxsync $ */
2/** @file
3 * CPUM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include "CPUMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/err.h>
27#include <VBox/log.h>
28#include <VBox/vmm/hm.h>
29#include <iprt/assert.h>
30#include <iprt/asm-amd64-x86.h>
31#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
32# include <iprt/mem.h>
33# include <iprt/memobj.h>
34# include <VBox/apic.h>
35#endif
36#include <iprt/x86.h>
37
38
39/*******************************************************************************
40* Structures and Typedefs *
41*******************************************************************************/
42#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
43/**
44 * Local APIC mappings.
45 */
46typedef struct CPUMHOSTLAPIC
47{
48 /** Indicates that the entry is in use and has valid data. */
49 bool fEnabled;
50 /** Whether it's operating in X2APIC mode (EXTD). */
51 bool fX2Apic;
52 /** The APIC version number. */
53 uint32_t uVersion;
54 /** Has APIC_REG_LVT_THMR. Not used. */
55 uint32_t fHasThermal;
56 /** The physical address of the APIC registers. */
57 RTHCPHYS PhysBase;
58 /** The memory object for the physical address (entered via RTR0MemObjEnterPhys). */
59 RTR0MEMOBJ hMemObj;
60 /** The mapping object for hMemObj. */
61 RTR0MEMOBJ hMapObj;
62 /** The mapping address of the APIC registers.
63 * @remarks Different CPUs may use the same physical address to map their
64 * APICs, so this pointer is only valid when on the CPU owning the
65 * APIC. */
66 void *pv;
67} CPUMHOSTLAPIC;
68#endif
69
70
71/*******************************************************************************
72* Global Variables *
73*******************************************************************************/
74#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
75static CPUMHOSTLAPIC g_aLApics[RTCPUSET_MAX_CPUS];
76#endif
77
78
79/*******************************************************************************
80* Internal Functions *
81*******************************************************************************/
82#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
83static int cpumR0MapLocalApics(void);
84static void cpumR0UnmapLocalApics(void);
85#endif
86static int cpumR0SaveHostDebugState(PVMCPU pVCpu);
87
88
89/**
90 * Does the Ring-0 CPU initialization once during module load.
91 * XXX Host-CPU hot-plugging?
92 */
93VMMR0_INT_DECL(int) CPUMR0ModuleInit(void)
94{
95 int rc = VINF_SUCCESS;
96#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
97 rc = cpumR0MapLocalApics();
98#endif
99 return rc;
100}
101
102
103/**
104 * Terminate the module.
105 */
106VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void)
107{
108#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
109 cpumR0UnmapLocalApics();
110#endif
111 return VINF_SUCCESS;
112}
113
114
115/**
116 * Check the CPUID features of this particular CPU and disable relevant features
117 * for the guest which do not exist on this CPU. We have seen systems where the
118 * X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
119 * @bugref{5436}.
120 *
121 * @note This function might be called simultaneously on more than one CPU!
122 *
123 * @param idCpu The identifier for the CPU the function is called on.
124 * @param pvUser1 Pointer to the VM structure.
125 * @param pvUser2 Ignored.
126 */
127static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
128{
129 struct
130 {
131 uint32_t uLeave; /* leaf to check */
132 uint32_t ecx; /* which bits in ecx to unify between CPUs */
133 uint32_t edx; /* which bits in edx to unify between CPUs */
134 } aCpuidUnify[]
135 =
136 {
137 { 0x00000001, X86_CPUID_FEATURE_ECX_CX16
138 | X86_CPUID_FEATURE_ECX_MONITOR,
139 X86_CPUID_FEATURE_EDX_CX8 }
140 };
141 PVM pVM = (PVM)pvUser1;
142 PCPUM pCPUM = &pVM->cpum.s;
143 for (uint32_t i = 0; i < RT_ELEMENTS(aCpuidUnify); i++)
144 {
145 uint32_t uLeave = aCpuidUnify[i].uLeave;
146 uint32_t eax, ebx, ecx, edx;
147
148 ASMCpuId_Idx_ECX(uLeave, 0, &eax, &ebx, &ecx, &edx);
149 PCPUMCPUID paLeaves;
150 if (uLeave < 0x80000000)
151 paLeaves = &pCPUM->aGuestCpuIdStd[uLeave - 0x00000000];
152 else if (uLeave < 0xc0000000)
153 paLeaves = &pCPUM->aGuestCpuIdExt[uLeave - 0x80000000];
154 else
155 paLeaves = &pCPUM->aGuestCpuIdCentaur[uLeave - 0xc0000000];
156 /* unify important bits */
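/* ANDing with (host value | ~unify mask) clears, in the shared guest leaf, only those
   unify-mask bits the current host CPU lacks; all other guest CPUID bits stay untouched. */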
157 ASMAtomicAndU32(&paLeaves->ecx, ecx | ~aCpuidUnify[i].ecx);
158 ASMAtomicAndU32(&paLeaves->edx, edx | ~aCpuidUnify[i].edx);
159 }
160}
161
162
163/**
164 * Does Ring-0 CPUM initialization.
165 *
166 * This is mainly to check that the Host CPU mode is compatible
167 * with VBox.
168 *
169 * @returns VBox status code.
170 * @param pVM Pointer to the VM.
171 */
172VMMR0_INT_DECL(int) CPUMR0InitVM(PVM pVM)
173{
174 LogFlow(("CPUMR0Init: %p\n", pVM));
175
176 /*
177 * Check CR0 & CR4 flags.
178 */
179 uint32_t u32CR0 = ASMGetCR0();
180 if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
181 {
182 Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
183 return VERR_UNSUPPORTED_CPU_MODE;
184 }
185
186 /*
187 * Check for sysenter and syscall usage.
188 */
189 if (ASMHasCpuId())
190 {
191 /*
192 * SYSENTER/SYSEXIT
193 *
194 * Intel docs claim you should test both the flag and family, model &
195 * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
196 * but don't support it. AMD CPUs may support this feature in legacy
197 * mode, but they've banned it from long mode. Since we switch to 32-bit
198 * mode when entering raw-mode context the feature would become
199 * accessible again on AMD CPUs, so we have to check regardless of
200 * host bitness.
201 */
202 uint32_t u32CpuVersion;
203 uint32_t u32Dummy;
204 uint32_t fFeatures;
205 ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
206 uint32_t const u32Family = u32CpuVersion >> 8;
207 uint32_t const u32Model = (u32CpuVersion >> 4) & 0xF;
208 uint32_t const u32Stepping = u32CpuVersion & 0xF;
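/* Plain CPUID.01H EAX decoding; the extended family/model fields are not folded in,
   which is good enough for the Pentium Pro quirk check below. */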
209 if ( (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
210 && ( u32Family != 6 /* (> pentium pro) */
211 || u32Model >= 3
212 || u32Stepping >= 3
213 || !ASMIsIntelCpu())
214 )
215 {
216 /*
217 * Read the MSR and see if it's in use or not.
218 */
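/* A zero SYSENTER_CS selector means the host OS has not set up sysenter. */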
219 uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
220 if (u32)
221 {
222 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSENTER;
223 Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
224 }
225 }
226
227 /*
228 * SYSCALL/SYSRET
229 *
230 * This feature is indicated by the SEP bit returned in EDX by CPUID
231 * function 0x80000001. Intel CPUs only support this feature in
232 * long mode. Since we're not running 64-bit guests in raw-mode there
233 * are no issues with 32-bit Intel hosts.
234 */
235 uint32_t cExt = 0;
236 ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
237 if ( cExt >= 0x80000001
238 && cExt <= 0x8000ffff)
239 {
240 uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
241 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
242 {
243#ifdef RT_ARCH_X86
244# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
245 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
246# else
247 if (!ASMIsIntelCpu())
248# endif
249#endif
250 {
251 uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
252 if (fEfer & MSR_K6_EFER_SCE)
253 {
254 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSCALL;
255 Log(("CPUMR0Init: host uses syscall\n"));
256 }
257 }
258 }
259 }
260
261 RTMpOnAll(cpumR0CheckCpuid, pVM, NULL);
262 }
263
264
265 /*
266 * Check if debug registers are armed.
267 * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
268 */
269 uint32_t u32DR7 = ASMGetDR7();
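/* X86_DR7_ENABLED_MASK covers the L0-L3/G0-G3 breakpoint enable bits, i.e. any armed
   hardware breakpoint on the host. */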
270 if (u32DR7 & X86_DR7_ENABLED_MASK)
271 {
272 for (VMCPUID i = 0; i < pVM->cCpus; i++)
273 pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
274 Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
275 }
276
277 return VINF_SUCCESS;
278}
279
280
281/**
282 * Lazily sync the guest-FPU/XMM state if possible.
283 *
284 * Loads the guest-FPU state, if it isn't already loaded, into the CPU if the
285 * guest is not expecting a #NM trap.
286 *
287 * @returns VBox status code.
288 * @retval VINF_SUCCESS if the guest FPU state is loaded.
289 * @retval VINF_EM_RAW_GUEST_TRAP if it is a guest trap.
290 *
291 * @remarks This relies on CPUMIsGuestFPUStateActive() reflecting reality.
292 */
293VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
294{
295 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
296 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
297
298 /* If the FPU state has already been loaded, then it's a guest trap. */
299 if (CPUMIsGuestFPUStateActive(pVCpu))
300 {
301 Assert( ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM))
302 || ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)));
303 return VINF_EM_RAW_GUEST_TRAP;
304 }
305
306 /*
307 * There are two basic actions:
308 * 1. Save host fpu and restore guest fpu.
309 * 2. Generate guest trap.
310 *
311 * When entering the hypervisor we'll always enable MP (for proper wait
312 * trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
313 * is taken from the guest OS in order to get proper SSE handling.
314 *
315 *
316 * Actions taken depending on the guest CR0 flags:
317 *
318 * 3 2 1
319 * TS | EM | MP | FPUInstr | WAIT :: VMM Action
320 * ------------------------------------------------------------------------
321 * 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC.
322 * 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC.
323 * 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC.
324 * 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC.
325 * 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
326 * 1 | 0 | 1 | #NM | #NM :: Go to guest taking trap there.
327 * 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
328 * 1 | 1 | 1 | #NM | #NM :: Go to guest taking trap there.
329 */
330
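/* These two combinations are the rows in the table above where both FPU instructions
   and WAIT take #NM, i.e. the trap goes straight to the guest. */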
331 switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
332 {
333 case X86_CR0_MP | X86_CR0_TS:
334 case X86_CR0_MP | X86_CR0_TS | X86_CR0_EM:
335 return VINF_EM_RAW_GUEST_TRAP;
336 default:
337 break;
338 }
339
340#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
341 if (CPUMIsGuestInLongModeEx(pCtx))
342 {
343 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
344
345 /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
346 cpumR0SaveHostFPUState(&pVCpu->cpum.s);
347
348 /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
349 pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
350 }
351 else
352#endif
353 {
354#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
355# if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */
356 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
357 /** @todo Move the FFXSR handling down into
358 * cpumR0SaveHostRestoreGuestFPUState to optimize the
359 * VBOX_WITH_KERNEL_USING_XMM handling. */
360 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
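/* (With EFER.FFXSR set, fxsave/fxrstor executed at CPL 0 in 64-bit mode skip the XMM
   registers entirely - AMD's fast FXSAVE/FXRSTOR optimization.) */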
361 uint64_t SavedEFER = 0;
362 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
363 {
364 SavedEFER = ASMRdMsr(MSR_K6_EFER);
365 if (SavedEFER & MSR_K6_EFER_FFXSR)
366 {
367 ASMWrMsr(MSR_K6_EFER, SavedEFER & ~MSR_K6_EFER_FFXSR);
368 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
369 }
370 }
371
372 /* Do the job and record that we've switched FPU state. */
373 cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
374
375 /* Restore EFER. */
376 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
377 ASMWrMsr(MSR_K6_EFER, SavedEFER);
378
379# else
380 uint64_t oldMsrEFERHost = 0;
381 uint32_t oldCR0 = ASMGetCR0();
382
383 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
384 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
385 {
386 /** @todo Do we really need to read this every time?? The host could change this on the fly though.
387 * bird: what about starting by skipping the ASMWrMsr below if we didn't
388 * change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
389 oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
390 if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
391 {
392 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
393 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
394 }
395 }
396
397 /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
398 int rc = CPUMHandleLazyFPU(pVCpu);
399 AssertRC(rc);
400 Assert(CPUMIsGuestFPUStateActive(pVCpu));
401
402 /* Restore EFER MSR */
403 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
404 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);
405
406 /* CPUMHandleLazyFPU could have changed CR0; restore it. */
407 ASMSetCR0(oldCR0);
408# endif
409
410#else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
411
412 /*
413 * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
414 * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
415 */
416 pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
417 if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
418 pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();
419
420 cpumR0LoadFPU(pCtx);
421
422 /*
423 * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
424 *
425 * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstor: the XMM state isn't saved/restored.
426 */
427 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
428 {
429 /** @todo Do we really need to read this every time?? The host could change this on the fly though. */
430 uint64_t msrEFERHost = ASMRdMsr(MSR_K6_EFER);
431
432 if (msrEFERHost & MSR_K6_EFER_FFXSR)
433 {
434 /* fxrstor doesn't restore the XMM state! */
435 cpumR0LoadXMM(pCtx);
436 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
437 }
438 }
439
440#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
441 }
442
443 Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
444 return VINF_SUCCESS;
445}
446
447
448/**
449 * Save guest FPU/XMM state
450 *
451 * @returns VBox status code.
452 * @param pVM Pointer to the VM.
453 * @param pVCpu Pointer to the VMCPU.
454 * @param pCtx Pointer to the guest CPU context.
455 */
456VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
457{
458 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
459 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
460 AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
461 NOREF(pCtx);
462
463#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
464 if (CPUMIsGuestInLongModeEx(pCtx))
465 {
466 if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
467 {
468 HMR0SaveFPUState(pVM, pVCpu, pCtx);
469 cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
470 }
471 /* else nothing to do; we didn't perform a world switch */
472 }
473 else
474#endif
475 {
476#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
477# ifdef VBOX_WITH_KERNEL_USING_XMM
478 /*
479 * We've already saved the XMM registers in the assembly wrapper, so
480 * we have to save them before saving the entire FPU state and put them
481 * back afterwards.
482 */
483 /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
484 * I'm not able to test such an optimization tonight.
485 * We could just do all this in assembly. */
486 uint128_t aGuestXmmRegs[16];
487 memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
488# endif
489
490 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
491 uint64_t oldMsrEFERHost = 0;
492 bool fRestoreEfer = false;
493 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
494 {
495 oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
496 if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
497 {
498 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
499 fRestoreEfer = true;
500 }
501 }
502 cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
503
504 /* Restore EFER MSR */
505 if (fRestoreEfer)
506 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);
507
508# ifdef VBOX_WITH_KERNEL_USING_XMM
509 memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
510# endif
511
512#else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
513# ifdef VBOX_WITH_KERNEL_USING_XMM
514# error "Fix all the NM_TRAPS_IN_KERNEL_MODE code path. I'm not going to fix unused code now."
515# endif
516 cpumR0SaveFPU(pCtx);
517 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
518 {
519 /* fxsave doesn't save the XMM state! */
520 cpumR0SaveXMM(pCtx);
521 }
522
523 /*
524 * Restore the original FPU control word and MXCSR.
525 * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
526 */
527 cpumR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
528 if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
529 cpumR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
530#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
531 }
532
533 pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE);
534 return VINF_SUCCESS;
535}
536
537
538/**
539 * Saves the host debug state, setting CPUM_USED_DEBUG_REGS_HOST and loading
540 * DR7 with safe values.
541 *
542 * @returns VBox status code.
543 * @param pVCpu Pointer to the VMCPU.
544 */
545static int cpumR0SaveHostDebugState(PVMCPU pVCpu)
546{
547 /*
548 * Save the host state.
549 */
550#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
551 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
552 cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
553#else
554 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
555 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
556 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
557 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
558#endif
559 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
560 /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
561 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
562
563 /* Preemption paranoia. */
564 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);
565
566 /*
567 * Make sure DR7 is harmless or else we could trigger breakpoints when
568 * loading guest or hypervisor DRx values later.
569 */
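/* X86_DR7_INIT_VAL is 0x400: only the always-one bit 10 is set and every breakpoint
   enable bit is clear. */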
570 if (pVCpu->cpum.s.Host.dr7 != X86_DR7_INIT_VAL)
571 ASMSetDR7(X86_DR7_INIT_VAL);
572
573 return VINF_SUCCESS;
574}
575
576
577/**
578 * Saves the guest DRx state residing in host registers and restore the host
579 * register values.
580 *
581 * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
582 * since it's assumed that we're shadowing the guest DRx register values
583 * accurately when using the combined hypervisor debug register values
584 * (CPUMR0LoadHyperDebugState).
585 *
586 * @returns true if either guest or hypervisor debug registers were loaded.
587 * @param pVCpu The cross context CPU structure for the calling EMT.
588 * @param fDr6 Whether to include DR6 or not.
589 * @thread EMT(pVCpu)
590 */
591VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
592{
593 bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
594
595 /*
596 * Do we need to save the guest DRx registers loaded into host registers?
597 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
598 */
599 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
600 {
601#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
602 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
603 {
604 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
605 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
606 if (!fDr6)
607 pVCpu->cpum.s.Guest.dr[6] = uDr6;
608 }
609 else
610#endif
611 {
612#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
613 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
614#else
615 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
616 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
617 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
618 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
619#endif
620 if (fDr6)
621 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
622 }
623 }
624 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~( CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
625 | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));
626
627 /*
628 * Restore the host's debug state. DR0-3, DR6 and only then DR7!
629 */
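/* DR7 is written last so that no breakpoint is armed while DR0-3 still hold stale
   guest or hypervisor addresses. */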
630 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
631 {
632 /* A bit of paranoia first... */
633 uint64_t uCurDR7 = ASMGetDR7();
634 if (uCurDR7 != X86_DR7_INIT_VAL)
635 ASMSetDR7(X86_DR7_INIT_VAL);
636
637#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
638 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
639 cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
640#else
641 ASMSetDR0(pVCpu->cpum.s.Host.dr0);
642 ASMSetDR1(pVCpu->cpum.s.Host.dr1);
643 ASMSetDR2(pVCpu->cpum.s.Host.dr2);
644 ASMSetDR3(pVCpu->cpum.s.Host.dr3);
645#endif
646 /** @todo consider only updating if they differ, esp. DR6. Need to figure out how
647 * expensive DRx reads are compared to DRx writes. */
648 ASMSetDR6(pVCpu->cpum.s.Host.dr6);
649 ASMSetDR7(pVCpu->cpum.s.Host.dr7);
650
651 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);
652 }
653
654 return fDrXLoaded;
655}
656
657
658/**
659 * Saves the guest DRx state if it resides in host registers.
660 *
661 * This does NOT clear any use flags, so the host registers remain loaded with
662 * the guest DRx state upon return. The purpose is only to make sure the values
663 * in the CPU context structure are up to date.
664 *
665 * @returns true if the host registers contain guest values, false if not.
666 * @param pVCpu The cross context CPU structure for the calling EMT.
667 * @param fDr6 Whether to include DR6 or not.
668 * @thread EMT(pVCpu)
669 */
670VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
671{
672 /*
673 * Do we need to save the guest DRx registers loaded into host registers?
674 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
675 */
676 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
677 {
678#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
679 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
680 {
681 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
682 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
683 if (!fDr6)
684 pVCpu->cpum.s.Guest.dr[6] = uDr6;
685 }
686 else
687#endif
688 {
689#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
690 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
691#else
692 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
693 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
694 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
695 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
696#endif
697 if (fDr6)
698 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
699 }
700 return true;
701 }
702 return false;
703}
704
705
706/**
707 * Lazily sync in the debug state.
708 *
709 * @param pVCpu The cross context CPU structure for the calling EMT.
710 * @param fDr6 Whether to include DR6 or not.
711 * @thread EMT(pVCpu)
712 */
713VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
714{
715 /*
716 * Save the host state and disarm all host BPs.
717 */
718 cpumR0SaveHostDebugState(pVCpu);
719 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
720
721 /*
722 * Activate the guest state DR0-3.
723 * DR7 and DR6 (if fDr6 is true) are left to the caller.
724 */
725#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
726 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
727 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
728 else
729#endif
730 {
731#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
732 cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
733#else
734 ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
735 ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
736 ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
737 ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
738#endif
739 if (fDr6)
740 ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
741
742 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
743 }
744}
745
746
747/**
748 * Lazily sync in the hypervisor debug state.
749 *
750 * Loads the hypervisor DR0-3 into the host CPU registers; DR7 is left to the caller.
751 * @param pVCpu The cross context CPU structure for the calling EMT.
752 * @param fDr6 Whether to include DR6 or not.
753 * @thread EMT(pVCpu)
754 */
755VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
756{
757 /*
758 * Save the host state and disarm all host BPs.
759 */
760 cpumR0SaveHostDebugState(pVCpu);
761 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
762
763 /*
764 * Make sure the hypervisor values are up to date.
765 */
766 CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */, true);
767
768 /*
769 * Activate the hypervisor state DR0-3.
770 * DR7 and DR6 (if fDr6 is true) are left to the caller.
771 */
772#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
773 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
774 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
775 else
776#endif
777 {
778#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
779 cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
780#else
781 ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
782 ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
783 ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
784 ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
785#endif
786 if (fDr6)
787 ASMSetDR6(X86_DR6_INIT_VAL);
788
789 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
790 }
791}
792
793#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
794
795/**
796 * Per-CPU callback that probes the CPU for APIC support.
797 *
798 * @param idCpu The identifier for the CPU the function is called on.
799 * @param pvUser1 Ignored.
800 * @param pvUser2 Ignored.
801 */
802static DECLCALLBACK(void) cpumR0MapLocalApicCpuProber(RTCPUID idCpu, void *pvUser1, void *pvUser2)
803{
804 NOREF(pvUser1); NOREF(pvUser2);
805 int iCpu = RTMpCpuIdToSetIndex(idCpu);
806 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
807
808 /*
809 * Check for APIC support.
810 */
811 uint32_t uMaxLeaf, u32EBX, u32ECX, u32EDX;
812 ASMCpuId(0, &uMaxLeaf, &u32EBX, &u32ECX, &u32EDX);
813 if ( ( ASMIsIntelCpuEx(u32EBX, u32ECX, u32EDX)
814 || ASMIsAmdCpuEx(u32EBX, u32ECX, u32EDX)
815 || ASMIsViaCentaurCpuEx(u32EBX, u32ECX, u32EDX))
816 && ASMIsValidStdRange(uMaxLeaf))
817 {
818 uint32_t uDummy;
819 ASMCpuId(1, &uDummy, &u32EBX, &u32ECX, &u32EDX);
820 if ( (u32EDX & X86_CPUID_FEATURE_EDX_APIC)
821 && (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
822 {
823 /*
824 * Safe to access the MSR. Read it and calc the BASE (a little complicated).
825 */
826 uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);
827 uint64_t u64Mask = MSR_IA32_APICBASE_BASE_MIN;
828
829 /* see Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36 */
830 uint32_t uMaxExtLeaf;
831 ASMCpuId(0x80000000, &uMaxExtLeaf, &u32EBX, &u32ECX, &u32EDX);
832 if ( uMaxExtLeaf >= UINT32_C(0x80000008)
833 && ASMIsValidExtRange(uMaxExtLeaf))
834 {
835 uint32_t u32PhysBits;
836 ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX);
837 u32PhysBits &= 0xff;
838 u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000);
839 }
840
841 AssertCompile(sizeof(g_aLApics[iCpu].PhysBase) == sizeof(u64ApicBase));
842 g_aLApics[iCpu].PhysBase = u64ApicBase & u64Mask;
843 g_aLApics[iCpu].fEnabled = RT_BOOL(u64ApicBase & MSR_IA32_APICBASE_EN);
844 g_aLApics[iCpu].fX2Apic = (u64ApicBase & (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN))
845 == (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN);
846 }
847 }
848}
849
850
851
852/**
853 * Per-CPU callback that verifies our APIC expectations.
854 *
855 * @param idCpu The identifier for the CPU the function is called on.
856 * @param pvUser1 Ignored.
857 * @param pvUser2 Ignored.
858 */
859static DECLCALLBACK(void) cpumR0MapLocalApicCpuChecker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
860{
861 int iCpu = RTMpCpuIdToSetIndex(idCpu);
862 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
863 if (!g_aLApics[iCpu].fEnabled)
864 return;
865
866 /*
867 * 0x0X 82489 external APIC
868 * 0x1X Local APIC
869 * 0x2X..0xFF reserved
870 */
871 uint32_t uApicVersion;
872 if (g_aLApics[iCpu].fX2Apic)
873 uApicVersion = ApicX2RegRead32(APIC_REG_VERSION);
874 else
875 uApicVersion = ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_VERSION);
876 if ((APIC_REG_VERSION_GET_VER(uApicVersion) & 0xF0) == 0x10)
877 {
878 g_aLApics[iCpu].uVersion = uApicVersion;
879 g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(uApicVersion) >= 5;
880
881#if 0 /* enable if you need it. */
882 if (g_aLApics[iCpu].fX2Apic)
883 SUPR0Printf("CPUM: X2APIC %02u - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
884 iCpu, uApicVersion,
885 ApicX2RegRead32(APIC_REG_LVT_LINT0), ApicX2RegRead32(APIC_REG_LVT_LINT1),
886 ApicX2RegRead32(APIC_REG_LVT_PC), ApicX2RegRead32(APIC_REG_LVT_THMR) );
887 else
888 SUPR0Printf("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
889 iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, uApicVersion,
890 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT0), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT1),
891 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_PC), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_THMR) );
892#endif
893 }
894 else
895 {
896 g_aLApics[iCpu].fEnabled = false;
897 g_aLApics[iCpu].fX2Apic = false;
898 SUPR0Printf("VBox/CPUM: Unsupported APIC version %#x (iCpu=%d)\n", uApicVersion, iCpu);
899 }
900}
901
902
903/**
904 * Map the MMIO page of each local APIC in the system.
905 */
906static int cpumR0MapLocalApics(void)
907{
908 /*
909 * Check that we'll always stay within the array bounds.
910 */
911 if (RTMpGetArraySize() > RT_ELEMENTS(g_aLApics))
912 {
913 LogRel(("CPUM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aLApics)));
914 return VERR_TOO_MANY_CPUS;
915 }
916
917 /*
918 * Create mappings for all online CPUs we think have legacy APICs.
919 */
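/* CPUs in x2APIC mode are skipped below: their APIC registers are accessed via MSRs,
   so no MMIO mapping is needed. */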
920 int rc = RTMpOnAll(cpumR0MapLocalApicCpuProber, NULL, NULL);
921
922 for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
923 {
924 if (g_aLApics[iCpu].fEnabled && !g_aLApics[iCpu].fX2Apic)
925 {
926 rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
927 PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO);
928 if (RT_SUCCESS(rc))
929 {
930 rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj, (void *)-1,
931 PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
932 if (RT_SUCCESS(rc))
933 {
934 g_aLApics[iCpu].pv = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
935 continue;
936 }
937 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
938 }
939 g_aLApics[iCpu].fEnabled = false;
940 }
941 g_aLApics[iCpu].pv = NULL;
942 }
943
944 /*
945 * Check the APICs.
946 */
947 if (RT_SUCCESS(rc))
948 rc = RTMpOnAll(cpumR0MapLocalApicCpuChecker, NULL, NULL);
949
950 if (RT_FAILURE(rc))
951 {
952 cpumR0UnmapLocalApics();
953 return rc;
954 }
955
956#ifdef LOG_ENABLED
957 /*
958 * Log the result (pretty useless, requires enabling CPUM in VBoxDrv
959 * and !VBOX_WITH_R0_LOGGING).
960 */
961 if (LogIsEnabled())
962 {
963 uint32_t cEnabled = 0;
964 uint32_t cX2Apics = 0;
965 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
966 if (g_aLApics[iCpu].fEnabled)
967 {
968 cEnabled++;
969 cX2Apics += g_aLApics[iCpu].fX2Apic;
970 }
971 Log(("CPUM: %u APICs, %u X2APICs\n", cEnabled, cX2Apics));
972 }
973#endif
974
975 return VINF_SUCCESS;
976}
977
978
979/**
980 * Unmap the Local APIC of all host CPUs.
981 */
982static void cpumR0UnmapLocalApics(void)
983{
984 for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;)
985 {
986 if (g_aLApics[iCpu].pv)
987 {
988 RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
989 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
990 g_aLApics[iCpu].hMapObj = NIL_RTR0MEMOBJ;
991 g_aLApics[iCpu].hMemObj = NIL_RTR0MEMOBJ;
992 g_aLApics[iCpu].fEnabled = false;
993 g_aLApics[iCpu].fX2Apic = false;
994 g_aLApics[iCpu].pv = NULL;
995 }
996 }
997}
998
999
1000/**
1001 * Updates CPUMCPU::pvApicBase and CPUMCPU::fX2Apic prior to world switch.
1002 *
1003 * Writes the Local APIC mapping address of the current host CPU to CPUMCPU so
1004 * the world switchers can access the APIC registers for the purpose of
1005 * disabling and re-enabling the NMIs. Must be called with disabled preemption
1006 * or disabled interrupts!
1007 *
1008 * @param pVCpu Pointer to the cross context CPU structure of the
1009 * calling EMT.
1010 * @param idHostCpu The ID of the current host CPU.
1011 */
1012VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, RTCPUID idHostCpu)
1013{
1014 int idxCpu = RTMpCpuIdToSetIndex(idHostCpu);
1015 pVCpu->cpum.s.pvApicBase = g_aLApics[idxCpu].pv;
1016 pVCpu->cpum.s.fX2Apic = g_aLApics[idxCpu].fX2Apic;
1017// Log6(("CPUMR0SetLApic: pvApicBase=%p fX2Apic=%d\n", g_aLApics[idxCpu].pv, g_aLApics[idxCpu].fX2Apic));
1018}
1019
1020#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */
1021