VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp@48426

Last change on this file since 48426 was 48003, checked in by vboxsync, 11 years ago

VMM: CPUMR0Init -> CPUMR0InitVM renamed.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 35.6 KB
1/* $Id: CPUMR0.cpp 48003 2013-08-22 17:45:07Z vboxsync $ */
2/** @file
3 * CPUM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include "CPUMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/err.h>
27#include <VBox/log.h>
28#include <VBox/vmm/hm.h>
29#include <iprt/assert.h>
30#include <iprt/asm-amd64-x86.h>
31#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
32# include <iprt/mem.h>
33# include <iprt/memobj.h>
34# include <VBox/apic.h>
35#endif
36#include <iprt/x86.h>
37
38
39/*******************************************************************************
40* Structures and Typedefs *
41*******************************************************************************/
42#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
43/**
44 * Local APIC mappings.
45 */
46typedef struct CPUMHOSTLAPIC
47{
48 /** Indicates that the entry is in use and has valid data. */
49 bool fEnabled;
50 /** Whether it's operating in X2APIC mode (EXTD). */
51 bool fX2Apic;
52 /** The APIC version number. */
53 uint32_t uVersion;
54 /** Has APIC_REG_LVT_THMR. Not used. */
55 uint32_t fHasThermal;
56 /** The physical address of the APIC registers. */
57 RTHCPHYS PhysBase;
58 /** The memory object for the physical address (entered via RTR0MemObjEnterPhys). */
59 RTR0MEMOBJ hMemObj;
60 /** The mapping object for hMemObj. */
61 RTR0MEMOBJ hMapObj;
62 /** The mapping address of the APIC registers.
63 * @remarks Different CPUs may use the same physical address to map their
64 * APICs, so this pointer is only valid when on the CPU owning the
65 * APIC. */
66 void *pv;
67} CPUMHOSTLAPIC;
68#endif
69
70
71/*******************************************************************************
72* Global Variables *
73*******************************************************************************/
74#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
75static CPUMHOSTLAPIC g_aLApics[RTCPUSET_MAX_CPUS];
76#endif
77
78
79/*******************************************************************************
80* Internal Functions *
81*******************************************************************************/
82#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
83static int cpumR0MapLocalApics(void);
84static void cpumR0UnmapLocalApics(void);
85#endif
86static int cpumR0SaveHostDebugState(PVMCPU pVCpu);
87
88
89/**
90 * Does the Ring-0 CPU initialization once during module load.
91 * XXX Host-CPU hot-plugging?
92 */
93VMMR0_INT_DECL(int) CPUMR0ModuleInit(void)
94{
95 int rc = VINF_SUCCESS;
96#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
97 rc = cpumR0MapLocalApics();
98#endif
99 return rc;
100}
101
102
103/**
104 * Terminate the module.
105 */
106VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void)
107{
108#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
109 cpumR0UnmapLocalApics();
110#endif
111 return VINF_SUCCESS;
112}
113
114
115/**
116 * Check the CPUID features of this particular CPU and disable the guest
117 * features that do not exist on this CPU. We have seen systems where the
118 * X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
119 * @bugref{5436}.
120 *
121 * @note This function might be called simultaneously on more than one CPU!
122 *
123 * @param idCpu The identifier for the CPU the function is called on.
124 * @param pvUser1 Pointer to the VM structure.
125 * @param pvUser2 Ignored.
126 */
127static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
128{
129 struct
130 {
131 uint32_t uLeave; /* leaf to check */
132 uint32_t ecx; /* which bits in ecx to unify between CPUs */
133 uint32_t edx; /* which bits in edx to unify between CPUs */
134 } aCpuidUnify[]
135 =
136 {
137 { 0x00000001, X86_CPUID_FEATURE_ECX_CX16
138 | X86_CPUID_FEATURE_ECX_MONITOR,
139 X86_CPUID_FEATURE_EDX_CX8 }
140 };
141 PVM pVM = (PVM)pvUser1;
142 PCPUM pCPUM = &pVM->cpum.s;
143 for (uint32_t i = 0; i < RT_ELEMENTS(aCpuidUnify); i++)
144 {
145 uint32_t uLeave = aCpuidUnify[i].uLeave;
146 uint32_t eax, ebx, ecx, edx;
147
148 ASMCpuId_Idx_ECX(uLeave, 0, &eax, &ebx, &ecx, &edx);
149 PCPUMCPUID paLeaves;
150 if (uLeave < 0x80000000)
151 paLeaves = &pCPUM->aGuestCpuIdStd[uLeave - 0x00000000];
152 else if (uLeave < 0xc0000000)
153 paLeaves = &pCPUM->aGuestCpuIdExt[uLeave - 0x80000000];
154 else
155 paLeaves = &pCPUM->aGuestCpuIdCentaur[uLeave - 0xc0000000];
156 /* unify important bits */
157 ASMAtomicAndU32(&paLeaves->ecx, ecx | ~aCpuidUnify[i].ecx);
158 ASMAtomicAndU32(&paLeaves->edx, edx | ~aCpuidUnify[i].edx);
159 }
160}
161
162
163/**
164 * Does Ring-0 CPUM initialization.
165 *
166 * This is mainly to check that the Host CPU mode is compatible
167 * with VBox.
168 *
169 * @returns VBox status code.
170 * @param pVM Pointer to the VM.
171 */
172VMMR0_INT_DECL(int) CPUMR0InitVM(PVM pVM)
173{
174 LogFlow(("CPUMR0Init: %p\n", pVM));
175
176 /*
177 * Check CR0 & CR4 flags.
178 */
179 uint32_t u32CR0 = ASMGetCR0();
180 if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
181 {
182 Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
183 return VERR_UNSUPPORTED_CPU_MODE;
184 }
185
186 /*
187 * Check for sysenter and syscall usage.
188 */
189 if (ASMHasCpuId())
190 {
191 /*
192 * SYSENTER/SYSEXIT
193 *
194 * Intel docs claim you should test both the flag and family, model &
195 * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
196 * but don't support it. AMD CPUs may support this feature in legacy
197 * mode, but they've banned it from long mode. Since we switch to 32-bit
198 * mode when entering raw-mode context the feature would become
199 * accessible again on AMD CPUs, so we have to check regardless of
200 * host bitness.
201 */
202 uint32_t u32CpuVersion;
203 uint32_t u32Dummy;
204 uint32_t fFeatures;
205 ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
206 uint32_t u32Family = u32CpuVersion >> 8;
207 uint32_t u32Model = (u32CpuVersion >> 4) & 0xF;
208 uint32_t u32Stepping = u32CpuVersion & 0xF;
209 if ( (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
210 && ( u32Family != 6 /* (> pentium pro) */
211 || u32Model >= 3
212 || u32Stepping >= 3
213 || !ASMIsIntelCpu())
214 )
215 {
216 /*
217 * Read the MSR and see if it's in use or not.
218 */
219 uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
220 if (u32)
221 {
222 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSENTER;
223 Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
224 }
225 }
226
227 /*
228 * SYSCALL/SYSRET
229 *
230 * This feature is indicated by the SYSCALL bit returned in EDX by CPUID
231 * function 0x80000001. Intel CPUs only support this feature in
232 * long mode. Since we're not running 64-bit guests in raw-mode there
233 * are no issues with 32-bit Intel hosts.
234 */
235 uint32_t cExt = 0;
236 ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
237 if ( cExt >= 0x80000001
238 && cExt <= 0x8000ffff)
239 {
240 uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
241 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
242 {
243#ifdef RT_ARCH_X86
244# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
245 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
246# else
247 if (!ASMIsIntelCpu())
248# endif
249#endif
250 {
251 uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
252 if (fEfer & MSR_K6_EFER_SCE)
253 {
254 pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSCALL;
255 Log(("CPUMR0Init: host uses syscall\n"));
256 }
257 }
258 }
259 }
260
261 RTMpOnAll(cpumR0CheckCpuid, pVM, NULL);
262 }
263
264
265 /*
266 * Check if debug registers are armed.
267 * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
268 */
269 uint32_t u32DR7 = ASMGetDR7();
270 if (u32DR7 & X86_DR7_ENABLED_MASK)
271 {
272 for (VMCPUID i = 0; i < pVM->cCpus; i++)
273 pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
274 Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
275 }
276
277 return VINF_SUCCESS;
278}
279
280
281/**
282 * Lazily sync in the FPU/XMM state
283 *
284 * @returns VBox status code.
285 * @param pVM Pointer to the VM.
286 * @param pVCpu Pointer to the VMCPU.
287 * @param pCtx Pointer to the guest CPU context.
288 */
289VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
290{
291 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
292 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
293
294 /* If the FPU state has already been loaded, then it's a guest trap. */
295 if (CPUMIsGuestFPUStateActive(pVCpu))
296 {
297 Assert( ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM))
298 || ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)));
299 return VINF_EM_RAW_GUEST_TRAP;
300 }
301
302 /*
303 * There are two basic actions:
304 * 1. Save host fpu and restore guest fpu.
305 * 2. Generate guest trap.
306 *
307 * When entering the hypervisor we'll always enable MP (for proper wait
308 * trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
309 * is taken from the guest OS in order to get proper SSE handling.
310 *
311 *
312 * Actions taken depending on the guest CR0 flags:
313 *
314 * 3 2 1
315 * TS | EM | MP | FPUInstr | WAIT :: VMM Action
316 * ------------------------------------------------------------------------
317 * 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC.
318 * 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC.
319 * 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC.
320 * 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC.
321 * 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
322 * 1 | 0 | 1 | #NM | #NM :: Go to guest taking trap there.
323 * 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
324 * 1 | 1 | 1 | #NM | #NM :: Go to guest taking trap there.
325 */
326
327 switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
328 {
329 case X86_CR0_MP | X86_CR0_TS:
330 case X86_CR0_MP | X86_CR0_TS | X86_CR0_EM:
331 return VINF_EM_RAW_GUEST_TRAP;
332 default:
333 break;
334 }
335
336#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
337 if (CPUMIsGuestInLongModeEx(pCtx))
338 {
339 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
340
341 /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
342 cpumR0SaveHostFPUState(&pVCpu->cpum.s);
343
344 /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
345 pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
346 }
347 else
348#endif
349 {
350#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
351# if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */
352 Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
353 /** @todo Move the FFXSR handling down into
354 * cpumR0SaveHostRestoreGuestFPUState to optimize the
355 * VBOX_WITH_KERNEL_USING_XMM handling. */
356 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
357 uint64_t SavedEFER = 0;
358 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
359 {
360 SavedEFER = ASMRdMsr(MSR_K6_EFER);
361 if (SavedEFER & MSR_K6_EFER_FFXSR)
362 {
363 ASMWrMsr(MSR_K6_EFER, SavedEFER & ~MSR_K6_EFER_FFXSR);
364 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
365 }
366 }
367
368 /* Do the job and record that we've switched FPU state. */
369 cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
370
371 /* Restore EFER. */
372 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
373 ASMWrMsr(MSR_K6_EFER, SavedEFER);
374
375# else
376 uint64_t oldMsrEFERHost = 0;
377 uint32_t oldCR0 = ASMGetCR0();
378
379 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
380 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
381 {
382 /** @todo Do we really need to read this every time?? The host could change this on the fly though.
383 * bird: what about starting by skipping the ASMWrMsr below if we didn't
384 * change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
385 oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
386 if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
387 {
388 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
389 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
390 }
391 }
392
393 /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
394 int rc = CPUMHandleLazyFPU(pVCpu);
395 AssertRC(rc);
396 Assert(CPUMIsGuestFPUStateActive(pVCpu));
397
398 /* Restore EFER MSR */
399 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
400 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);
401
402 /* CPUMHandleLazyFPU could have changed CR0; restore it. */
403 ASMSetCR0(oldCR0);
404# endif
405
406#else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
407
408 /*
409 * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
410 * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
411 */
412 pVCpu->cpum.s.Host.fpu.FCW = CPUMGetFCW();
413 if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
414 pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();
415
416 cpumR0LoadFPU(pCtx);
417
418 /*
419 * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
420 *
421 * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstor: the XMM state isn't saved/restored.
422 */
423 if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
424 {
425 /** @todo Do we really need to read this every time?? The host could change this on the fly though. */
426 uint64_t msrEFERHost = ASMRdMsr(MSR_K6_EFER);
427
428 if (msrEFERHost & MSR_K6_EFER_FFXSR)
429 {
430 /* fxrstor doesn't restore the XMM state! */
431 cpumR0LoadXMM(pCtx);
432 pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
433 }
434 }
435
436#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
437 }
438
439 Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
440 return VINF_SUCCESS;
441}
442
443
444/**
445 * Save guest FPU/XMM state
446 *
447 * @returns VBox status code.
448 * @param pVM Pointer to the VM.
449 * @param pVCpu Pointer to the VMCPU.
450 * @param pCtx Pointer to the guest CPU context.
451 */
452VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
453{
454 Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
455 Assert(ASMGetCR4() & X86_CR4_OSFSXR);
456 AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
457 NOREF(pCtx);
458
459#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
460 if (CPUMIsGuestInLongModeEx(pCtx))
461 {
462 if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
463 {
464 HMR0SaveFPUState(pVM, pVCpu, pCtx);
465 cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
466 }
467 /* else nothing to do; we didn't perform a world switch */
468 }
469 else
470#endif
471 {
472#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
473# ifdef VBOX_WITH_KERNEL_USING_XMM
474 /*
475 * The assembly wrapper has already saved the guest XMM registers, so we
476 * must stash that copy before the full FPU state save below and put it
477 * back afterwards.
478 */
479 /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
480 * I'm not able to test such an optimization tonight.
481 * We could just do all this in assembly. */
482 uint128_t aGuestXmmRegs[16];
483 memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
484# endif
485
486 /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
487 uint64_t oldMsrEFERHost = 0;
488 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
489 {
490 oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
491 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
492 }
493 cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
494
495 /* Restore EFER MSR */
496 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
497 ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);
498
499# ifdef VBOX_WITH_KERNEL_USING_XMM
500 memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
501# endif
502
503#else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
504# ifdef VBOX_WITH_KERNEL_USING_XMM
505# error "Fix all the NM_TRAPS_IN_KERNEL_MODE code path. I'm not going to fix unused code now."
506# endif
507 cpumR0SaveFPU(pCtx);
508 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
509 {
510 /* fxsave doesn't save the XMM state! */
511 cpumR0SaveXMM(pCtx);
512 }
513
514 /*
515 * Restore the original FPU control word and MXCSR.
516 * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
517 */
518 cpumR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
519 if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
520 cpumR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
521#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
522 }
523
524 pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE);
525 return VINF_SUCCESS;
526}
527
528
529/**
530 * Saves the host debug state, setting CPUM_USED_DEBUG_REGS_HOST and loading
531 * DR7 with safe values.
532 *
533 * @returns VBox status code.
534 * @param pVCpu Pointer to the VMCPU.
535 */
536static int cpumR0SaveHostDebugState(PVMCPU pVCpu)
537{
538 /*
539 * Save the host state.
540 */
541#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
542 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
543 cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
544#else
545 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
546 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
547 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
548 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
549#endif
550 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
551 /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
552 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
553
554 /* Preemption paranoia. */
555 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);
556
557 /*
558 * Make sure DR7 is harmless or else we could trigger breakpoints when
559 * loading guest or hypervisor DRx values later.
560 */
561 if (pVCpu->cpum.s.Host.dr7 != X86_DR7_INIT_VAL)
562 ASMSetDR7(X86_DR7_INIT_VAL);
563
564 return VINF_SUCCESS;
565}
566
567
568/**
569 * Saves the guest DRx state residing in host registers and restores the host
570 * register values.
571 *
572 * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
573 * since it's assumed that we're shadowing the guest DRx register values
574 * accurately when using the combined hypervisor debug register values
575 * (CPUMR0LoadHyperDebugState).
576 *
577 * @returns true if either guest or hypervisor debug registers were loaded.
578 * @param pVCpu The cross context CPU structure for the calling EMT.
579 * @param fDr6 Whether to include DR6 or not.
580 * @thread EMT(pVCpu)
581 */
582VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
583{
584 bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
585
586 /*
587 * Do we need to save the guest DRx registers loaded into the host registers?
588 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
589 */
590 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
591 {
592#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
593 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
594 {
595 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
596 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
597 if (!fDr6)
598 pVCpu->cpum.s.Guest.dr[6] = uDr6;
599 }
600 else
601#endif
602 {
603#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
604 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
605#else
606 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
607 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
608 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
609 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
610#endif
611 if (fDr6)
612 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
613 }
614 }
615 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~( CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
616 | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));
617
618 /*
619 * Restore the host's debug state. DR0-3, DR6 and only then DR7!
620 */
621 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
622 {
623 /* A bit of paranoia first... */
624 uint64_t uCurDR7 = ASMGetDR7();
625 if (uCurDR7 != X86_DR7_INIT_VAL)
626 ASMSetDR7(X86_DR7_INIT_VAL);
627
628#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
629 AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
630 cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
631#else
632 ASMSetDR0(pVCpu->cpum.s.Host.dr0);
633 ASMSetDR1(pVCpu->cpum.s.Host.dr1);
634 ASMSetDR2(pVCpu->cpum.s.Host.dr2);
635 ASMSetDR3(pVCpu->cpum.s.Host.dr3);
636#endif
637 /** @todo consider only updating if they differ, esp. DR6. Need to figure out how
638 * expensive DRx reads are compared to DRx writes. */
639 ASMSetDR6(pVCpu->cpum.s.Host.dr6);
640 ASMSetDR7(pVCpu->cpum.s.Host.dr7);
641
642 ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);
643 }
644
645 return fDrXLoaded;
646}
647
648
649/**
650 * Saves the guest DRx state if it resides in host registers.
651 *
652 * This does NOT clear any use flags, so the host registers remain loaded with
653 * the guest DRx state upon return. The purpose is only to make sure the values
654 * in the CPU context structure are up to date.
655 *
656 * @returns true if the host registers contain guest values, false if not.
657 * @param pVCpu The cross context CPU structure for the calling EMT.
658 * @param fDr6 Whether to include DR6 or not.
659 * @thread EMT(pVCpu)
660 */
661VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
662{
663 /*
664 * Do we need to save the guest DRx registers loaded into the host registers?
665 * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
666 */
667 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
668 {
669#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
670 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
671 {
672 uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
673 HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
674 if (!fDr6)
675 pVCpu->cpum.s.Guest.dr[6] = uDr6;
676 }
677 else
678#endif
679 {
680#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
681 cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
682#else
683 pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
684 pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
685 pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
686 pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
687#endif
688 if (fDr6)
689 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
690 }
691 return true;
692 }
693 return false;
694}
695
696
697/**
698 * Lazily sync in the debug state.
699 *
700 * @param pVCpu The cross context CPU structure for the calling EMT.
701 * @param fDr6 Whether to include DR6 or not.
702 * @thread EMT(pVCpu)
703 */
704VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
705{
706 /*
707 * Save the host state and disarm all host BPs.
708 */
709 cpumR0SaveHostDebugState(pVCpu);
710 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
711
712 /*
713 * Activate the guest state DR0-3.
714 * DR7 and DR6 (if fDr6 is true) are left to the caller.
715 */
716#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
717 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
718 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
719 else
720#endif
721 {
722#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
723 cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
724#else
725 ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
726 ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
727 ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
728 ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
729#endif
730 if (fDr6)
731 ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
732
733 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
734 }
735}
736
737
738/**
739 * Lazily sync in the hypervisor debug state
740 *
742 * @param pVCpu The cross context CPU structure for the calling EMT.
743 * @param fDr6 Whether to include DR6 or not.
744 * @thread EMT(pVCpu)
745 */
746VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
747{
748 /*
749 * Save the host state and disarm all host BPs.
750 */
751 cpumR0SaveHostDebugState(pVCpu);
752 Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
753
754 /*
755 * Make sure the hypervisor values are up to date.
756 */
757 CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */, true);
758
759 /*
760 * Activate the hypervisor state DR0-3.
761 * DR7 and DR6 (if fDr6 is true) are left to the caller.
762 */
763#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
764 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
765 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
766 else
767#endif
768 {
769#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
770 cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
771#else
772 ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
773 ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
774 ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
775 ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
776#endif
777 if (fDr6)
778 ASMSetDR6(X86_DR6_INIT_VAL);
779
780 ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
781 }
782}
783
784#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
785
786/**
787 * Per-CPU callback that probes the CPU for APIC support.
788 *
789 * @param idCpu The identifier for the CPU the function is called on.
790 * @param pvUser1 Ignored.
791 * @param pvUser2 Ignored.
792 */
793static DECLCALLBACK(void) cpumR0MapLocalApicCpuProber(RTCPUID idCpu, void *pvUser1, void *pvUser2)
794{
795 NOREF(pvUser1); NOREF(pvUser2);
796 int iCpu = RTMpCpuIdToSetIndex(idCpu);
797 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
798
799 /*
800 * Check for APIC support.
801 */
802 uint32_t uMaxLeaf, u32EBX, u32ECX, u32EDX;
803 ASMCpuId(0, &uMaxLeaf, &u32EBX, &u32ECX, &u32EDX);
804 if ( ( ASMIsIntelCpuEx(u32EBX, u32ECX, u32EDX)
805 || ASMIsAmdCpuEx(u32EBX, u32ECX, u32EDX)
806 || ASMIsViaCentaurCpuEx(u32EBX, u32ECX, u32EDX))
807 && ASMIsValidStdRange(uMaxLeaf))
808 {
809 uint32_t uDummy;
810 ASMCpuId(1, &uDummy, &u32EBX, &u32ECX, &u32EDX);
811 if ( (u32EDX & X86_CPUID_FEATURE_EDX_APIC)
812 && (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
813 {
814 /*
815 * Safe to access the MSR. Read it and calc the BASE (a little complicated).
816 */
817 uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);
818 uint64_t u64Mask = MSR_IA32_APICBASE_BASE_MIN;
819
820 /* see Intel Manual: Local APIC Status and Location: MAXPHYADDR defaults to 36 bits */
821 uint32_t uMaxExtLeaf;
822 ASMCpuId(0x80000000, &uMaxExtLeaf, &u32EBX, &u32ECX, &u32EDX);
823 if ( uMaxExtLeaf >= UINT32_C(0x80000008)
824 && ASMIsValidExtRange(uMaxExtLeaf))
825 {
826 uint32_t u32PhysBits;
827 ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX);
828 u32PhysBits &= 0xff;
829 u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000);
830 }
831
832 AssertCompile(sizeof(g_aLApics[iCpu].PhysBase) == sizeof(u64ApicBase));
833 g_aLApics[iCpu].PhysBase = u64ApicBase & u64Mask;
834 g_aLApics[iCpu].fEnabled = RT_BOOL(u64ApicBase & MSR_IA32_APICBASE_EN);
835 g_aLApics[iCpu].fX2Apic = (u64ApicBase & (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN))
836 == (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN);
837 }
838 }
839}
840
841
842
843/**
844 * Per-CPU callback that verifies our APIC expectations.
845 *
846 * @param idCpu The identifier for the CPU the function is called on.
847 * @param pvUser1 Ignored.
848 * @param pvUser2 Ignored.
849 */
850static DECLCALLBACK(void) cpumR0MapLocalApicCpuChecker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
851{
852 int iCpu = RTMpCpuIdToSetIndex(idCpu);
853 AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
854 if (!g_aLApics[iCpu].fEnabled)
855 return;
856
857 /*
858 * 0x0X 82489 external APIC
859 * 0x1X Local APIC
860 * 0x2X..0xFF reserved
861 */
862 uint32_t uApicVersion;
863 if (g_aLApics[iCpu].fX2Apic)
864 uApicVersion = ApicX2RegRead32(APIC_REG_VERSION);
865 else
866 uApicVersion = ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_VERSION);
867 if ((APIC_REG_VERSION_GET_VER(uApicVersion) & 0xF0) == 0x10)
868 {
869 g_aLApics[iCpu].uVersion = uApicVersion;
870 g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(uApicVersion) >= 5;
871
872#if 0 /* enable if you need it. */
873 if (g_aLApics[iCpu].fX2Apic)
874 SUPR0Printf("CPUM: X2APIC %02u - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
875 iCpu, uApicVersion,
876 ApicX2RegRead32(APIC_REG_LVT_LINT0), ApicX2RegRead32(APIC_REG_LVT_LINT1),
877 ApicX2RegRead32(APIC_REG_LVT_PC), ApicX2RegRead32(APIC_REG_LVT_THMR) );
878 else
879 SUPR0Printf("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x\n",
880 iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, uApicVersion,
881 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT0), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT1),
882 ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_PC), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_THMR) );
883#endif
884 }
885 else
886 {
887 g_aLApics[iCpu].fEnabled = false;
888 g_aLApics[iCpu].fX2Apic = false;
889 SUPR0Printf("VBox/CPUM: Unsupported APIC version %#x (iCpu=%d)\n", uApicVersion, iCpu);
890 }
891}
892
893
894/**
895 * Map the MMIO page of each local APIC in the system.
896 */
897static int cpumR0MapLocalApics(void)
898{
899 /*
900 * Check that we'll always stay within the array bounds.
901 */
902 if (RTMpGetArraySize() > RT_ELEMENTS(g_aLApics))
903 {
904 LogRel(("CPUM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aLApics)));
905 return VERR_TOO_MANY_CPUS;
906 }
907
908 /*
909 * Create mappings for all online CPUs we think have legacy APICs.
910 */
911 int rc = RTMpOnAll(cpumR0MapLocalApicCpuProber, NULL, NULL);
912
913 for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
914 {
915 if (g_aLApics[iCpu].fEnabled && !g_aLApics[iCpu].fX2Apic)
916 {
917 rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
918 PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO);
919 if (RT_SUCCESS(rc))
920 {
921 rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj, (void *)-1,
922 PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
923 if (RT_SUCCESS(rc))
924 {
925 g_aLApics[iCpu].pv = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
926 continue;
927 }
928 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
929 }
930 g_aLApics[iCpu].fEnabled = false;
931 }
932 g_aLApics[iCpu].pv = NULL;
933 }
934
935 /*
936 * Check the APICs.
937 */
938 if (RT_SUCCESS(rc))
939 rc = RTMpOnAll(cpumR0MapLocalApicCpuChecker, NULL, NULL);
940
941 if (RT_FAILURE(rc))
942 {
943 cpumR0UnmapLocalApics();
944 return rc;
945 }
946
947#ifdef LOG_ENABLED
948 /*
949 * Log the result (pretty useless, requires enabling CPUM in VBoxDrv
950 * and !VBOX_WITH_R0_LOGGING).
951 */
952 if (LogIsEnabled())
953 {
954 uint32_t cEnabled = 0;
955 uint32_t cX2Apics = 0;
956 for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
957 if (g_aLApics[iCpu].fEnabled)
958 {
959 cEnabled++;
960 cX2Apics += g_aLApics[iCpu].fX2Apic;
961 }
962 Log(("CPUM: %u APICs, %u X2APICs\n", cEnabled, cX2Apics));
963 }
964#endif
965
966 return VINF_SUCCESS;
967}
968
969
970/**
971 * Unmap the Local APIC of each host CPU.
972 */
973static void cpumR0UnmapLocalApics(void)
974{
975 for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;)
976 {
977 if (g_aLApics[iCpu].pv)
978 {
979 RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
980 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
981 g_aLApics[iCpu].hMapObj = NIL_RTR0MEMOBJ;
982 g_aLApics[iCpu].hMemObj = NIL_RTR0MEMOBJ;
983 g_aLApics[iCpu].fEnabled = false;
984 g_aLApics[iCpu].fX2Apic = false;
985 g_aLApics[iCpu].pv = NULL;
986 }
987 }
988}
989
990
991/**
992 * Updates CPUMCPU::pvApicBase and CPUMCPU::fX2Apic prior to world switch.
993 *
994 * Writes the Local APIC mapping address of the current host CPU to CPUMCPU so
995 * the world switchers can access the APIC registers for the purpose of
996 * disabling and re-enabling the NMIs. Must be called with disabled preemption
997 * or disabled interrupts!
998 *
999 * @param pVCpu Pointer to the cross context CPU structure of the
1000 * calling EMT.
1001 * @param idHostCpu The ID of the current host CPU.
1002 */
1003VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, RTCPUID idHostCpu)
1004{
1005 int idxCpu = RTMpCpuIdToSetIndex(idHostCpu);
1006 pVCpu->cpum.s.pvApicBase = g_aLApics[idxCpu].pv;
1007 pVCpu->cpum.s.fX2Apic = g_aLApics[idxCpu].fX2Apic;
1008// Log6(("CPUMR0SetLApic: pvApicBase=%p fX2Apic=%d\n", g_aLApics[idxCpu].pv, g_aLApics[idxCpu].fX2Apic));
1009}
1010
1011#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */
1012