VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/APICAll.cpp@64279

Last change on this file since 64279 was 64112, checked in by vboxsync, 9 years ago

VMM/APIC: comment nit.

1/* $Id: APICAll.cpp 64112 2016-09-30 14:18:59Z vboxsync $ */
2/** @file
3 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_APIC
23#include "APICInternal.h"
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/vmcpuset.h>
27
28
29/*********************************************************************************************************************************
30* Global Variables *
31*********************************************************************************************************************************/
32#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
33/** An ordered array of valid LVT masks. */
34static const uint32_t g_au32LvtValidMasks[] =
35{
36 XAPIC_LVT_TIMER_VALID,
37 XAPIC_LVT_THERMAL_VALID,
38 XAPIC_LVT_PERF_VALID,
39 XAPIC_LVT_LINT_VALID, /* LINT0 */
40 XAPIC_LVT_LINT_VALID, /* LINT1 */
41 XAPIC_LVT_ERROR_VALID
42};
43#endif
44
45#if 0
46/** @todo CMCI */
47static const uint32_t g_au32LvtExtValidMask[] =
48{
49 XAPIC_LVT_CMCI_VALID
50};
51#endif
52
53
54/**
55 * Checks if a vector is set in an APIC 256-bit sparse register.
56 *
57 * @returns true if the specified vector is set, false otherwise.
58 * @param pApicReg The APIC 256-bit sparse register.
59 * @param uVector The vector to check if set.
60 */
61DECLINLINE(bool) apicTestVectorInReg(const volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
62{
63 const volatile uint8_t *pbBitmap = (const volatile uint8_t *)&pApicReg->u[0];
64 return ASMBitTest(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
65}
66
67
68/**
69 * Sets the vector in an APIC 256-bit sparse register.
70 *
71 * @param pApicReg The APIC 256-bit sparse register.
72 * @param uVector The vector to set.
73 */
74DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
75{
76 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
77 ASMAtomicBitSet(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
78}
79
80
81/**
82 * Clears the vector in an APIC 256-bit sparse register.
83 *
84 * @param pApicReg The APIC 256-bit sparse register.
85 * @param uVector The vector to clear.
86 */
87DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
88{
89 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
90 ASMAtomicBitClear(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
91}
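
/*
 * Illustrative sketch (excluded from the build): how a vector maps into the
 * 256-bit sparse register, assuming the usual xAPIC layout where vector N
 * occupies bit (N % 32) of 32-bit fragment (N / 32).
 */
#if 0
static void apicExampleVectorReg(void)
{
    XAPIC256BITREG Reg;
    RT_ZERO(Reg);
    uint8_t const uVector = 0x61;               /* fragment 3 (0x61 / 32), bit 1 (0x61 % 32) */
    apicSetVectorInReg(&Reg, uVector);          /* atomically sets the bit */
    Assert(apicTestVectorInReg(&Reg, uVector)); /* reads back as set */
    apicClearVectorInReg(&Reg, uVector);        /* atomically clears it again */
}
#endif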
92
93
94#if 0 /* unused */
95/**
96 * Checks if a vector is set in an APIC Pending-Interrupt Bitmap (PIB).
97 *
98 * @returns true if the specified vector is set, false otherwise.
99 * @param pvPib Opaque pointer to the PIB.
100 * @param uVector The vector to check if set.
101 */
102DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
103{
104 return ASMBitTest(pvPib, uVector);
105}
106#endif /* unused */
107
108
109/**
110 * Atomically sets the PIB notification bit.
111 *
112 * @returns non-zero if the bit was already set, 0 otherwise.
113 * @param pApicPib Pointer to the PIB.
114 */
115DECLINLINE(uint32_t) apicSetNotificationBitInPib(PAPICPIB pApicPib)
116{
117 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, RT_BIT_32(31));
118}
119
120
121/**
122 * Atomically tests and clears the PIB notification bit.
123 *
124 * @returns non-zero if the bit was already set, 0 otherwise.
125 * @param pApicPib Pointer to the PIB.
126 */
127DECLINLINE(uint32_t) apicClearNotificationBitInPib(PAPICPIB pApicPib)
128{
129 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, UINT32_C(0));
130}
131
132
133/**
134 * Sets the vector in an APIC Pending-Interrupt Bitmap (PIB).
135 *
136 * @param pvPib Opaque pointer to the PIB.
137 * @param uVector The vector to set.
138 */
139DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
140{
141 ASMAtomicBitSet(pvPib, uVector);
142}
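
/*
 * Illustrative sketch (excluded from the build): the intended producer/consumer
 * handshake on the PIB. The poster sets the vector bit and only needs to poke
 * the target VCPU when the notification bit was previously clear; the consumer
 * clears the notification bit before scanning so no posting is lost. The
 * au64VectorBitmap field name is an assumption for illustration.
 */
#if 0
/* Poster (any thread): */
apicSetVectorInPib(&pApicPib->au64VectorBitmap[0], uVector);
if (!apicSetNotificationBitInPib(pApicPib))
{
    /* Notification bit was clear: signal the target VCPU exactly once. */
}
/* Consumer (EMT): clear first so a concurrent post re-raises the bit... */
apicClearNotificationBitInPib(pApicPib);
/* ...then harvest the pending vector bits into the IRR. */
#endif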
143
144#if 0 /* unused */
145/**
146 * Clears the vector in an APIC Pending-Interrupt Bitmap (PIB).
147 *
148 * @param pvPib Opaque pointer to the PIB.
149 * @param uVector The vector to clear.
150 */
151DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
152{
153 ASMAtomicBitClear(pvPib, uVector);
154}
155#endif /* unused */
156
157#if 0 /* unused */
158/**
159 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
160 * register.
161 *
162 * @param pApicReg The APIC 256-bit sparse register.
163 * @param idxFragment The index of the 32-bit fragment in @a
164 * pApicReg.
165 * @param u32Fragment The 32-bit vector fragment to OR.
166 */
167DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
168{
169 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
170 ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
171}
172#endif /* unused */
173
174
175#if 0 /* unused */
176/**
177 * Atomically AND's a fragment (32 vectors) into an APIC
178 * 256-bit sparse register.
179 *
180 * @param pApicReg The APIC 256-bit sparse register.
181 * @param idxFragment The index of the 32-bit fragment in @a
182 * pApicReg.
183 * @param u32Fragment The 32-bit vector fragment to AND.
184 */
185DECLINLINE(void) apicAndVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
186{
187 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
188 ASMAtomicAndU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
189}
190#endif /* unused */
191
192
193/**
194 * Reports and returns the appropriate error code for invalid MSR accesses.
195 *
196 * @returns Strict VBox status code.
197 * @retval VINF_CPUM_R3_MSR_WRITE if the MSR write could not be serviced in the
198 * current context (raw-mode or ring-0).
199 * @retval VINF_CPUM_R3_MSR_READ if the MSR read could not be serviced in the
200 * current context (raw-mode or ring-0).
201 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
202 * appropriate actions.
203 *
204 * @param pVCpu The cross context virtual CPU structure.
205 * @param u32Reg The MSR being accessed.
206 * @param enmAccess The invalid-access type.
207 */
208static VBOXSTRICTRC apicMsrAccessError(PVMCPU pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
209{
210 static struct
211 {
212 const char *pszBefore; /* The error message before printing the MSR index */
213 const char *pszAfter; /* The error message after printing the MSR index */
214 int rcRZ; /* The RZ error code */
215 } const s_aAccess[] =
216 {
217 /* enmAccess pszBefore pszAfter rcRZ */
218 /* 0 */ { "read MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_READ },
219 /* 1 */ { "write MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_WRITE },
220 /* 2 */ { "read reserved/unknown MSR", "", VINF_CPUM_R3_MSR_READ },
221 /* 3 */ { "write reserved/unknown MSR", "", VINF_CPUM_R3_MSR_WRITE },
222 /* 4 */ { "read write-only MSR", "", VINF_CPUM_R3_MSR_READ },
223 /* 5 */ { "write read-only MSR", "", VINF_CPUM_R3_MSR_WRITE },
224 /* 6 */ { "read reserved bits of MSR", "", VINF_CPUM_R3_MSR_READ },
225 /* 7 */ { "write reserved bits of MSR", "", VINF_CPUM_R3_MSR_WRITE },
226 /* 8 */ { "write an invalid value to MSR", "", VINF_CPUM_R3_MSR_WRITE },
227 /* 9 */ { "write MSR", "disallowed by configuration", VINF_CPUM_R3_MSR_WRITE }
228 };
229 AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);
230
231 size_t const i = enmAccess;
232 Assert(i < RT_ELEMENTS(s_aAccess));
233#ifdef IN_RING3
234 LogRelMax(5, ("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg,
235 s_aAccess[i].pszAfter));
236 return VERR_CPUM_RAISE_GP_0;
237#else
238 RT_NOREF_PV(u32Reg); RT_NOREF_PV(pVCpu);
239 return s_aAccess[i].rcRZ;
240#endif
241}
242
243
244/**
245 * Gets the descriptive APIC mode.
246 *
247 * @returns The name.
248 * @param enmMode The xAPIC mode.
249 */
250const char *apicGetModeName(APICMODE enmMode)
251{
252 switch (enmMode)
253 {
254 case APICMODE_DISABLED: return "Disabled";
255 case APICMODE_XAPIC: return "xAPIC";
256 case APICMODE_X2APIC: return "x2APIC";
257 default: break;
258 }
259 return "Invalid";
260}
261
262
263/**
264 * Gets the descriptive destination format name.
265 *
266 * @returns The destination format name.
267 * @param enmDestFormat The destination format.
268 */
269const char *apicGetDestFormatName(XAPICDESTFORMAT enmDestFormat)
270{
271 switch (enmDestFormat)
272 {
273 case XAPICDESTFORMAT_FLAT: return "Flat";
274 case XAPICDESTFORMAT_CLUSTER: return "Cluster";
275 default: break;
276 }
277 return "Invalid";
278}
279
280
281/**
282 * Gets the descriptive delivery mode name.
283 *
284 * @returns The delivery mode name.
285 * @param enmDeliveryMode The delivery mode.
286 */
287const char *apicGetDeliveryModeName(XAPICDELIVERYMODE enmDeliveryMode)
288{
289 switch (enmDeliveryMode)
290 {
291 case XAPICDELIVERYMODE_FIXED: return "Fixed";
292 case XAPICDELIVERYMODE_LOWEST_PRIO: return "Lowest-priority";
293 case XAPICDELIVERYMODE_SMI: return "SMI";
294 case XAPICDELIVERYMODE_NMI: return "NMI";
295 case XAPICDELIVERYMODE_INIT: return "INIT";
296 case XAPICDELIVERYMODE_STARTUP: return "SIPI";
297 case XAPICDELIVERYMODE_EXTINT: return "ExtINT";
298 default: break;
299 }
300 return "Invalid";
301}
302
303
304/**
305 * Gets the descriptive destination mode name.
306 *
307 * @returns The destination mode name.
308 * @param enmDestMode The destination mode.
309 */
310const char *apicGetDestModeName(XAPICDESTMODE enmDestMode)
311{
312 switch (enmDestMode)
313 {
314 case XAPICDESTMODE_PHYSICAL: return "Physical";
315 case XAPICDESTMODE_LOGICAL: return "Logical";
316 default: break;
317 }
318 return "Invalid";
319}
320
321
322/**
323 * Gets the descriptive trigger mode name.
324 *
325 * @returns The trigger mode name.
326 * @param enmTriggerMode The trigger mode.
327 */
328const char *apicGetTriggerModeName(XAPICTRIGGERMODE enmTriggerMode)
329{
330 switch (enmTriggerMode)
331 {
332 case XAPICTRIGGERMODE_EDGE: return "Edge";
333 case XAPICTRIGGERMODE_LEVEL: return "Level";
334 default: break;
335 }
336 return "Invalid";
337}
338
339
340/**
341 * Gets the destination shorthand name.
342 *
343 * @returns The destination shorthand name.
344 * @param enmDestShorthand The destination shorthand.
345 */
346const char *apicGetDestShorthandName(XAPICDESTSHORTHAND enmDestShorthand)
347{
348 switch (enmDestShorthand)
349 {
350 case XAPICDESTSHORTHAND_NONE: return "None";
351 case XAPICDESTSHORTHAND_SELF: return "Self";
352 case XAPIDDESTSHORTHAND_ALL_INCL_SELF: return "All including self";
353 case XAPICDESTSHORTHAND_ALL_EXCL_SELF: return "All excluding self";
354 default: break;
355 }
356 return "Invalid";
357}
358
359
360/**
361 * Gets the timer mode name.
362 *
363 * @returns The timer mode name.
364 * @param enmTimerMode The timer mode.
365 */
366const char *apicGetTimerModeName(XAPICTIMERMODE enmTimerMode)
367{
368 switch (enmTimerMode)
369 {
370 case XAPICTIMERMODE_ONESHOT: return "One-shot";
371 case XAPICTIMERMODE_PERIODIC: return "Periodic";
372 case XAPICTIMERMODE_TSC_DEADLINE: return "TSC deadline";
373 default: break;
374 }
375 return "Invalid";
376}
377
378
379/**
380 * Gets the APIC mode given the base MSR value.
381 *
382 * @returns The APIC mode.
383 * @param uApicBaseMsr The APIC Base MSR value.
384 */
385APICMODE apicGetMode(uint64_t uApicBaseMsr)
386{
387 uint32_t const uMode = (uApicBaseMsr >> 10) & UINT64_C(3);
388 APICMODE const enmMode = (APICMODE)uMode;
389#ifdef VBOX_STRICT
390 /* Paranoia. */
391 switch (uMode)
392 {
393 case APICMODE_DISABLED:
394 case APICMODE_INVALID:
395 case APICMODE_XAPIC:
396 case APICMODE_X2APIC:
397 break;
398 default:
399 AssertMsgFailed(("Invalid mode"));
400 }
401#endif
402 return enmMode;
403}
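
/*
 * Illustrative sketch (excluded from the build): the mode is bits 11:10 of
 * the APIC base MSR (EXTD and EN), so EN alone yields xAPIC and EN plus EXTD
 * yields x2APIC; the enum values matching the raw bit patterns is implied by
 * the cast and switch above.
 */
#if 0
static void apicExampleGetMode(void)
{
    Assert(apicGetMode(0) == APICMODE_DISABLED);
    Assert(apicGetMode(RT_BIT_64(11)) == APICMODE_XAPIC);                  /* EN */
    Assert(apicGetMode(RT_BIT_64(11) | RT_BIT_64(10)) == APICMODE_X2APIC); /* EN | EXTD */
}
#endif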
404
405
406/**
407 * Returns whether the APIC is hardware enabled or not.
408 *
409 * @returns true if enabled, false otherwise.
410 */
411DECLINLINE(bool) apicIsEnabled(PVMCPU pVCpu)
412{
413 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
414 return RT_BOOL(pApicCpu->uApicBaseMsr & MSR_IA32_APICBASE_EN);
415}
416
417
418/**
419 * Finds the most significant set bit in an APIC 256-bit sparse register.
420 *
421 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
422 * @param pReg The APIC 256-bit sparse register.
423 * @param rcNotFound What to return when no bit is set.
424 */
425static int apicGetHighestSetBitInReg(volatile const XAPIC256BITREG *pReg, int rcNotFound)
426{
427 ssize_t const cFragments = RT_ELEMENTS(pReg->u);
428 unsigned const uFragmentShift = 5;
429 AssertCompile(1 << uFragmentShift == sizeof(pReg->u[0].u32Reg) * 8);
430 for (ssize_t i = cFragments - 1; i >= 0; i--)
431 {
432 uint32_t const uFragment = pReg->u[i].u32Reg;
433 if (uFragment)
434 {
435 unsigned idxSetBit = ASMBitLastSetU32(uFragment);
436 --idxSetBit;
437 idxSetBit |= i << uFragmentShift;
438 return idxSetBit;
439 }
440 }
441 return rcNotFound;
442}
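
/*
 * Worked example (excluded from the build): with vectors 0x10 and 0x61 set,
 * fragment 3 reads 0x2; ASMBitLastSetU32 returns the 1-based index 2, so
 * idxSetBit = (2 - 1) | (3 << 5) = 0x61 -- the higher vector wins.
 */
#if 0
static void apicExampleHighestSetBit(void)
{
    XAPIC256BITREG Reg;
    RT_ZERO(Reg);
    apicSetVectorInReg(&Reg, 0x10);
    apicSetVectorInReg(&Reg, 0x61);
    Assert(apicGetHighestSetBitInReg(&Reg, -1 /* rcNotFound */) == 0x61);
}
#endif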
443
444
445/**
446 * Reads a 32-bit register at a specified offset.
447 *
448 * @returns The value at the specified offset.
449 * @param pXApicPage The xAPIC page.
450 * @param offReg The offset of the register being read.
451 */
452DECLINLINE(uint32_t) apicReadRaw32(PCXAPICPAGE pXApicPage, uint16_t offReg)
453{
454 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
455 uint8_t const *pbXApic = (const uint8_t *)pXApicPage;
456 uint32_t const uValue = *(const uint32_t *)(pbXApic + offReg);
457 return uValue;
458}
459
460
461/**
462 * Writes a 32-bit register at a specified offset.
463 *
464 * @param pXApicPage The xAPIC page.
465 * @param offReg The offset of the register being written.
466 * @param uReg The value of the register.
467 */
468DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
469{
470 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
471 uint8_t *pbXApic = (uint8_t *)pXApicPage;
472 *(uint32_t *)(pbXApic + offReg) = uReg;
473}
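
/*
 * Illustrative sketch (excluded from the build): the raw accessors treat the
 * xAPIC page as a flat byte array, so reading the TPR is simply a 32-bit load
 * at its architectural offset (XAPIC_OFF_TPR, 0x80).
 */
#if 0
static uint32_t apicExampleReadTpr(PCXAPICPAGE pXApicPage)
{
    return apicReadRaw32(pXApicPage, XAPIC_OFF_TPR);
}
#endif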
474
475
476/**
477 * Broadcasts the EOI to the I/O APICs.
478 *
479 * @param pVCpu The cross context virtual CPU structure.
480 * @param uVector The interrupt vector corresponding to the EOI.
481 */
482DECLINLINE(int) apicBusBroadcastEoi(PVMCPU pVCpu, uint8_t uVector)
483{
484 PVM pVM = pVCpu->CTX_SUFF(pVM);
485 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
486 return pApicDev->CTX_SUFF(pApicHlp)->pfnBusBroadcastEoi(pApicDev->CTX_SUFF(pDevIns), uVector);
487}
488
489
490/**
491 * Sets an error in the internal ESR of the specified APIC.
492 *
493 * @param pVCpu The cross context virtual CPU structure.
494 * @param uError The error.
495 * @thread Any.
496 */
497DECLINLINE(void) apicSetError(PVMCPU pVCpu, uint32_t uError)
498{
499 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
500 ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
501}
502
503
504/**
505 * Clears all errors in the internal ESR.
506 *
507 * @returns The value of the internal ESR before clearing.
508 * @param pVCpu The cross context virtual CPU structure.
509 */
510DECLINLINE(uint32_t) apicClearAllErrors(PVMCPU pVCpu)
511{
512 VMCPU_ASSERT_EMT(pVCpu);
513 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
514 return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
515}
516
517
518/**
519 * Signals the guest if a pending interrupt is ready to be serviced.
520 *
521 * @param pVCpu The cross context virtual CPU structure.
522 */
523static void apicSignalNextPendingIntr(PVMCPU pVCpu)
524{
525 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
526
527 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
528 if (pXApicPage->svr.u.fApicSoftwareEnable)
529 {
530 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1 /* rcNotFound */);
531 if (irrv >= 0)
532 {
533 Assert(irrv <= (int)UINT8_MAX);
534 uint8_t const uVector = irrv;
535 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
536 if ( !uPpr
537 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
538 {
539 Log2(("APIC%u: apicSignalNextPendingIntr: Signaling pending interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
540 apicSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
541 }
542 else
543 {
544 Log2(("APIC%u: apicSignalNextPendingIntr: Nothing to signal. uVector=%#x uPpr=%#x uTpr=%#x\n", pVCpu->idCpu,
545 uVector, uPpr, pXApicPage->tpr.u8Tpr));
546 }
547 }
548 }
549 else
550 {
551 Log2(("APIC%u: apicSignalNextPendingIntr: APIC software-disabled, clearing pending interrupt\n", pVCpu->idCpu));
552 apicClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
553 }
554}
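
/*
 * Worked example (excluded from the build): interrupts are compared by
 * priority class, assumed here to be the high nibble of the vector. With
 * PPR = 0x50, pending vector 0x61 (class 6) is signalled, while vector 0x55
 * (class 5) stays parked in the IRR.
 */
#if 0
AssertCompile(XAPIC_PPR_GET_PP(0x61) > XAPIC_PPR_GET_PP(0x50));  /* class 6 beats class 5 */
AssertCompile(XAPIC_PPR_GET_PP(0x55) == XAPIC_PPR_GET_PP(0x50)); /* same class, not signalled */
#endif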
555
556
557/**
558 * Sets the Spurious-Interrupt Vector Register (SVR).
559 *
560 * @returns Strict VBox status code.
561 * @param pVCpu The cross context virtual CPU structure.
562 * @param uSvr The SVR value.
563 */
564static VBOXSTRICTRC apicSetSvr(PVMCPU pVCpu, uint32_t uSvr)
565{
566 VMCPU_ASSERT_EMT(pVCpu);
567
568 uint32_t uValidMask = XAPIC_SVR_VALID;
569 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
570 if (pXApicPage->version.u.fEoiBroadcastSupression)
571 uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;
572
573 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
574 && (uSvr & ~uValidMask))
575 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);
576
577 Log2(("APIC%u: apicSetSvr: uSvr=%#RX32\n", pVCpu->idCpu, uSvr));
578 apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
579 if (!pXApicPage->svr.u.fApicSoftwareEnable)
580 {
581 /** @todo CMCI. */
582 pXApicPage->lvt_timer.u.u1Mask = 1;
583#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
584 pXApicPage->lvt_thermal.u.u1Mask = 1;
585#endif
586 pXApicPage->lvt_perf.u.u1Mask = 1;
587 pXApicPage->lvt_lint0.u.u1Mask = 1;
588 pXApicPage->lvt_lint1.u.u1Mask = 1;
589 pXApicPage->lvt_error.u.u1Mask = 1;
590 }
591
592 apicSignalNextPendingIntr(pVCpu);
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Sends an interrupt to one or more APICs.
599 *
600 * @returns Strict VBox status code.
601 * @param pVM The cross context VM structure.
602 * @param pVCpu The cross context virtual CPU structure, can be
603 * NULL if the source of the interrupt is not an
604 * APIC (e.g. a bus).
605 * @param uVector The interrupt vector.
606 * @param enmTriggerMode The trigger mode.
607 * @param enmDeliveryMode The delivery mode.
608 * @param pDestCpuSet The destination CPU set.
609 * @param pfIntrAccepted Where to store whether this interrupt was
610 * accepted by the target APIC(s) or not.
611 * Optional, can be NULL.
612 * @param rcRZ The return code if the operation cannot be
613 * performed in the current context.
614 */
615static VBOXSTRICTRC apicSendIntr(PVM pVM, PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
616 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, bool *pfIntrAccepted, int rcRZ)
617{
618 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
619 VMCPUID const cCpus = pVM->cCpus;
620 bool fAccepted = false;
621 switch (enmDeliveryMode)
622 {
623 case XAPICDELIVERYMODE_FIXED:
624 {
625 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
626 {
627 if ( VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
628 && apicIsEnabled(&pVM->aCpus[idCpu]))
629 fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
630 }
631 break;
632 }
633
634 case XAPICDELIVERYMODE_LOWEST_PRIO:
635 {
636 VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
637 if ( idCpu < pVM->cCpus
638 && apicIsEnabled(&pVM->aCpus[idCpu]))
639 fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
640 else
641 AssertMsgFailed(("APIC: apicSendIntr: No CPU found for lowest-priority delivery mode! idCpu=%u\n", idCpu));
642 break;
643 }
644
645 case XAPICDELIVERYMODE_SMI:
646 {
647 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
648 {
649 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
650 {
651 Log2(("APIC: apicSendIntr: Raising SMI on VCPU%u\n", idCpu));
652 apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_SMI);
653 fAccepted = true;
654 }
655 }
656 break;
657 }
658
659 case XAPICDELIVERYMODE_NMI:
660 {
661 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
662 {
663 if ( VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
664 && apicIsEnabled(&pVM->aCpus[idCpu]))
665 {
666 Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
667 apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_NMI);
668 fAccepted = true;
669 }
670 }
671 break;
672 }
673
674 case XAPICDELIVERYMODE_INIT:
675 {
676#ifdef IN_RING3
677 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
678 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
679 {
680 Log2(("APIC: apicSendIntr: Issuing INIT to VCPU%u\n", idCpu));
681 VMMR3SendInitIpi(pVM, idCpu);
682 fAccepted = true;
683 }
684#else
685 /* We need to return to ring-3 to deliver the INIT. */
686 rcStrict = rcRZ;
687 fAccepted = true;
688#endif
689 break;
690 }
691
692 case XAPICDELIVERYMODE_STARTUP:
693 {
694#ifdef IN_RING3
695 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
696 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
697 {
698 Log2(("APIC: apicSendIntr: Issuing SIPI to VCPU%u\n", idCpu));
699 VMMR3SendStartupIpi(pVM, idCpu, uVector);
700 fAccepted = true;
701 }
702#else
703 /* We need to return to ring-3 to deliver the SIPI. */
704 rcStrict = rcRZ;
705 fAccepted = true;
706 Log2(("APIC: apicSendIntr: SIPI issued, returning to RZ. rc=%Rrc\n", rcRZ));
707#endif
708 break;
709 }
710
711 case XAPICDELIVERYMODE_EXTINT:
712 {
713 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
714 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
715 {
716 Log2(("APIC: apicSendIntr: Raising EXTINT on VCPU%u\n", idCpu));
717 apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_EXTINT);
718 fAccepted = true;
719 }
720 break;
721 }
722
723 default:
724 {
725 AssertMsgFailed(("APIC: apicSendIntr: Unsupported delivery mode %#x (%s)\n", enmDeliveryMode,
726 apicGetDeliveryModeName(enmDeliveryMode)));
727 break;
728 }
729 }
730
731 /*
732 * If an illegal vector is programmed, set the 'send illegal vector' error here if the
733 * interrupt is being sent by an APIC.
734 *
735 * The 'receive illegal vector' will be set on the target APIC when the interrupt
736 * gets generated, see apicPostInterrupt().
737 *
738 * See Intel spec. 10.5.3 "Error Handling".
739 */
740 if ( rcStrict != rcRZ
741 && pVCpu)
742 {
743 /*
744 * Flag errors only when the delivery mode is fixed, not for the others.
745 *
746 * Ubuntu 10.04-3 amd64 live CD with 2 VCPUs gets upset as it sends an SIPI to the
747 * 2nd VCPU with vector 6 and checks the ESR for no errors, see @bugref{8245#c86}.
748 */
749 /** @todo The spec says this for LVT, but not explicitly for ICR-lo;
750 * it probably applies there as well. */
751 if (enmDeliveryMode == XAPICDELIVERYMODE_FIXED)
752 {
753 if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
754 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
755 }
756 }
757
758 if (pfIntrAccepted)
759 *pfIntrAccepted = fAccepted;
760
761 return rcStrict;
762}
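
/*
 * Usage sketch (excluded from the build): delivering a fixed, edge-triggered
 * vector to VCPU 1 from a non-APIC source (pVCpu is NULL), e.g. on behalf of
 * a bus; assumes the VM has at least two VCPUs and we are in ring-3.
 */
#if 0
static void apicExampleSendFixedIntr(PVM pVM, uint8_t uVector)
{
    VMCPUSET DestCpuSet;
    VMCPUSET_EMPTY(&DestCpuSet);
    VMCPUSET_ADD(&DestCpuSet, 1 /* idCpu */);
    bool fAccepted = false;
    apicSendIntr(pVM, NULL /* pVCpu */, uVector, XAPICTRIGGERMODE_EDGE, XAPICDELIVERYMODE_FIXED,
                 &DestCpuSet, &fAccepted, VINF_SUCCESS /* rcRZ */);
}
#endif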
763
764
765/**
766 * Checks if this APIC belongs to a logical destination.
767 *
768 * @returns true if the APIC belongs to the logical
769 * destination, false otherwise.
770 * @param pVCpu The cross context virtual CPU structure.
771 * @param fDest The destination mask.
772 *
773 * @thread Any.
774 */
775static bool apicIsLogicalDest(PVMCPU pVCpu, uint32_t fDest)
776{
777 if (XAPIC_IN_X2APIC_MODE(pVCpu))
778 {
779 /*
780 * Flat logical mode is not supported in x2APIC mode.
781 * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
782 * - High 16 bits is the cluster ID.
783 * - Low 16 bits: each bit represents a unique APIC within the cluster.
784 */
785 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
786 uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
787 if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
788 return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
789 return false;
790 }
791
792#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
793 /*
794 * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
795 * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
796 */
797 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
798 if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
799 return true;
800
801 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
802 XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
803 if (enmDestFormat == XAPICDESTFORMAT_FLAT)
804 {
805 /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
806 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
807 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
808 }
809
810 /*
811 * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
812 * - High 4 bits is the cluster ID.
813 * - Low 4 bits: each bit represents a unique APIC within the cluster.
814 */
815 Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
816 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
817 if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
818 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
819 return false;
820#else
821# error "Implement Pentium and P6 family APIC architectures"
822#endif
823}
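
/*
 * Worked example (for exposition): in xAPIC clustered mode, an LDR of 0x52
 * means cluster 5 with logical-ID bit 0x2. A destination of 0x56 (cluster 5,
 * logical bits 0x6) overlaps bit 0x2 and therefore matches; a destination of
 * 0x62 fails the cluster-ID comparison and does not.
 */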
824
825
826/**
827 * Figures out the set of destination CPUs for a given destination mode, format
828 * and delivery mode setting.
829 *
830 * @param pVM The cross context VM structure.
831 * @param fDestMask The destination mask.
832 * @param fBroadcastMask The broadcast mask.
833 * @param enmDestMode The destination mode.
834 * @param enmDeliveryMode The delivery mode.
835 * @param pDestCpuSet The destination CPU set to update.
836 */
837static void apicGetDestCpuSet(PVM pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
838 XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
839{
840 VMCPUSET_EMPTY(pDestCpuSet);
841
842 /*
843 * Physical destination mode only supports either a broadcast or a single target.
844 * - Broadcast with lowest-priority delivery mode is not supported[1]; we deliver it
845 * as a regular broadcast like in fixed delivery mode.
846 * - For a single target, lowest-priority delivery mode makes no sense. We deliver
847 * to the target like in fixed delivery mode.
848 *
849 * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
850 */
851 if ( enmDestMode == XAPICDESTMODE_PHYSICAL
852 && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
853 {
854 AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
855 enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
856 }
857
858 uint32_t const cCpus = pVM->cCpus;
859 if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
860 {
861 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
862#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
863 VMCPUID idCpuLowestTpr = NIL_VMCPUID;
864 uint8_t u8LowestTpr = UINT8_C(0xff);
865 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
866 {
867 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
868 if (apicIsLogicalDest(pVCpuDest, fDestMask))
869 {
870 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
871 uint8_t const u8Tpr = pXApicPage->tpr.u8Tpr; /* PAV */
872
873 /*
874 * If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
875 * Hence the use of "<=" in the check below.
876 * See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration".
877 */
878 if (u8Tpr <= u8LowestTpr)
879 {
880 u8LowestTpr = u8Tpr;
881 idCpuLowestTpr = idCpu;
882 }
883 }
884 }
885 if (idCpuLowestTpr != NIL_VMCPUID)
886 VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
887#else
888# error "Implement Pentium and P6 family APIC architectures"
889#endif
890 return;
891 }
892
893 /*
894 * x2APIC:
895 * - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
896 * xAPIC:
897 * - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
898 * - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
899 *
900 * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
901 * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
902 * [3] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
903 */
904 if ((fDestMask & fBroadcastMask) == fBroadcastMask)
905 {
906 VMCPUSET_FILL(pDestCpuSet);
907 return;
908 }
909
910 if (enmDestMode == XAPICDESTMODE_PHYSICAL)
911 {
912 /* The destination mask is interpreted as the physical APIC ID of a single target. */
913#if 1
914 /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
915 if (RT_LIKELY(fDestMask < cCpus))
916 VMCPUSET_ADD(pDestCpuSet, fDestMask);
917#else
918 /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
919 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
920 {
921 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
922 if (XAPIC_IN_X2APIC_MODE(pVCpuDest))
923 {
924 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDest);
925 if (pX2ApicPage->id.u32ApicId == fDestMask)
926 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
927 }
928 else
929 {
930 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
931 if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
932 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
933 }
934 }
935#endif
936 }
937 else
938 {
939 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
940
941 /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
942 if (RT_UNLIKELY(!fDestMask))
943 return;
944
945 /* The destination mask is interpreted as a bitmap of the software-programmable logical APIC IDs of the target APICs. */
946 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
947 {
948 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
949 if (apicIsLogicalDest(pVCpuDest, fDestMask))
950 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
951 }
952 }
953}
954
955
956/**
957 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
958 * Command Register (ICR).
959 *
960 * @returns VBox status code.
961 * @param pVCpu The cross context virtual CPU structure.
962 * @param rcRZ The return code if the operation cannot be
963 * performed in the current context.
964 */
965DECLINLINE(VBOXSTRICTRC) apicSendIpi(PVMCPU pVCpu, int rcRZ)
966{
967 VMCPU_ASSERT_EMT(pVCpu);
968
969 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
970 XAPICDELIVERYMODE const enmDeliveryMode = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
971 XAPICDESTMODE const enmDestMode = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
972 XAPICINITLEVEL const enmInitLevel = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
973 XAPICTRIGGERMODE const enmTriggerMode = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
974 XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
975 uint8_t const uVector = pXApicPage->icr_lo.u.u8Vector;
976
977 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
978 uint32_t const fDest = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;
979
980#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
981 /*
982 * INIT Level De-assert is not supported on Pentium 4 and Xeon processors.
983 * Apparently, this also applies to NMI, SMI, lowest-priority and fixed delivery modes,
984 * see @bugref{8245#c116}.
985 *
986 * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)" for a table of valid ICR combinations.
987 */
988 if ( enmTriggerMode == XAPICTRIGGERMODE_LEVEL
989 && enmInitLevel == XAPICINITLEVEL_DEASSERT
990 && ( enmDeliveryMode == XAPICDELIVERYMODE_FIXED
991 || enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO
992 || enmDeliveryMode == XAPICDELIVERYMODE_SMI
993 || enmDeliveryMode == XAPICDELIVERYMODE_NMI
994 || enmDeliveryMode == XAPICDELIVERYMODE_INIT))
995 {
996 Log2(("APIC%u: %s level de-assert unsupported, ignoring!\n", pVCpu->idCpu, apicGetDeliveryModeName(enmDeliveryMode)));
997 return VINF_SUCCESS;
998 }
999#else
1000# error "Implement Pentium and P6 family APIC architectures"
1001#endif
1002
1003 /*
1004 * The destination and delivery modes are ignored/by-passed when a destination shorthand is specified.
1005 * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
1006 */
1007 VMCPUSET DestCpuSet;
1008 switch (enmDestShorthand)
1009 {
1010 case XAPICDESTSHORTHAND_NONE:
1011 {
1012 PVM pVM = pVCpu->CTX_SUFF(pVM);
1013 uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
1014 apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
1015 break;
1016 }
1017
1018 case XAPICDESTSHORTHAND_SELF:
1019 {
1020 VMCPUSET_EMPTY(&DestCpuSet);
1021 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
1022 break;
1023 }
1024
1025 case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
1026 {
1027 VMCPUSET_FILL(&DestCpuSet);
1028 break;
1029 }
1030
1031 case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
1032 {
1033 VMCPUSET_FILL(&DestCpuSet);
1034 VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
1035 break;
1036 }
1037 }
1038
1039 return apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
1040 NULL /* pfIntrAccepted */, rcRZ);
1041}
1042
1043
1044/**
1045 * Sets the Interrupt Command Register (ICR) high dword.
1046 *
1047 * @returns Strict VBox status code.
1048 * @param pVCpu The cross context virtual CPU structure.
1049 * @param uIcrHi The ICR high dword.
1050 */
1051static VBOXSTRICTRC apicSetIcrHi(PVMCPU pVCpu, uint32_t uIcrHi)
1052{
1053 VMCPU_ASSERT_EMT(pVCpu);
1054 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1055
1056 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1057 pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
1058 STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrHiWrite);
1059 Log2(("APIC%u: apicSetIcrHi: uIcrHi=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_hi.all.u32IcrHi));
1060
1061 return VINF_SUCCESS;
1062}
1063
1064
1065/**
1066 * Sets the Interrupt Command Register (ICR) low dword.
1067 *
1068 * @returns Strict VBox status code.
1069 * @param pVCpu The cross context virtual CPU structure.
1070 * @param uIcrLo The ICR low dword.
1071 * @param rcRZ The return code if the operation cannot be performed
1072 * in the current context.
1073 * @param fUpdateStat Whether to update the ICR low write statistics
1074 * counter.
1075 */
1076static VBOXSTRICTRC apicSetIcrLo(PVMCPU pVCpu, uint32_t uIcrLo, int rcRZ, bool fUpdateStat)
1077{
1078 VMCPU_ASSERT_EMT(pVCpu);
1079
1080 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1081 pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR_VALID;
1082 Log2(("APIC%u: apicSetIcrLo: uIcrLo=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_lo.all.u32IcrLo));
1083
1084 if (fUpdateStat)
1085 STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrLoWrite);
1086 RT_NOREF(fUpdateStat);
1087
1088 return apicSendIpi(pVCpu, rcRZ);
1089}
1090
1091
1092/**
1093 * Sets the Interrupt Command Register (ICR).
1094 *
1095 * @returns Strict VBox status code.
1096 * @param pVCpu The cross context virtual CPU structure.
1097 * @param u64Icr The ICR (High and Low combined).
1098 * @param rcRZ The return code if the operation cannot be performed
1099 * in the current context.
1100 *
1101 * @remarks This function is used by both the x2APIC interface and the Hyper-V
1102 * interface, see APICHvSetIcr. The Hyper-V spec isn't clear what
1103 * happens when invalid bits are set. For the time being, it will
1104 * \#GP like a regular x2APIC access.
1105 */
1106static VBOXSTRICTRC apicSetIcr(PVMCPU pVCpu, uint64_t u64Icr, int rcRZ)
1107{
1108 VMCPU_ASSERT_EMT(pVCpu);
1109
1110 /* Validate. */
1111 uint32_t const uLo = RT_LO_U32(u64Icr);
1112 if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR_VALID)))
1113 {
1114 /* Update high dword first, then update the low dword which sends the IPI. */
1115 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
1116 pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(u64Icr);
1117 STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrFullWrite);
1118 return apicSetIcrLo(pVCpu, uLo, rcRZ, false /* fUpdateStat */);
1119 }
1120 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
1121}
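
/*
 * Usage sketch (excluded from the build): in x2APIC mode the guest writes the
 * whole ICR with a single WRMSR. Destination APIC ID 2 in the high dword and
 * vector 0x40 with all other low-dword fields zero (fixed delivery, physical
 * destination, no shorthand) would be handled as below.
 */
#if 0
uint64_t const uExampleIcr = ((uint64_t)2 << 32) | UINT64_C(0x40);
/* apicSetIcr(pVCpu, uExampleIcr, VINF_CPUM_R3_MSR_WRITE); latches the high
   dword first, then the low-dword write triggers apicSendIpi(). */
#endif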
1122
1123
1124/**
1125 * Sets the Error Status Register (ESR).
1126 *
1127 * @returns Strict VBox status code.
1128 * @param pVCpu The cross context virtual CPU structure.
1129 * @param uEsr The ESR value.
1130 */
1131static VBOXSTRICTRC apicSetEsr(PVMCPU pVCpu, uint32_t uEsr)
1132{
1133 VMCPU_ASSERT_EMT(pVCpu);
1134
1135 Log2(("APIC%u: apicSetEsr: uEsr=%#RX32\n", pVCpu->idCpu, uEsr));
1136
1137 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1138 && (uEsr & ~XAPIC_ESR_WO_VALID))
1139 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);
1140
1141 /*
1142 * Writes to the ESR cause the internal state to be updated in the register,
1143 * clearing the original state. See AMD spec. 16.4.6 "APIC Error Interrupts".
1144 */
1145 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1146 pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
1147 return VINF_SUCCESS;
1148}
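
/*
 * Illustrative sketch (excluded from the build): the ESR is write-to-latch.
 * Errors accumulate in uEsrInternal as they occur; a guest write of 0 then
 * transfers and clears them so the subsequent ESR read sees a stable snapshot.
 */
#if 0
apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR); /* error recorded internally */
apicSetEsr(pVCpu, 0);                               /* guest writes the ESR */
/* The xAPIC page ESR now holds XAPIC_ESR_SEND_ILLEGAL_VECTOR and
   uEsrInternal is 0. */
#endif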
1149
1150
1151/**
1152 * Updates the Processor Priority Register (PPR).
1153 *
1154 * @param pVCpu The cross context virtual CPU structure.
1155 */
1156static void apicUpdatePpr(PVMCPU pVCpu)
1157{
1158 VMCPU_ASSERT_EMT(pVCpu);
1159
1160 /* See Intel spec 10.8.3.1 "Task and Processor Priorities". */
1161 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1162 uint8_t const uIsrv = apicGetHighestSetBitInReg(&pXApicPage->isr, 0 /* rcNotFound */);
1163 uint8_t uPpr;
1164 if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
1165 uPpr = pXApicPage->tpr.u8Tpr;
1166 else
1167 uPpr = XAPIC_PPR_GET_PP(uIsrv);
1168 pXApicPage->ppr.u8Ppr = uPpr;
1169}
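
/*
 * Worked example (for exposition, assuming the class macros mask the high
 * nibble): with TPR = 0x30 and highest in-service vector 0x61, the in-service
 * class 0x60 dominates and the PPR becomes 0x60; with TPR = 0x70 the TPR wins
 * and the PPR is the full 0x70 including its low nibble.
 */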
1170
1171
1172/**
1173 * Gets the Processor Priority Register (PPR).
1174 *
1175 * @returns The PPR value.
1176 * @param pVCpu The cross context virtual CPU structure.
1177 */
1178static uint8_t apicGetPpr(PVMCPU pVCpu)
1179{
1180 VMCPU_ASSERT_EMT(pVCpu);
1181 STAM_COUNTER_INC(&pVCpu->apic.s.StatTprRead);
1182
1183 /*
1184 * With virtualized APIC registers or with TPR virtualization, the hardware may
1185 * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
1186 * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
1187 *
1188 * In all other instances, whenever the TPR or ISR changes, we need to update the PPR
1189 * as well (e.g. like we do manually in apicR3InitIpi and by calling apicUpdatePpr).
1190 */
1191 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1192 if (pApic->fVirtApicRegsEnabled) /** @todo re-think this */
1193 apicUpdatePpr(pVCpu);
1194 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1195 return pXApicPage->ppr.u8Ppr;
1196}
1197
1198
1199/**
1200 * Sets the Task Priority Register (TPR).
1201 *
1202 * @returns Strict VBox status code.
1203 * @param pVCpu The cross context virtual CPU structure.
1204 * @param uTpr The TPR value.
1205 * @param fForceX2ApicBehaviour Pretend the APIC is in x2APIC mode during
1206 * this write.
1207 */
1208static VBOXSTRICTRC apicSetTprEx(PVMCPU pVCpu, uint32_t uTpr, bool fForceX2ApicBehaviour)
1209{
1210 VMCPU_ASSERT_EMT(pVCpu);
1211
1212 Log2(("APIC%u: apicSetTprEx: uTpr=%#RX32\n", pVCpu->idCpu, uTpr));
1213 STAM_COUNTER_INC(&pVCpu->apic.s.StatTprWrite);
1214
1215 bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
1216 if ( fX2ApicMode
1217 && (uTpr & ~XAPIC_TPR_VALID))
1218 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);
1219
1220 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1221 pXApicPage->tpr.u8Tpr = uTpr;
1222 apicUpdatePpr(pVCpu);
1223 apicSignalNextPendingIntr(pVCpu);
1224 return VINF_SUCCESS;
1225}
1226
1227
1228/**
1229 * Sets the End-Of-Interrupt (EOI) register.
1230 *
1231 * @returns Strict VBox status code.
1232 * @param pVCpu The cross context virtual CPU structure.
1233 * @param uEoi The EOI value.
1234 * @param rcBusy The busy return code when the write cannot
1235 * be completed successfully in this context.
1236 * @param fForceX2ApicBehaviour Pretend the APIC is in x2APIC mode during
1237 * this write.
1238 */
1239static VBOXSTRICTRC apicSetEoi(PVMCPU pVCpu, uint32_t uEoi, int rcBusy, bool fForceX2ApicBehaviour)
1240{
1241 VMCPU_ASSERT_EMT(pVCpu);
1242
1243 Log2(("APIC%u: apicSetEoi: uEoi=%#RX32\n", pVCpu->idCpu, uEoi));
1244 STAM_COUNTER_INC(&pVCpu->apic.s.StatEoiWrite);
1245
1246 bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
1247 if ( fX2ApicMode
1248 && (uEoi & ~XAPIC_EOI_WO_VALID))
1249 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);
1250
1251 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1252 int isrv = apicGetHighestSetBitInReg(&pXApicPage->isr, -1 /* rcNotFound */);
1253 if (isrv >= 0)
1254 {
1255 /*
1256 * Broadcast the EOI to the I/O APIC(s).
1257 *
1258 * We'll handle the EOI broadcast first as there is a tiny chance we get rescheduled to
1259 * ring-3 due to contention on the I/O APIC lock. This way we don't mess with the rest
1260 * of the APIC state and simply restart the EOI write operation from ring-3.
1261 */
1262 Assert(isrv <= (int)UINT8_MAX);
1263 uint8_t const uVector = isrv;
1264 bool const fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
1265 if (fLevelTriggered)
1266 {
1267 int rc = apicBusBroadcastEoi(pVCpu, uVector);
1268 if (rc == VINF_SUCCESS)
1269 { /* likely */ }
1270 else
1271 return rcBusy;
1272
1273 /*
1274 * Clear the vector from the TMR.
1275 *
1276 * The broadcast to the I/O APIC can cause new interrupts to arrive via the bus. However,
1277 * APICUpdatePendingInterrupts() which updates TMR can only be done from EMT which we
1278 * currently are on, so no possibility of concurrent updates.
1279 */
1280 apicClearVectorInReg(&pXApicPage->tmr, uVector);
1281
1282 /*
1283 * Clear the remote IRR bit for level-triggered, fixed mode LINT0 interrupt.
1284 * The LINT1 pin does not support level-triggered interrupts.
1285 * See Intel spec. 10.5.1 "Local Vector Table".
1286 */
1287 uint32_t const uLvtLint0 = pXApicPage->lvt_lint0.all.u32LvtLint0;
1288 if ( XAPIC_LVT_GET_REMOTE_IRR(uLvtLint0)
1289 && XAPIC_LVT_GET_VECTOR(uLvtLint0) == uVector
1290 && XAPIC_LVT_GET_DELIVERY_MODE(uLvtLint0) == XAPICDELIVERYMODE_FIXED)
1291 {
1292 ASMAtomicAndU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, ~XAPIC_LVT_REMOTE_IRR);
1293 Log2(("APIC%u: apicSetEoi: Cleared remote-IRR for LINT0. uVector=%#x\n", pVCpu->idCpu, uVector));
1294 }
1295
1296 Log2(("APIC%u: apicSetEoi: Cleared level triggered interrupt from TMR. uVector=%#x\n", pVCpu->idCpu, uVector));
1297 }
1298
1299 /*
1300 * Mark interrupt as serviced, update the PPR and signal pending interrupts.
1301 */
1302 Log2(("APIC%u: apicSetEoi: Clearing interrupt from ISR. uVector=%#x\n", pVCpu->idCpu, uVector));
1303 apicClearVectorInReg(&pXApicPage->isr, uVector);
1304 apicUpdatePpr(pVCpu);
1305 apicSignalNextPendingIntr(pVCpu);
1306 }
1307 else
1308 {
1309#ifdef DEBUG_ramshankar
1310 /** @todo Figure out if this is done intentionally by guests or is a bug
1311 * in our emulation. Happened with Win10 SMP VM during reboot after
1312 * installation of guest additions with 3D support. */
1313 AssertMsgFailed(("APIC%u: apicSetEoi: Failed to find any ISR bit\n", pVCpu->idCpu));
1314#endif
1315 }
1316
1317 return VINF_SUCCESS;
1318}
1319
1320
1321/**
1322 * Sets the Logical Destination Register (LDR).
1323 *
1324 * @returns Strict VBox status code.
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param uLdr The LDR value.
1327 *
1328 * @remarks LDR is read-only in x2APIC mode.
1329 */
1330static VBOXSTRICTRC apicSetLdr(PVMCPU pVCpu, uint32_t uLdr)
1331{
1332 VMCPU_ASSERT_EMT(pVCpu);
1333 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1334 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu) || pApic->fHyperVCompatMode); RT_NOREF_PV(pApic);
1335
1336 Log2(("APIC%u: apicSetLdr: uLdr=%#RX32\n", pVCpu->idCpu, uLdr));
1337
1338 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1339 apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR_VALID);
1340 return VINF_SUCCESS;
1341}
1342
1343
1344/**
1345 * Sets the Destination Format Register (DFR).
1346 *
1347 * @returns Strict VBox status code.
1348 * @param pVCpu The cross context virtual CPU structure.
1349 * @param uDfr The DFR value.
1350 *
1351 * @remarks DFR is not available in x2APIC mode.
1352 */
1353static VBOXSTRICTRC apicSetDfr(PVMCPU pVCpu, uint32_t uDfr)
1354{
1355 VMCPU_ASSERT_EMT(pVCpu);
1356 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1357
1358 uDfr &= XAPIC_DFR_VALID;
1359 uDfr |= XAPIC_DFR_RSVD_MB1;
1360
1361 Log2(("APIC%u: apicSetDfr: uDfr=%#RX32\n", pVCpu->idCpu, uDfr));
1362
1363 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1364 apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr);
1365 return VINF_SUCCESS;
1366}
1367
1368
1369/**
1370 * Sets the Timer Divide Configuration Register (DCR).
1371 *
1372 * @returns Strict VBox status code.
1373 * @param pVCpu The cross context virtual CPU structure.
1374 * @param uTimerDcr The timer DCR value.
1375 */
1376static VBOXSTRICTRC apicSetTimerDcr(PVMCPU pVCpu, uint32_t uTimerDcr)
1377{
1378 VMCPU_ASSERT_EMT(pVCpu);
1379 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1380 && (uTimerDcr & ~XAPIC_TIMER_DCR_VALID))
1381 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);
1382
1383 Log2(("APIC%u: apicSetTimerDcr: uTimerDcr=%#RX32\n", pVCpu->idCpu, uTimerDcr));
1384
1385 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1386 apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
1387 return VINF_SUCCESS;
1388}
1389
1390
1391/**
1392 * Gets the timer's Current Count Register (CCR).
1393 *
1394 * @returns VBox status code.
1395 * @param pVCpu The cross context virtual CPU structure.
1396 * @param rcBusy The busy return code for the timer critical section.
1397 * @param puValue Where to store the LVT timer CCR.
1398 */
1399static VBOXSTRICTRC apicGetTimerCcr(PVMCPU pVCpu, int rcBusy, uint32_t *puValue)
1400{
1401 VMCPU_ASSERT_EMT(pVCpu);
1402 Assert(puValue);
1403
1404 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1405 *puValue = 0;
1406
1407 /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1408 if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1409 return VINF_SUCCESS;
1410
1411 /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
1412 uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
1413 if (!uInitialCount)
1414 return VINF_SUCCESS;
1415
1416 /*
1417 * Reading the virtual-sync clock requires locking its timer because it's not
1418 * a simple atomic operation, see tmVirtualSyncGetEx().
1419 *
1420 * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
1421 */
1422 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1423 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
1424
1425 int rc = TMTimerLock(pTimer, rcBusy);
1426 if (rc == VINF_SUCCESS)
1427 {
1428 /* If the current-count register is 0, it implies the timer expired. */
1429 uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
1430 if (uCurrentCount)
1431 {
1432 uint64_t const cTicksElapsed = TMTimerGet(pApicCpu->CTX_SUFF(pTimer)) - pApicCpu->u64TimerInitial;
1433 TMTimerUnlock(pTimer);
1434 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
1435 uint64_t const uDelta = cTicksElapsed >> uTimerShift;
1436 if (uInitialCount > uDelta)
1437 *puValue = uInitialCount - uDelta;
1438 }
1439 else
1440 TMTimerUnlock(pTimer);
1441 }
1442 return rc;
1443}
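
/*
 * Worked example (for exposition): with a divide-by-8 DCR (uTimerShift = 3),
 * an initial count of 1000 and 4000 virtual-sync ticks elapsed, uDelta =
 * 4000 >> 3 = 500 and the CCR reads 1000 - 500 = 500. Once uDelta reaches the
 * initial count, the CCR reads 0 (the timer expired).
 */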
1444
1445
1446/**
1447 * Sets the timer's Initial-Count Register (ICR).
1448 *
1449 * @returns Strict VBox status code.
1450 * @param pVCpu The cross context virtual CPU structure.
1451 * @param rcBusy The busy return code for the timer critical section.
1452 * @param uInitialCount The timer ICR.
1453 */
1454static VBOXSTRICTRC apicSetTimerIcr(PVMCPU pVCpu, int rcBusy, uint32_t uInitialCount)
1455{
1456 VMCPU_ASSERT_EMT(pVCpu);
1457
1458 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1459 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1460 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1461 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
1462
1463 Log2(("APIC%u: apicSetTimerIcr: uInitialCount=%#RX32\n", pVCpu->idCpu, uInitialCount));
1464 STAM_COUNTER_INC(&pApicCpu->StatTimerIcrWrite);
1465
1466 /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1467 if ( pApic->fSupportsTscDeadline
1468 && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1469 return VINF_SUCCESS;
1470
1471 /*
1472 * The timer CCR may be modified by apicR3TimerCallback() in parallel,
1473 * so obtain the lock -before- updating it here to be consistent with the
1474 * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
1475 */
1476 int rc = TMTimerLock(pTimer, rcBusy);
1477 if (rc == VINF_SUCCESS)
1478 {
1479 pXApicPage->timer_icr.u32InitialCount = uInitialCount;
1480 pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
1481 if (uInitialCount)
1482 apicStartTimer(pVCpu, uInitialCount);
1483 else
1484 apicStopTimer(pVCpu);
1485 TMTimerUnlock(pTimer);
1486 }
1487 return rc;
1488}
1489
1490
1491/**
1492 * Sets an LVT entry.
1493 *
1494 * @returns Strict VBox status code.
1495 * @param pVCpu The cross context virtual CPU structure.
1496 * @param offLvt The LVT entry offset in the xAPIC page.
1497 * @param uLvt The LVT value to set.
1498 */
1499static VBOXSTRICTRC apicSetLvtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1500{
1501 VMCPU_ASSERT_EMT(pVCpu);
1502
1503#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1504 AssertMsg( offLvt == XAPIC_OFF_LVT_TIMER
1505 || offLvt == XAPIC_OFF_LVT_THERMAL
1506 || offLvt == XAPIC_OFF_LVT_PERF
1507 || offLvt == XAPIC_OFF_LVT_LINT0
1508 || offLvt == XAPIC_OFF_LVT_LINT1
1509 || offLvt == XAPIC_OFF_LVT_ERROR,
1510 ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#RX16, uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1511
1512 /*
1513 * If TSC-deadline mode isn't supported, ignore the bit in xAPIC mode
1514 * and raise #GP(0) in x2APIC mode.
1515 */
1516 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1517 if (offLvt == XAPIC_OFF_LVT_TIMER)
1518 {
1519 if ( !pApic->fSupportsTscDeadline
1520 && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
1521 {
1522 if (XAPIC_IN_X2APIC_MODE(pVCpu))
1523 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1524 uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
1525 /** @todo TSC-deadline timer mode transition */
1526 }
1527 }
1528
1529 /*
1530 * Validate rest of the LVT bits.
1531 */
1532 uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
1533 AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);
1534
1535 /*
1536 * For x2APIC, disallow setting of invalid/reserved bits.
1537 * For xAPIC, mask out invalid/reserved bits (i.e. ignore them).
1538 */
1539 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1540 && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
1541 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1542
1543 uLvt &= g_au32LvtValidMasks[idxLvt];
1544
1545 /*
1546 * In the software-disabled state, LVT mask-bit must remain set and attempts to clear the mask
1547 * bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
1548 */
1549 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1550 if (!pXApicPage->svr.u.fApicSoftwareEnable)
1551 uLvt |= XAPIC_LVT_MASK;
1552
1553 /*
1554 * It is unclear whether we should signal a 'send illegal vector' error here and ignore updating
1555 * the LVT entry when the delivery mode is 'fixed'[1] or update it in addition to signaling the
1556 * error or not signal the error at all. For now, we'll allow setting illegal vectors into the LVT
1557 * but set the 'send illegal vector' error here. The 'receive illegal vector' error will be set if
1558 * the interrupt for the vector happens to be generated, see apicPostInterrupt().
1559 *
1560 * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
1561 */
1562 if (RT_UNLIKELY( XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
1563 && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
1564 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
1565
1566 Log2(("APIC%u: apicSetLvtEntry: offLvt=%#RX16 uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1567
1568 apicWriteRaw32(pXApicPage, offLvt, uLvt);
1569 return VINF_SUCCESS;
1570#else
1571# error "Implement Pentium and P6 family APIC architectures"
1572#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
1573}
1574
1575
1576#if 0
1577/**
1578 * Sets an LVT entry in the extended LVT range.
1579 *
1580 * @returns VBox status code.
1581 * @param pVCpu The cross context virtual CPU structure.
1582 * @param offLvt The LVT entry offset in the xAPIC page.
1583 * @param uLvt The LVT value to set.
1584 */
1585static int apicSetLvtExtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1586{
1587 VMCPU_ASSERT_EMT(pVCpu);
1588 AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvtExtEntry: invalid offset %#RX16\n", pVCpu->idCpu, offLvt));
1589
1590 /** @todo support CMCI. */
1591 return VERR_NOT_IMPLEMENTED;
1592}
1593#endif
1594
1595
1596/**
1597 * Hints TM about the APIC timer frequency.
1598 *
1599 * @param pApicCpu The APIC CPU state.
1600 * @param uInitialCount The new initial count.
1601 * @param uTimerShift The new timer shift.
1602 * @thread Any.
1603 */
1604void apicHintTimerFreq(PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
1605{
1606 Assert(pApicCpu);
1607
1608 if ( pApicCpu->uHintedTimerInitialCount != uInitialCount
1609 || pApicCpu->uHintedTimerShift != uTimerShift)
1610 {
1611 uint32_t uHz;
1612 if (uInitialCount)
1613 {
1614 uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
1615 uHz = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer)) / cTicksPerPeriod;
1616 }
1617 else
1618 uHz = 0;
1619
1620 TMTimerSetFrequencyHint(pApicCpu->CTX_SUFF(pTimer), uHz);
1621 pApicCpu->uHintedTimerInitialCount = uInitialCount;
1622 pApicCpu->uHintedTimerShift = uTimerShift;
1623 }
1624}
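
/*
 * Worked example (for exposition): with a 100 MHz timer clock, an initial
 * count of 50000 and uTimerShift = 1 (divide by 2), one period is
 * 50000 << 1 = 100000 ticks, so the hinted frequency is
 * 100000000 / 100000 = 1000 Hz.
 */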
1625
1626
1627/**
1628 * Gets the Interrupt Command Register (ICR), without performing any interface
1629 * checks.
1630 *
1631 * @returns The ICR value.
1632 * @param pVCpu The cross context virtual CPU structure.
1633 */
1634DECLINLINE(uint64_t) apicGetIcrNoCheck(PVMCPU pVCpu)
1635{
1636 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
1637 uint64_t const uHi = pX2ApicPage->icr_hi.u32IcrHi;
1638 uint64_t const uLo = pX2ApicPage->icr_lo.all.u32IcrLo;
1639 uint64_t const uIcr = RT_MAKE_U64(uLo, uHi);
1640 return uIcr;
1641}
1642
1643
1644/**
1645 * Reads an APIC register.
1646 *
1647 * @returns VBox status code.
1648 * @param pApicDev The APIC device instance.
1649 * @param pVCpu The cross context virtual CPU structure.
1650 * @param offReg The offset of the register being read.
1651 * @param puValue Where to store the register value.
1652 */
1653DECLINLINE(VBOXSTRICTRC) apicReadRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t *puValue)
1654{
1655 VMCPU_ASSERT_EMT(pVCpu);
1656 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1657
1658 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1659 uint32_t uValue = 0;
1660 VBOXSTRICTRC rc = VINF_SUCCESS;
1661 switch (offReg)
1662 {
1663 case XAPIC_OFF_ID:
1664 case XAPIC_OFF_VERSION:
1665 case XAPIC_OFF_TPR:
1666 case XAPIC_OFF_EOI:
1667 case XAPIC_OFF_RRD:
1668 case XAPIC_OFF_LDR:
1669 case XAPIC_OFF_DFR:
1670 case XAPIC_OFF_SVR:
1671 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1672 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1673 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1674 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1675 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1676 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1677 case XAPIC_OFF_ESR:
1678 case XAPIC_OFF_ICR_LO:
1679 case XAPIC_OFF_ICR_HI:
1680 case XAPIC_OFF_LVT_TIMER:
1681#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1682 case XAPIC_OFF_LVT_THERMAL:
1683#endif
1684 case XAPIC_OFF_LVT_PERF:
1685 case XAPIC_OFF_LVT_LINT0:
1686 case XAPIC_OFF_LVT_LINT1:
1687 case XAPIC_OFF_LVT_ERROR:
1688 case XAPIC_OFF_TIMER_ICR:
1689 case XAPIC_OFF_TIMER_DCR:
1690 {
1691 Assert( !XAPIC_IN_X2APIC_MODE(pVCpu)
1692 || ( offReg != XAPIC_OFF_DFR
1693 && offReg != XAPIC_OFF_ICR_HI
1694 && offReg != XAPIC_OFF_EOI));
1695 uValue = apicReadRaw32(pXApicPage, offReg);
1696 Log2(("APIC%u: apicReadRegister: offReg=%#x uValue=%#x\n", pVCpu->idCpu, offReg, uValue));
1697 break;
1698 }
1699
1700 case XAPIC_OFF_PPR:
1701 {
1702 uValue = apicGetPpr(pVCpu);
1703 break;
1704 }
1705
1706 case XAPIC_OFF_TIMER_CCR:
1707 {
1708 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1709 rc = apicGetTimerCcr(pVCpu, VINF_IOM_R3_MMIO_READ, &uValue);
1710 break;
1711 }
1712
1713 case XAPIC_OFF_APR:
1714 {
1715#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1716 /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
1717 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1718#else
1719# error "Implement Pentium and P6 family APIC architectures"
1720#endif
1721 break;
1722 }
1723
1724 default:
1725 {
1726 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1727 rc = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "VCPU[%u]: offReg=%#RX16\n", pVCpu->idCpu,
1728 offReg);
1729 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1730 break;
1731 }
1732 }
1733
1734 *puValue = uValue;
1735 return rc;
1736}
1737
1738
1739/**
1740 * Writes an APIC register.
1741 *
1742 * @returns Strict VBox status code.
1743 * @param pApicDev The APIC device instance.
1744 * @param pVCpu The cross context virtual CPU structure.
1745 * @param offReg The offset of the register being written.
1746 * @param uValue The register value.
1747 */
1748DECLINLINE(VBOXSTRICTRC) apicWriteRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t uValue)
1749{
1750 VMCPU_ASSERT_EMT(pVCpu);
1751 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1752 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1753
1754 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1755 switch (offReg)
1756 {
1757 case XAPIC_OFF_TPR:
1758 {
1759 rcStrict = apicSetTprEx(pVCpu, uValue, false /* fForceX2ApicBehaviour */);
1760 break;
1761 }
1762
1763 case XAPIC_OFF_LVT_TIMER:
1764#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1765 case XAPIC_OFF_LVT_THERMAL:
1766#endif
1767 case XAPIC_OFF_LVT_PERF:
1768 case XAPIC_OFF_LVT_LINT0:
1769 case XAPIC_OFF_LVT_LINT1:
1770 case XAPIC_OFF_LVT_ERROR:
1771 {
1772 rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
1773 break;
1774 }
1775
1776 case XAPIC_OFF_TIMER_ICR:
1777 {
1778 rcStrict = apicSetTimerIcr(pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
1779 break;
1780 }
1781
1782 case XAPIC_OFF_EOI:
1783 {
1784 rcStrict = apicSetEoi(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE, false /* fForceX2ApicBehaviour */);
1785 break;
1786 }
1787
1788 case XAPIC_OFF_LDR:
1789 {
1790 rcStrict = apicSetLdr(pVCpu, uValue);
1791 break;
1792 }
1793
1794 case XAPIC_OFF_DFR:
1795 {
1796 rcStrict = apicSetDfr(pVCpu, uValue);
1797 break;
1798 }
1799
1800 case XAPIC_OFF_SVR:
1801 {
1802 rcStrict = apicSetSvr(pVCpu, uValue);
1803 break;
1804 }
1805
1806 case XAPIC_OFF_ICR_LO:
1807 {
1808 rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE, true /* fUpdateStat */);
1809 break;
1810 }
1811
1812 case XAPIC_OFF_ICR_HI:
1813 {
1814 rcStrict = apicSetIcrHi(pVCpu, uValue);
1815 break;
1816 }
1817
1818 case XAPIC_OFF_TIMER_DCR:
1819 {
1820 rcStrict = apicSetTimerDcr(pVCpu, uValue);
1821 break;
1822 }
1823
1824 case XAPIC_OFF_ESR:
1825 {
1826 rcStrict = apicSetEsr(pVCpu, uValue);
1827 break;
1828 }
1829
1830 case XAPIC_OFF_APR:
1831 case XAPIC_OFF_RRD:
1832 {
1833#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1834 /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
1835#else
1836# error "Implement Pentium and P6 family APIC architectures"
1837#endif
1838 break;
1839 }
1840
1841 /* Read-only, write ignored: */
1842 case XAPIC_OFF_VERSION:
1843 case XAPIC_OFF_ID:
1844 break;
1845
1846 /* Unavailable/reserved in xAPIC mode: */
1847 case X2APIC_OFF_SELF_IPI:
1848 /* Read-only registers: */
1849 case XAPIC_OFF_PPR:
1850 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1851 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1852 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1853 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1854 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1855 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1856 case XAPIC_OFF_TIMER_CCR:
1857 default:
1858 {
1859 rcStrict = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "APIC%u: offReg=%#RX16\n", pVCpu->idCpu,
1860 offReg);
1861 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1862 break;
1863 }
1864 }
1865
1866 return rcStrict;
1867}
1868
1869
1870/**
1871 * @interface_method_impl{PDMAPICREG,pfnReadMsrR3}
1872 */
1873APICBOTHCBDECL(VBOXSTRICTRC) apicReadMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1874{
1875 /*
1876 * Validate.
1877 */
1878 VMCPU_ASSERT_EMT(pVCpu);
1879 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1880 Assert(pu64Value);
1881 RT_NOREF_PV(pDevIns);
1882
1883 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1884#ifndef IN_RING3
1885 if (pApic->fRZEnabled)
1886 { /* likely */ }
1887 else
1888 return VINF_CPUM_R3_MSR_READ;
1889#endif
1890
1891 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrRead));
1892
1893 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1894 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
1895 || pApic->fHyperVCompatMode))
1896 {
1897 switch (u32Reg)
1898 {
1899 /* Special handling for x2APIC: */
1900 case MSR_IA32_X2APIC_ICR:
1901 {
1902 *pu64Value = apicGetIcrNoCheck(pVCpu);
1903 break;
1904 }
1905
1906 /* Special handling, compatible with xAPIC: */
1907 case MSR_IA32_X2APIC_TIMER_CCR:
1908 {
1909 uint32_t uValue;
1910 rcStrict = apicGetTimerCcr(pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1911 *pu64Value = uValue;
1912 break;
1913 }
1914
1915 /* Special handling, compatible with xAPIC: */
1916 case MSR_IA32_X2APIC_PPR:
1917 {
1918 *pu64Value = apicGetPpr(pVCpu);
1919 break;
1920 }
1921
1922 /* Raw read, compatible with xAPIC: */
1923 case MSR_IA32_X2APIC_ID:
1924 case MSR_IA32_X2APIC_VERSION:
1925 case MSR_IA32_X2APIC_TPR:
1926 case MSR_IA32_X2APIC_LDR:
1927 case MSR_IA32_X2APIC_SVR:
1928 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1929 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1930 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1931 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1932 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1933 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1934 case MSR_IA32_X2APIC_ESR:
1935 case MSR_IA32_X2APIC_LVT_TIMER:
1936 case MSR_IA32_X2APIC_LVT_THERMAL:
1937 case MSR_IA32_X2APIC_LVT_PERF:
1938 case MSR_IA32_X2APIC_LVT_LINT0:
1939 case MSR_IA32_X2APIC_LVT_LINT1:
1940 case MSR_IA32_X2APIC_LVT_ERROR:
1941 case MSR_IA32_X2APIC_TIMER_ICR:
1942 case MSR_IA32_X2APIC_TIMER_DCR:
1943 {
1944 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1945 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1946 *pu64Value = apicReadRaw32(pXApicPage, offReg);
1947 break;
1948 }
1949
1950 /* Write-only MSRs: */
1951 case MSR_IA32_X2APIC_SELF_IPI:
1952 case MSR_IA32_X2APIC_EOI:
1953 {
1954 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1955 break;
1956 }
1957
1958 /* Reserved MSRs: */
1959 case MSR_IA32_X2APIC_LVT_CMCI:
1960 default:
1961 {
1962 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1963 break;
1964 }
1965 }
1966 }
1967 else
1968 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
1969
1970 return rcStrict;
1971}
1972
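/*
 * For reference, a minimal sketch of the X2APIC_GET_XAPIC_OFF() conversion the
 * raw reads above rely on: the Intel-defined mapping places x2APIC MSR index
 * 0x800 + N at xAPIC page offset N * 16. See Intel spec. 10.12.1.2 "x2APIC
 * Register Address Space". The function name is illustrative only.
 */
#if 0
static uint16_t exampleX2ApicMsrToXApicOff(uint32_t u32Reg)
{
    return (uint16_t)((u32Reg - UINT32_C(0x800)) << 4); /* e.g. TPR MSR 0x808 -> offset 0x80. */
}
#endif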
1973
1974/**
1975 * @interface_method_impl{PDMAPICREG,pfnWriteMsrR3}
1976 */
1977APICBOTHCBDECL(VBOXSTRICTRC) apicWriteMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t u64Value)
1978{
1979 /*
1980 * Validate.
1981 */
1982 VMCPU_ASSERT_EMT(pVCpu);
1983 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1984 RT_NOREF_PV(pDevIns);
1985
1986 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1987#ifndef IN_RING3
1988 if (pApic->fRZEnabled)
1989 { /* likely */ }
1990 else
1991 return VINF_CPUM_R3_MSR_WRITE;
1992#endif
1993
1994 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrWrite));
1995
1996 /*
1997 * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
1998 * accesses where they are ignored. Hence, we need to validate each register before
1999 * invoking the generic/xAPIC write functions.
2000 *
2001 * Bits 63:32 of all registers except the ICR are reserved; we'll handle this common
2002 * case first and validate the remaining bits on a per-register basis.
2003 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
2004 */
2005 if ( u32Reg != MSR_IA32_X2APIC_ICR
2006 && RT_HI_U32(u64Value))
2007 return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);
2008
2009 uint32_t u32Value = RT_LO_U32(u64Value);
2010 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2011 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
2012 || pApic->fHyperVCompatMode))
2013 {
2014 switch (u32Reg)
2015 {
2016 case MSR_IA32_X2APIC_TPR:
2017 {
2018 rcStrict = apicSetTprEx(pVCpu, u32Value, false /* fForceX2ApicBehaviour */);
2019 break;
2020 }
2021
2022 case MSR_IA32_X2APIC_ICR:
2023 {
2024 rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
2025 break;
2026 }
2027
2028 case MSR_IA32_X2APIC_SVR:
2029 {
2030 rcStrict = apicSetSvr(pVCpu, u32Value);
2031 break;
2032 }
2033
2034 case MSR_IA32_X2APIC_ESR:
2035 {
2036 rcStrict = apicSetEsr(pVCpu, u32Value);
2037 break;
2038 }
2039
2040 case MSR_IA32_X2APIC_TIMER_DCR:
2041 {
2042 rcStrict = apicSetTimerDcr(pVCpu, u32Value);
2043 break;
2044 }
2045
2046 case MSR_IA32_X2APIC_LVT_TIMER:
2047 case MSR_IA32_X2APIC_LVT_THERMAL:
2048 case MSR_IA32_X2APIC_LVT_PERF:
2049 case MSR_IA32_X2APIC_LVT_LINT0:
2050 case MSR_IA32_X2APIC_LVT_LINT1:
2051 case MSR_IA32_X2APIC_LVT_ERROR:
2052 {
2053 rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
2054 break;
2055 }
2056
2057 case MSR_IA32_X2APIC_TIMER_ICR:
2058 {
2059 rcStrict = apicSetTimerIcr(pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
2060 break;
2061 }
2062
2063 /* Write-only MSRs: */
2064 case MSR_IA32_X2APIC_SELF_IPI:
2065 {
2066 uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
2067 apicPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE);
2068 rcStrict = VINF_SUCCESS;
2069 break;
2070 }
2071
2072 case MSR_IA32_X2APIC_EOI:
2073 {
2074 rcStrict = apicSetEoi(pVCpu, u32Value, VINF_CPUM_R3_MSR_WRITE, false /* fForceX2ApicBehaviour */);
2075 break;
2076 }
2077
2078 /*
2079 * A Windows guest using the Hyper-V x2APIC MSR compatibility mode tries to write the
2080 * "high" LDR bits via this invalid MSR index (0x80E), which is absurd since the LDR is
2081 * a 32-bit register. The write value was 0xffffffff on a Windows 8.1 64-bit guest. We
2082 * can safely ignore this nonsense; see @bugref{8382#c7}.
2083 */
2084 case MSR_IA32_X2APIC_LDR + 1:
2085 {
2086 if (pApic->fHyperVCompatMode)
2087 rcStrict = VINF_SUCCESS;
2088 else
2089 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2090 break;
2091 }
2092
2093 /* Special treatment (read-only normally, but not with Hyper-V): */
2094 case MSR_IA32_X2APIC_LDR:
2095 {
2096 if (pApic->fHyperVCompatMode)
2097 {
2098 rcStrict = apicSetLdr(pVCpu, u32Value);
2099 break;
2100 }
2101 /* fallthru */
2102 }
2103 /* Read-only MSRs: */
2104 case MSR_IA32_X2APIC_ID:
2105 case MSR_IA32_X2APIC_VERSION:
2106 case MSR_IA32_X2APIC_PPR:
2107 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
2108 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
2109 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
2110 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
2111 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
2112 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
2113 case MSR_IA32_X2APIC_TIMER_CCR:
2114 {
2115 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
2116 break;
2117 }
2118
2119 /* Reserved MSRs: */
2120 case MSR_IA32_X2APIC_LVT_CMCI:
2121 default:
2122 {
2123 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2124 break;
2125 }
2126 }
2127 }
2128 else
2129 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);
2130
2131 return rcStrict;
2132}
2133
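/*
 * For reference, a minimal sketch of the reserved-bits rule enforced above:
 * in x2APIC mode every WRMSR except to the 64-bit ICR must have bits 63:32
 * clear, otherwise #GP(0) is raised. The helper name is illustrative only.
 */
#if 0
static bool exampleX2ApicWrmsrHighBitsValid(uint32_t u32Reg, uint64_t u64Value)
{
    if (u32Reg == MSR_IA32_X2APIC_ICR) /* The ICR is the only true 64-bit register. */
        return true;
    return (u64Value >> 32) == 0;
}
#endif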
2134
2135/**
2136 * @interface_method_impl{PDMAPICREG,pfnSetBaseMsrR3}
2137 */
2138APICBOTHCBDECL(VBOXSTRICTRC) apicSetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint64_t u64BaseMsr)
2139{
2140 Assert(pVCpu);
2141 NOREF(pDevIns);
2142
2143#ifdef IN_RING3
2144 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2145 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2146 APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
2147 APICMODE enmNewMode = apicGetMode(u64BaseMsr);
2148 uint64_t uBaseMsr = pApicCpu->uApicBaseMsr;
2149
2150 Log2(("APIC%u: ApicSetBaseMsr: u64BaseMsr=%#RX64 enmNewMode=%s enmOldMode=%s\n", pVCpu->idCpu, u64BaseMsr,
2151 apicGetModeName(enmNewMode), apicGetModeName(enmOldMode)));
2152
2153 /*
2154 * We do not support re-mapping the APIC base address because:
2155 * - We'll have to manage all the mappings ourselves in the APIC (reference counting based unmapping etc.)
2156 * i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
2157 * - It's unclear how/if IOM can fall back to handling regions as regular memory (if the MMIO
2158 * region remains mapped but doesn't belong to the called VCPU's APIC).
2159 */
2160 /** @todo Handle per-VCPU APIC base relocation. */
2161 if (MSR_IA32_APICBASE_GET_ADDR(uBaseMsr) != MSR_IA32_APICBASE_ADDR)
2162 {
2163 LogRelMax(5, ("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
2164 MSR_IA32_APICBASE_GET_ADDR(uBaseMsr)));
2165 return VERR_CPUM_RAISE_GP_0;
2166 }
2167
2168 /* Don't allow enabling xAPIC/x2APIC if the VM is configured with the APIC disabled. */
2169 if (pApic->enmMaxMode == PDMAPICMODE_NONE)
2170 {
2171 LogRel(("APIC%u: Disallowing APIC base MSR write as the VM is configured with APIC disabled!\n",
2172 pVCpu->idCpu));
2173 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_DISALLOWED_CONFIG);
2174 }
2175
2176 /*
2177 * Act on state transition.
2178 */
2179 if (enmNewMode != enmOldMode)
2180 {
2181 switch (enmNewMode)
2182 {
2183 case APICMODE_DISABLED:
2184 {
2185 /*
2186 * The APIC state needs to be reset (especially the APIC ID as x2APIC APIC ID bit layout
2187 * is different). We can start with a clean slate identical to the state after a power-up/reset.
2188 *
2189 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
2190 *
2191 * We'll also manually manage the APIC base MSR here. We want a single-point of commit
2192 * at the end of this function rather than updating it in apicR3ResetCpu. This means we also
2193 * need to update the CPUID leaf ourselves.
2194 */
2195 apicR3ResetCpu(pVCpu, false /* fResetApicBaseMsr */);
2196 uBaseMsr &= ~(MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD);
2197 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false /*fVisible*/);
2198 LogRel(("APIC%u: Switched mode to disabled\n", pVCpu->idCpu));
2199 break;
2200 }
2201
2202 case APICMODE_XAPIC:
2203 {
2204 if (enmOldMode != APICMODE_DISABLED)
2205 {
2206 LogRel(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
2207 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2208 }
2209
2210 uBaseMsr |= MSR_IA32_APICBASE_EN;
2211 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/);
2212 LogRel(("APIC%u: Switched mode to xAPIC\n", pVCpu->idCpu));
2213 break;
2214 }
2215
2216 case APICMODE_X2APIC:
2217 {
2218 if (pApic->enmMaxMode != PDMAPICMODE_X2APIC)
2219 {
2220 LogRel(("APIC%u: Disallowing transition to x2APIC mode as the VM is configured with the x2APIC disabled!\n",
2221 pVCpu->idCpu));
2222 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2223 }
2224
2225 if (enmOldMode != APICMODE_XAPIC)
2226 {
2227 LogRel(("APIC%u: Can only transition to x2APIC state from xAPIC state\n", pVCpu->idCpu));
2228 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2229 }
2230
2231 uBaseMsr |= MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD;
2232
2233 /*
2234 * The APIC ID needs updating when entering x2APIC mode.
2235 * A software-written APIC ID from xAPIC mode isn't preserved.
2236 * The APIC ID becomes read-only to software in x2APIC mode.
2237 *
2238 * See Intel spec. 10.12.5.1 "x2APIC States".
2239 */
2240 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2241 ASMMemZero32(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
2242 pX2ApicPage->id.u32ApicId = pVCpu->idCpu;
2243
2244 /*
2245 * LDR initialization occurs when entering x2APIC mode.
2246 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
2247 */
2248 pX2ApicPage->ldr.u32LogicalApicId = ((pX2ApicPage->id.u32ApicId & UINT32_C(0xffff0)) << 12)
2249 | (UINT32_C(1) << (pX2ApicPage->id.u32ApicId & UINT32_C(0xf)));
2250
2251 LogRel(("APIC%u: Switched mode to x2APIC\n", pVCpu->idCpu));
2252 break;
2253 }
2254
2255 case APICMODE_INVALID:
2256 default:
2257 {
2258 Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
2259 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2260 }
2261 }
2262 }
2263
2264 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
2265 return VINF_SUCCESS;
2266
2267#else /* !IN_RING3 */
2268 RT_NOREF_PV(pDevIns);
2269 RT_NOREF_PV(pVCpu);
2270 RT_NOREF_PV(u64BaseMsr);
2271 return VINF_CPUM_R3_MSR_WRITE;
2272#endif /* IN_RING3 */
2273}
2274
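/*
 * For reference, a minimal standalone sketch of the logical x2APIC ID
 * derivation performed above: the cluster ID (bits 19:4 of the local x2APIC
 * ID) lands in bits 31:16 and the logical ID is a single bit selected by bits
 * 3:0. See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local
 * x2APIC ID". The function name is illustrative only.
 */
#if 0
static uint32_t exampleX2ApicLdrFromApicId(uint32_t uApicId)
{
    return ((uApicId >> 4) << 16) | (UINT32_C(1) << (uApicId & UINT32_C(0xf)));
}
#endif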
2275
2276/**
2277 * @interface_method_impl{PDMAPICREG,pfnGetBaseMsrR3}
2278 */
2279APICBOTHCBDECL(uint64_t) apicGetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu)
2280{
2281 RT_NOREF_PV(pDevIns);
2282 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2283
2284 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2285 return pApicCpu->uApicBaseMsr;
2286}
2287
2288
2289/**
2290 * @interface_method_impl{PDMAPICREG,pfnSetTprR3}
2291 */
2292APICBOTHCBDECL(void) apicSetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Tpr)
2293{
2294 RT_NOREF_PV(pDevIns);
2295 apicSetTprEx(pVCpu, u8Tpr, false /* fForceX2ApicBehaviour */);
2296}
2297
2298
2299/**
2300 * Gets the highest priority pending interrupt.
2301 *
2302 * @returns true if any interrupt is pending, false otherwise.
2303 * @param pVCpu The cross context virtual CPU structure.
2304 * @param pu8PendingIntr Where to store the interrupt vector if the
2305 * interrupt is pending (optional, can be NULL).
2306 */
2307static bool apicGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
2308{
2309 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2310 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2311 if (irrv >= 0)
2312 {
2313 Assert(irrv <= (int)UINT8_MAX);
2314 if (pu8PendingIntr)
2315 *pu8PendingIntr = (uint8_t)irrv;
2316 return true;
2317 }
2318 return false;
2319}
2320
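/*
 * For reference, a minimal sketch of what apicGetHighestSetBitInReg() does for
 * the IRR above, viewing the 256-bit register as eight 32-bit fragments where
 * fragment i covers vectors i*32 .. i*32+31: scan from the top fragment down
 * and return the highest set vector, or -1 when none is set (matching the
 * rcNotFound argument passed by the caller).
 */
#if 0
static int exampleHighestSetVector(uint32_t const au32Frags[8])
{
    for (int i = 7; i >= 0; i--)
        if (au32Frags[i])
            for (int iBit = 31; iBit >= 0; iBit--)
                if (au32Frags[i] & (UINT32_C(1) << iBit))
                    return i * 32 + iBit;
    return -1;
}
#endif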
2321
2322/**
2323 * @interface_method_impl{PDMAPICREG,pfnGetTprR3}
2324 */
2325APICBOTHCBDECL(uint8_t) apicGetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu, bool *pfPending, uint8_t *pu8PendingIntr)
2326{
2327 RT_NOREF_PV(pDevIns);
2328 VMCPU_ASSERT_EMT(pVCpu);
2329 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2330
2331 if (pfPending)
2332 {
2333 /*
2334 * Just return whatever the highest pending interrupt is in the IRR.
2335 * The caller is responsible for figuring out if it's masked by the TPR etc.
2336 */
2337 *pfPending = apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
2338 }
2339
2340 return pXApicPage->tpr.u8Tpr;
2341}
2342
2343
2344/**
2345 * @interface_method_impl{PDMAPICREG,pfnGetTimerFreqR3}
2346 */
2347APICBOTHCBDECL(uint64_t) apicGetTimerFreq(PPDMDEVINS pDevIns)
2348{
2349 PVM pVM = PDMDevHlpGetVM(pDevIns);
2350 PVMCPU pVCpu = &pVM->aCpus[0];
2351 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2352 uint64_t uTimer = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer));
2353 return uTimer;
2354}
2355
2356
2357/**
2358 * @interface_method_impl{PDMAPICREG,pfnBusDeliverR3}
2359 * @remarks This is a private interface between the IOAPIC and the APIC.
2360 */
2361APICBOTHCBDECL(int) apicBusDeliver(PPDMDEVINS pDevIns, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
2362 uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uTagSrc)
2363{
2364 NOREF(uPolarity);
2365 NOREF(uTagSrc);
2366 PVM pVM = PDMDevHlpGetVM(pDevIns);
2367
2368 /*
2369 * The destination field (mask) in the IO APIC redirectable table entry is 8-bits.
2370 * Hence, the broadcast mask is 0xff.
2371 * See IO APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirectable Table Registers".
2372 */
2373 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)uTriggerMode;
2374 XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
2375 XAPICDESTMODE enmDestMode = (XAPICDESTMODE)uDestMode;
2376 uint32_t fDestMask = uDest;
2377 uint32_t fBroadcastMask = UINT32_C(0xff);
2378
2379 Log2(("APIC: apicBusDeliver: fDestMask=%#x enmDestMode=%s enmTriggerMode=%s enmDeliveryMode=%s uVector=%#x\n", fDestMask,
2380 apicGetDestModeName(enmDestMode), apicGetTriggerModeName(enmTriggerMode), apicGetDeliveryModeName(enmDeliveryMode),
2381 uVector));
2382
2383 bool fIntrAccepted;
2384 VMCPUSET DestCpuSet;
2385 apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
2386 VBOXSTRICTRC rcStrict = apicSendIntr(pVM, NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2387 &fIntrAccepted, VINF_SUCCESS /* rcRZ */);
2388 if (fIntrAccepted)
2389 return VBOXSTRICTRC_VAL(rcStrict);
2390 return VERR_APIC_INTR_DISCARDED;
2391}
2392
2393
2394/**
2395 * @interface_method_impl{PDMAPICREG,pfnLocalInterruptR3}
2396 * @remarks This is a private interface between the PIC and the APIC.
2397 */
2398APICBOTHCBDECL(VBOXSTRICTRC) apicLocalInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
2399{
2400 NOREF(pDevIns);
2401 AssertReturn(u8Pin <= 1, VERR_INVALID_PARAMETER);
2402 AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);
2403
2404 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2405
2406 /* If the APIC is enabled, the interrupt is subject to LVT programming. */
2407 if (apicIsEnabled(pVCpu))
2408 {
2409 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2410
2411 /* Pick the LVT entry corresponding to the interrupt pin. */
2412 static const uint16_t s_au16LvtOffsets[] =
2413 {
2414 XAPIC_OFF_LVT_LINT0,
2415 XAPIC_OFF_LVT_LINT1
2416 };
2417 Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
2418 uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
2419 uint32_t const uLvt = apicReadRaw32(pXApicPage, offLvt);
2420
2421 /* If software hasn't masked the interrupt in the LVT entry, proceed with interrupt processing. */
2422 if (!XAPIC_LVT_IS_MASKED(uLvt))
2423 {
2424 XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
2425 XAPICTRIGGERMODE enmTriggerMode = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);
2426
2427 switch (enmDeliveryMode)
2428 {
2429 case XAPICDELIVERYMODE_INIT:
2430 {
2431 /** @todo won't work in R0/RC because callers don't care about rcRZ. */
2432 AssertMsgFailed(("INIT through LINT0/LINT1 is not yet supported\n"));
2433 /* fallthru */
2434 }
2435 case XAPICDELIVERYMODE_FIXED:
2436 {
2437 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2438 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2439 bool fActive = RT_BOOL(u8Level & 1);
2440 bool volatile *pfActiveLine = u8Pin == 0 ? &pApicCpu->fActiveLint0 : &pApicCpu->fActiveLint1;
2441 /** @todo Polarity is busted elsewhere, we need to fix that
2442 * first. See @bugref{8386#c7}. */
2443#if 0
2444 uint8_t const u8Polarity = XAPIC_LVT_GET_POLARITY(uLvt);
2445 fActive ^= u8Polarity;
2446#endif
2447 if (!fActive)
2448 {
2449 ASMAtomicCmpXchgBool(pfActiveLine, false, true);
2450 break;
2451 }
2452
2453 /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
2454 if (offLvt == XAPIC_OFF_LVT_LINT1)
2455 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
2456 /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
2457 delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
2458 use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
2459 means. */
2460
2461 bool fSendIntr;
2462 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2463 {
2464 /* Recognize and send the interrupt only on an edge transition. */
2465 fSendIntr = ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2466 }
2467 else
2468 {
2469 /* For level-triggered interrupts, redundant interrupts are not a problem. */
2470 Assert(enmTriggerMode == XAPICTRIGGERMODE_LEVEL);
2471 ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2472
2473 /* Only when the remote IRR isn't set, set it and send the interrupt. */
2474 if (!(pXApicPage->lvt_lint0.all.u32LvtLint0 & XAPIC_LVT_REMOTE_IRR))
2475 {
2476 Assert(offLvt == XAPIC_OFF_LVT_LINT0);
2477 ASMAtomicOrU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, XAPIC_LVT_REMOTE_IRR);
2478 fSendIntr = true;
2479 }
2480 else
2481 fSendIntr = false;
2482 }
2483
2484 if (fSendIntr)
2485 {
2486 VMCPUSET DestCpuSet;
2487 VMCPUSET_EMPTY(&DestCpuSet);
2488 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2489 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode,
2490 &DestCpuSet, NULL /* pfIntrAccepted */, rcRZ);
2491 }
2492 break;
2493 }
2494
2495 case XAPICDELIVERYMODE_SMI:
2496 case XAPICDELIVERYMODE_NMI:
2497 {
2498 VMCPUSET DestCpuSet;
2499 VMCPUSET_EMPTY(&DestCpuSet);
2500 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2501 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2502 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2503 NULL /* pfIntrAccepted */, rcRZ);
2504 break;
2505 }
2506
2507 case XAPICDELIVERYMODE_EXTINT:
2508 {
2509 Log2(("APIC%u: apicLocalInterrupt: %s ExtINT through LINT%u\n", pVCpu->idCpu,
2510 u8Level ? "Raising" : "Lowering", u8Pin));
2511 if (u8Level)
2512 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2513 else
2514 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2515 break;
2516 }
2517
2518 /* Reserved/unknown delivery modes: */
2519 case XAPICDELIVERYMODE_LOWEST_PRIO:
2520 case XAPICDELIVERYMODE_STARTUP:
2521 default:
2522 {
2523 rcStrict = VERR_INTERNAL_ERROR_3;
2524 AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x (%s) on LINT%d\n", pVCpu->idCpu,
2525 enmDeliveryMode, apicGetDeliveryModeName(enmDeliveryMode), u8Pin));
2526 break;
2527 }
2528 }
2529 }
2530 }
2531 else
2532 {
2533 /* The APIC is hardware disabled. The CPU behaves as though there is no on-chip APIC. */
2534 if (u8Pin == 0)
2535 {
2536 /* LINT0 behaves as an external interrupt pin. */
2537 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, %s INTR\n", pVCpu->idCpu,
2538 u8Level ? "raising" : "lowering"));
2539 if (u8Level)
2540 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2541 else
2542 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2543 }
2544 else
2545 {
2546 /* LINT1 behaves as NMI. */
2547 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, raising NMI\n", pVCpu->idCpu));
2548 apicSetInterruptFF(pVCpu, PDMAPICIRQ_NMI);
2549 }
2550 }
2551
2552 return rcStrict;
2553}
2554
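/*
 * For reference, a minimal sketch of the edge-recognition idiom used above for
 * fixed-mode LINTx interrupts: the line state is tracked in a boolean and an
 * interrupt is sent only on an inactive-to-active transition, which the
 * compare-exchange detects atomically. The helper name is illustrative only.
 */
#if 0
static bool exampleRecognizeEdge(bool volatile *pfActiveLine)
{
    /* True only if the line was previously inactive (false -> true transition). */
    return ASMAtomicCmpXchgBool(pfActiveLine, true /* fNew */, false /* fOld */);
}
#endif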
2555
2556/**
2557 * @interface_method_impl{PDMAPICREG,pfnGetInterruptR3}
2558 */
2559APICBOTHCBDECL(int) apicGetInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t *pu8Vector, uint32_t *pu32TagSrc)
2560{
2561 RT_NOREF_PV(pDevIns);
2562 VMCPU_ASSERT_EMT(pVCpu);
2563 Assert(pu8Vector);
2564 NOREF(pu32TagSrc);
2565
2566 LogFlow(("APIC%u: apicGetInterrupt:\n", pVCpu->idCpu));
2567
2568 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2569 bool const fApicHwEnabled = apicIsEnabled(pVCpu);
2570 if ( fApicHwEnabled
2571 && pXApicPage->svr.u.fApicSoftwareEnable)
2572 {
2573 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2574 if (RT_LIKELY(irrv >= 0))
2575 {
2576 Assert(irrv <= (int)UINT8_MAX);
2577 uint8_t const uVector = irrv;
2578
2579 /*
2580 * This can happen if the APIC receives an interrupt when the CPU has interrupts
2581 * disabled but the TPR is raised by the guest before re-enabling interrupts.
2582 */
2583 uint8_t const uTpr = pXApicPage->tpr.u8Tpr;
2584 if ( uTpr > 0
2585 && XAPIC_TPR_GET_TP(uVector) <= XAPIC_TPR_GET_TP(uTpr))
2586 {
2587 Log2(("APIC%u: apicGetInterrupt: Interrupt masked. uVector=%#x uTpr=%#x SpuriousVector=%#x\n", pVCpu->idCpu,
2588 uVector, uTpr, pXApicPage->svr.u.u8SpuriousVector));
2589 *pu8Vector = uVector;
2590 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByTpr);
2591 return VERR_APIC_INTR_MASKED_BY_TPR;
2592 }
2593
2594 /*
2595 * The PPR should be up-to-date at this point through apicSetEoi().
2596 * We're on EMT so no parallel updates possible.
2597 * Subject the pending vector to PPR prioritization.
2598 */
2599 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
2600 if ( !uPpr
2601 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
2602 {
2603 apicClearVectorInReg(&pXApicPage->irr, uVector);
2604 apicSetVectorInReg(&pXApicPage->isr, uVector);
2605 apicUpdatePpr(pVCpu);
2606 apicSignalNextPendingIntr(pVCpu);
2607
2608 Log2(("APIC%u: apicGetInterrupt: Valid Interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
2609 *pu8Vector = uVector;
2610 return VINF_SUCCESS;
2611 }
2612 else
2613 {
2614 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByPpr);
2615 Log2(("APIC%u: apicGetInterrupt: Interrupt's priority is not higher than the PPR. uVector=%#x PPR=%#x\n",
2616 pVCpu->idCpu, uVector, uPpr));
2617 }
2618 }
2619 else
2620 Log2(("APIC%u: apicGetInterrupt: No pending bits in IRR\n", pVCpu->idCpu));
2621 }
2622 else
2623 Log2(("APIC%u: apicGetInterrupt: APIC %s disabled\n", pVCpu->idCpu, !fApicHwEnabled ? "hardware" : "software"));
2624
2625 return VERR_APIC_INTR_NOT_PENDING;
2626}
2627
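/*
 * For reference, a minimal sketch of the priority comparison above, assuming
 * the XAPIC_TPR_GET_TP/XAPIC_PPR_GET_PP macros extract the priority class in
 * bits 7:4: a pending vector is deliverable only when its class is strictly
 * higher than the processor-priority class. See Intel spec. 10.8.3.1 "Task
 * and Processor Priorities". The helper name is illustrative only.
 */
#if 0
static bool exampleIsVectorDeliverable(uint8_t uVector, uint8_t uPpr)
{
    uint8_t const uVectorClass = uVector & 0xf0; /* Priority class, bits 7:4. */
    uint8_t const uPprClass    = uPpr    & 0xf0;
    return uVectorClass > uPprClass;
}
#endif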
2628
2629/**
2630 * @callback_method_impl{FNIOMMMIOREAD}
2631 */
2632APICBOTHCBDECL(int) apicReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
2633{
2634 NOREF(pvUser);
2635 Assert(!(GCPhysAddr & 0xf));
2636 Assert(cb == 4); RT_NOREF_PV(cb);
2637
2638 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2639 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2640 uint16_t offReg = GCPhysAddr & 0xff0;
2641 uint32_t uValue = 0;
2642
2643 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioRead));
2644
2645 int rc = VBOXSTRICTRC_VAL(apicReadRegister(pApicDev, pVCpu, offReg, &uValue));
2646 *(uint32_t *)pv = uValue;
2647
2648 Log2(("APIC%u: apicReadMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2649 return rc;
2650}
2651
2652
2653/**
2654 * @callback_method_impl{FNIOMMMIOWRITE}
2655 */
2656APICBOTHCBDECL(int) apicWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
2657{
2658 NOREF(pvUser);
2659 Assert(!(GCPhysAddr & 0xf));
2660 Assert(cb == 4); RT_NOREF_PV(cb);
2661
2662 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2663 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2664 uint16_t offReg = GCPhysAddr & 0xff0;
2665 uint32_t uValue = *(uint32_t *)pv;
2666
2667 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioWrite));
2668
2669 Log2(("APIC%u: apicWriteMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2670
2671 int rc = VBOXSTRICTRC_VAL(apicWriteRegister(pApicDev, pVCpu, offReg, uValue));
2672 return rc;
2673}
2674
2675
2676/**
2677 * Sets the interrupt pending force-flag and pokes the EMT if required.
2678 *
2679 * @param pVCpu The cross context virtual CPU structure.
2680 * @param enmType The IRQ type.
2681 */
2682VMM_INT_DECL(void) apicSetInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
2683{
2684 PVM pVM = pVCpu->CTX_SUFF(pVM);
2685 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
2686 CTX_SUFF(pApicDev->pApicHlp)->pfnSetInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
2687}
2688
2689
2690/**
2691 * Clears the interrupt pending force-flag.
2692 *
2693 * @param pVCpu The cross context virtual CPU structure.
2694 * @param enmType The IRQ type.
2695 */
2696VMM_INT_DECL(void) apicClearInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
2697{
2698 PVM pVM = pVCpu->CTX_SUFF(pVM);
2699 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
2700 pApicDev->CTX_SUFF(pApicHlp)->pfnClearInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
2701}
2702
2703
2704/**
2705 * Posts an interrupt to a target APIC.
2706 *
2707 * This function handles interrupts received from the system bus or
2708 * interrupts generated locally from the LVT or via a self IPI.
2709 *
2710 * Don't use this function to try and deliver ExtINT style interrupts.
2711 *
2712 * @returns true if the interrupt was accepted, false otherwise.
2713 * @param pVCpu The cross context virtual CPU structure.
2714 * @param uVector The vector of the interrupt to be posted.
2715 * @param enmTriggerMode The trigger mode of the interrupt.
2716 *
2717 * @thread Any.
2718 */
2719VMM_INT_DECL(bool) apicPostInterrupt(PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode)
2720{
2721 Assert(pVCpu);
2722 Assert(uVector > XAPIC_ILLEGAL_VECTOR_END);
2723
2724 PVM pVM = pVCpu->CTX_SUFF(pVM);
2725 PCAPIC pApic = VM_TO_APIC(pVM);
2726 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2727 bool fAccepted = true;
2728
2729 STAM_PROFILE_START(&pApicCpu->StatPostIntr, a);
2730
2731 /*
2732 * Only post valid interrupt vectors.
2733 * See Intel spec. 10.5.2 "Valid Interrupt Vectors".
2734 */
2735 if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
2736 {
2737 /*
2738 * If the interrupt is already pending in the IRR we can skip the
2739 * potentially expensive operation of poking the guest EMT out of execution.
2740 */
2741 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2742 if (!apicTestVectorInReg(&pXApicPage->irr, uVector)) /* PAV */
2743 {
2744 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u uVector=%#x\n", VMMGetCpuId(pVM), pVCpu->idCpu, uVector));
2745 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2746 {
2747 if (pApic->fPostedIntrsEnabled)
2748 { /** @todo posted-interrupt call to hardware */ }
2749 else
2750 {
2751 apicSetVectorInPib(pApicCpu->CTX_SUFF(pvApicPib), uVector);
2752 uint32_t const fAlreadySet = apicSetNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
2753 if (!fAlreadySet)
2754 {
2755 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for edge-triggered intr. uVector=%#x\n", uVector));
2756 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
2757 }
2758 }
2759 }
2760 else
2761 {
2762 /*
2763 * Level-triggered interrupts require updating the TMR and thus cannot be
2764 * delivered asynchronously.
2765 */
2766 apicSetVectorInPib(&pApicCpu->ApicPibLevel, uVector);
2767 uint32_t const fAlreadySet = apicSetNotificationBitInPib(&pApicCpu->ApicPibLevel);
2768 if (!fAlreadySet)
2769 {
2770 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for level-triggered intr. uVector=%#x\n", uVector));
2771 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
2772 }
2773 }
2774 }
2775 else
2776 {
2777 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u. Vector %#x Already in IRR, skipping\n", VMMGetCpuId(pVM),
2778 pVCpu->idCpu, uVector));
2779 STAM_COUNTER_INC(&pApicCpu->StatPostIntrAlreadyPending);
2780 }
2781 }
2782 else
2783 {
2784 fAccepted = false;
2785 apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);
2786 }
2787
2788 STAM_PROFILE_STOP(&pApicCpu->StatPostIntr, a);
2789 return fAccepted;
2790}
2791
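/*
 * For reference, a minimal sketch of the pending-interrupt bitmap (PIB)
 * hand-off used above, assuming a simplified PIB of four 64-bit vector words
 * plus a notification flag (names and layout are illustrative): the poster
 * sets the vector bit, then sets the notification flag, and pokes the target
 * EMT only when the flag was previously clear, so concurrent posters poke once.
 */
#if 0
typedef struct EXAMPLEPIB
{
    uint64_t volatile au64VectorBitmap[4]; /* 256 vector bits. */
    uint32_t volatile fOutstandingNotification;
} EXAMPLEPIB;

static void examplePostVector(EXAMPLEPIB *pPib, uint8_t uVector)
{
    ASMAtomicBitSet(&pPib->au64VectorBitmap[0], uVector);
    if (!ASMAtomicXchgU32(&pPib->fOutstandingNotification, 1))
    {
        /* First poster since the last harvest: set the force-flag / poke the VCPU here. */
    }
}
#endif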
2792
2793/**
2794 * Starts the APIC timer.
2795 *
2796 * @param pVCpu The cross context virtual CPU structure.
2797 * @param uInitialCount The timer's Initial-Count Register (ICR), must be >
2798 * 0.
2799 * @thread Any.
2800 */
2801VMM_INT_DECL(void) apicStartTimer(PVMCPU pVCpu, uint32_t uInitialCount)
2802{
2803 Assert(pVCpu);
2804 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2805 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
2806 Assert(uInitialCount > 0);
2807
2808 PCXAPICPAGE pXApicPage = APICCPU_TO_CXAPICPAGE(pApicCpu);
2809 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
2810 uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;
2811
2812 Log2(("APIC%u: apicStartTimer: uInitialCount=%#RX32 uTimerShift=%u cTicksToNext=%RU64\n", pVCpu->idCpu, uInitialCount,
2813 uTimerShift, cTicksToNext));
2814
2815 /*
2816 * The assumption here is that the timer doesn't tick during this call
2817 * and thus setting a relative time to fire next is accurate. The advantage,
2818 * however, is that u64TimerInitial is updated 'atomically' while setting the
2819 * next tick.
2820 */
2821 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
2822 TMTimerSetRelative(pTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
2823 apicHintTimerFreq(pApicCpu, uInitialCount, uTimerShift);
2824}
2825
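/*
 * For reference, a minimal sketch of how the timer shift used above is derived
 * from the Divide Configuration Register: the 3-bit divide value (DCR bits 1:0
 * plus bit 3) encodes a divider of 2^((value + 1) & 7), i.e. value 0 divides
 * by 2 and value 7 divides by 1, so cTicksToNext = uInitialCount << shift.
 * See Intel spec. 10.5.4 "APIC Timer". The function name is illustrative only.
 */
#if 0
static uint8_t exampleTimerShiftFromDcr(uint32_t uDcr)
{
    uint8_t const uDivideValue = (uDcr & 0x3) | ((uDcr >> 1) & 0x4); /* Bits 1:0 and bit 3. */
    return (uDivideValue + 1) & 7;
}
#endif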
2826
2827/**
2828 * Stops the APIC timer.
2829 *
2830 * @param pVCpu The cross context virtual CPU structure.
2831 * @thread Any.
2832 */
2833VMM_INT_DECL(void) apicStopTimer(PVMCPU pVCpu)
2834{
2835 Assert(pVCpu);
2836 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2837 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
2838
2839 Log2(("APIC%u: apicStopTimer\n", pVCpu->idCpu));
2840
2841 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
2842 TMTimerStop(pTimer); /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
2843 pApicCpu->uHintedTimerInitialCount = 0;
2844 pApicCpu->uHintedTimerShift = 0;
2845}
2846
2847
2848/**
2849 * Queues a pending interrupt as in-service.
2850 *
2851 * This function should only be needed without virtualized APIC
2852 * registers. With virtualized APIC registers, it's sufficient to keep
2853 * the interrupts pending in the IRR as the hardware takes care of
2854 * virtual interrupt delivery.
2855 *
2856 * @returns true if the interrupt was queued to in-service interrupts,
2857 * false otherwise.
2858 * @param pVCpu The cross context virtual CPU structure.
2859 * @param u8PendingIntr The pending interrupt to queue as
2860 * in-service.
2861 *
2862 * @remarks This assumes the caller has done the necessary checks and
2863 * is ready to actually service the interrupt (TPR,
2864 * interrupt shadow etc.)
2865 */
2866VMMDECL(bool) APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr)
2867{
2868 VMCPU_ASSERT_EMT(pVCpu);
2869
2870 PVM pVM = pVCpu->CTX_SUFF(pVM);
2871 PAPIC pApic = VM_TO_APIC(pVM);
2872 Assert(!pApic->fVirtApicRegsEnabled);
2873 NOREF(pApic);
2874
2875 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2876 bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
2877 if (fIsPending)
2878 {
2879 apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
2880 apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
2881 apicUpdatePpr(pVCpu);
2882 return true;
2883 }
2884 return false;
2885}
2886
2887
2888/**
2889 * De-queues a pending interrupt from in-service.
2890 *
2891 * This undoes APICQueueInterruptToService() for premature VM-exits before event
2892 * injection.
2893 *
2894 * @param pVCpu The cross context virtual CPU structure.
2895 * @param u8PendingIntr The pending interrupt to de-queue from
2896 * in-service.
2897 */
2898VMMDECL(void) APICDequeueInterruptFromService(PVMCPU pVCpu, uint8_t u8PendingIntr)
2899{
2900 VMCPU_ASSERT_EMT(pVCpu);
2901
2902 PVM pVM = pVCpu->CTX_SUFF(pVM);
2903 PAPIC pApic = VM_TO_APIC(pVM);
2904 Assert(!pApic->fVirtApicRegsEnabled);
2905 NOREF(pApic);
2906
2907 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2908 bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
2909 if (fInService)
2910 {
2911 apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
2912 apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
2913 apicUpdatePpr(pVCpu);
2914 }
2915}
2916
2917
2918/**
2919 * Updates pending interrupts from the pending-interrupt bitmaps to the IRR.
2920 *
2921 * @param pVCpu The cross context virtual CPU structure.
2922 */
2923VMMDECL(void) APICUpdatePendingInterrupts(PVMCPU pVCpu)
2924{
2925 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2926
2927 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2928 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2929 bool fHasPendingIntrs = false;
2930
2931 Log3(("APIC%u: APICUpdatePendingInterrupts:\n", pVCpu->idCpu));
2932 STAM_PROFILE_START(&pApicCpu->StatUpdatePendingIntrs, a);
2933
2934 /* Update edge-triggered pending interrupts. */
2935 PAPICPIB pPib = (PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib);
2936 for (;;)
2937 {
2938 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
2939 if (!fAlreadySet)
2940 break;
2941
2942 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
2943 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
2944 {
2945 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
2946 if (u64Fragment)
2947 {
2948 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
2949 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
2950
2951 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
2952 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2953
2954 pXApicPage->tmr.u[idxReg].u32Reg &= ~u32FragmentLo;
2955 pXApicPage->tmr.u[idxReg + 1].u32Reg &= ~u32FragmentHi;
2956 fHasPendingIntrs = true;
2957 }
2958 }
2959 }
2960
2961 /* Update level-triggered pending interrupts. */
2962 pPib = (PAPICPIB)&pApicCpu->ApicPibLevel;
2963 for (;;)
2964 {
2965 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)&pApicCpu->ApicPibLevel);
2966 if (!fAlreadySet)
2967 break;
2968
2969 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
2970 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
2971 {
2972 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
2973 if (u64Fragment)
2974 {
2975 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
2976 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
2977
2978 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
2979 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2980
2981 pXApicPage->tmr.u[idxReg].u32Reg |= u32FragmentLo;
2982 pXApicPage->tmr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2983 fHasPendingIntrs = true;
2984 }
2985 }
2986 }
2987
2988 STAM_PROFILE_STOP(&pApicCpu->StatUpdatePendingIntrs, a);
2989 Log3(("APIC%u: APICUpdatePendingInterrupts: fHasPendingIntrs=%RTbool\n", pVCpu->idCpu, fHasPendingIntrs));
2990
2991 if ( fHasPendingIntrs
2992 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC))
2993 apicSignalNextPendingIntr(pVCpu);
2994}
2995
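/*
 * For reference, a minimal sketch of the harvest step used above: each 64-bit
 * PIB fragment is atomically claimed with an exchange-to-zero and merged into
 * two consecutive 32-bit IRR fragments. The helper name is illustrative only.
 */
#if 0
static void exampleMergePibFragment(uint64_t volatile *pu64PibFrag,
                                    uint32_t volatile *pu32IrrLo, uint32_t volatile *pu32IrrHi)
{
    uint64_t const u64Fragment = ASMAtomicXchgU64(pu64PibFrag, 0);
    if (u64Fragment)
    {
        *pu32IrrLo |= (uint32_t)u64Fragment;         /* Bits 31:0. */
        *pu32IrrHi |= (uint32_t)(u64Fragment >> 32); /* Bits 63:32. */
    }
}
#endif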
2996
2997/**
2998 * Gets the highest priority pending interrupt.
2999 *
3000 * @returns true if any interrupt is pending, false otherwise.
3001 * @param pVCpu The cross context virtual CPU structure.
3002 * @param pu8PendingIntr Where to store the interrupt vector if the
3003 * interrupt is pending.
3004 */
3005VMMDECL(bool) APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
3006{
3007 VMCPU_ASSERT_EMT(pVCpu);
3008 return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
3009}
3010
3011
3012/**
3013 * Posts an interrupt to a target APIC, Hyper-V interface.
3014 *
3016 * @param pVCpu The cross context virtual CPU structure.
3017 * @param uVector The vector of the interrupt to be posted.
3018 * @param fAutoEoi Whether this interrupt has automatic EOI
3019 * treatment.
3020 * @param enmTriggerMode The trigger mode of the interrupt.
3021 *
3022 * @thread Any.
3023 */
3024VMM_INT_DECL(void) APICHvSendInterrupt(PVMCPU pVCpu, uint8_t uVector, bool fAutoEoi, XAPICTRIGGERMODE enmTriggerMode)
3025{
3026 Assert(pVCpu);
3027 Assert(!fAutoEoi); /** @todo AutoEOI. */
3028 RT_NOREF(fAutoEoi);
3029 apicPostInterrupt(pVCpu, uVector, enmTriggerMode);
3030}
3031
3032
3033/**
3034 * Sets the Task Priority Register (TPR), Hyper-V interface.
3035 *
3036 * @returns Strict VBox status code.
3037 * @param pVCpu The cross context virtual CPU structure.
3038 * @param uTpr The TPR value to set.
3039 *
3040 * @remarks Validates like in x2APIC mode.
3041 */
3042VMM_INT_DECL(VBOXSTRICTRC) APICHvSetTpr(PVMCPU pVCpu, uint8_t uTpr)
3043{
3044 Assert(pVCpu);
3045 VMCPU_ASSERT_EMT(pVCpu);
3046 return apicSetTprEx(pVCpu, uTpr, true /* fForceX2ApicBehaviour */);
3047}
3048
3049
3050/**
3051 * Gets the Task Priority Register (TPR), Hyper-V interface.
3052 *
3053 * @returns The TPR value.
3054 * @param pVCpu The cross context virtual CPU structure.
3055 */
3056VMM_INT_DECL(uint8_t) APICHvGetTpr(PVMCPU pVCpu)
3057{
3058 Assert(pVCpu);
3059 VMCPU_ASSERT_EMT(pVCpu);
3060
3061 /*
3062 * The APIC could be operating in xAPIC mode and thus we should not use the apicReadMsr()
3063 * interface which validates the APIC mode and will throw a #GP(0) if not in x2APIC mode.
3064 * We could use the apicReadRegister() MMIO interface, but that would require fetching
3065 * the PDMDEVINS pointer for no real benefit, so we just read the APIC page directly.
3066 */
3067 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
3068 return apicReadRaw32(pXApicPage, XAPIC_OFF_TPR);
3069}
3070
3071
3072/**
3073 * Sets the Interrupt Command Register (ICR), Hyper-V interface.
3074 *
3075 * @returns Strict VBox status code.
3076 * @param pVCpu The cross context virtual CPU structure.
3077 * @param uIcr The ICR value to set.
3078 */
3079VMM_INT_DECL(VBOXSTRICTRC) APICHvSetIcr(PVMCPU pVCpu, uint64_t uIcr)
3080{
3081 Assert(pVCpu);
3082 VMCPU_ASSERT_EMT(pVCpu);
3083 return apicSetIcr(pVCpu, uIcr, VINF_CPUM_R3_MSR_WRITE);
3084}
3085
3086
3087/**
3088 * Gets the Interrupt Command Register (ICR), Hyper-V interface.
3089 *
3090 * @returns The ICR value.
3091 * @param pVCpu The cross context virtual CPU structure.
3092 */
3093VMM_INT_DECL(uint64_t) APICHvGetIcr(PVMCPU pVCpu)
3094{
3095 Assert(pVCpu);
3096 VMCPU_ASSERT_EMT(pVCpu);
3097 return apicGetIcrNoCheck(pVCpu);
3098}
3099
3100
3101/**
3102 * Sets the End-Of-Interrupt (EOI) register, Hyper-V interface.
3103 *
3104 * @returns Strict VBox status code.
3105 * @param pVCpu The cross context virtual CPU structure.
3106 * @param uEoi The EOI value.
3107 */
3108VMM_INT_DECL(VBOXSTRICTRC) APICHvSetEoi(PVMCPU pVCpu, uint32_t uEoi)
3109{
3110 Assert(pVCpu);
3111 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3112 return apicSetEoi(pVCpu, uEoi, VINF_CPUM_R3_MSR_WRITE, true /* fForceX2ApicBehaviour */);
3113}
3114