VirtualBox
source: vbox/trunk/src/VBox/VMM/VMMAll/APICAll.cpp@107631

Last change on this file since 107631 was 107308, checked in by vboxsync, 5 weeks ago:

VMM: bugref:10759 Refactor GIC for use with different backends.

/* $Id: APICAll.cpp 107308 2024-12-13 08:09:39Z vboxsync $ */
/** @file
 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
 */

/*
 * Copyright (C) 2016-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DEV_APIC
#define VMCPU_INCL_CPUM_GST_CTX /* for macOS hack */
#include "APICInternal.h"
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcpuset.h>
#ifdef IN_RING0
# include <VBox/vmm/gvmm.h>
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static void apicSetInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType);
static void apicStopTimer(PVMCPUCC pVCpu);


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
/** An ordered array of valid LVT masks. */
static const uint32_t g_au32LvtValidMasks[] =
{
    XAPIC_LVT_TIMER_VALID,
    XAPIC_LVT_THERMAL_VALID,
    XAPIC_LVT_PERF_VALID,
    XAPIC_LVT_LINT_VALID,   /* LINT0 */
    XAPIC_LVT_LINT_VALID,   /* LINT1 */
    XAPIC_LVT_ERROR_VALID
};
#endif

#if 0
/** @todo CMCI */
static const uint32_t g_au32LvtExtValidMask[] =
{
    XAPIC_LVT_CMCI_VALID
};
#endif


/**
 * Checks if a vector is set in an APIC 256-bit sparse register.
 *
 * @returns true if the specified vector is set, false otherwise.
 * @param   pApicReg    The APIC 256-bit sparse register.
 * @param   uVector     The vector to check if set.
 */
DECLINLINE(bool) apicTestVectorInReg(const volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
{
    const volatile uint8_t *pbBitmap = (const volatile uint8_t *)&pApicReg->u[0];
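    /* Each 32-vector fragment occupies its own 16-byte slot in the xAPIC
       register layout, so XAPIC_REG256_VECTOR_OFF() yields the fragment's byte
       offset and '<< 3' converts that to a bit offset before adding the bit
       position within the fragment; e.g. vector 0x41 lands at byte offset 32
       (fragment 2), bit 1. */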
    return ASMBitTest(pbBitmap, (XAPIC_REG256_VECTOR_OFF(uVector) << 3) + XAPIC_REG256_VECTOR_BIT(uVector));
}


/**
 * Sets the vector in an APIC 256-bit sparse register.
 *
 * @param   pApicReg    The APIC 256-bit sparse register.
 * @param   uVector     The vector to set.
 */
DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
{
    volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
    ASMAtomicBitSet(pbBitmap, (XAPIC_REG256_VECTOR_OFF(uVector) << 3) + XAPIC_REG256_VECTOR_BIT(uVector));
}


/**
 * Clears the vector in an APIC 256-bit sparse register.
 *
 * @param   pApicReg    The APIC 256-bit sparse register.
 * @param   uVector     The vector to clear.
 */
DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
{
    volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
    ASMAtomicBitClear(pbBitmap, (XAPIC_REG256_VECTOR_OFF(uVector) << 3) + XAPIC_REG256_VECTOR_BIT(uVector));
}


#if 0 /* unused */
/**
 * Checks if a vector is set in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @returns true if the specified vector is set, false otherwise.
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to check if set.
 */
DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    return ASMBitTest(pvPib, uVector);
}
#endif /* unused */


/**
 * Atomically sets the PIB notification bit.
 *
 * @returns non-zero if the bit was already set, 0 otherwise.
 * @param   pApicPib    Pointer to the PIB.
 */
DECLINLINE(uint32_t) apicSetNotificationBitInPib(PAPICPIB pApicPib)
{
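    /* The atomic exchange doubles as a test-and-set: the returned previous
       value is non-zero exactly when a notification was already outstanding,
       letting callers avoid raising redundant notifications. */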
    return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, RT_BIT_32(31));
}


/**
 * Atomically tests and clears the PIB notification bit.
 *
 * @returns non-zero if the bit was already set, 0 otherwise.
 * @param   pApicPib    Pointer to the PIB.
 */
DECLINLINE(uint32_t) apicClearNotificationBitInPib(PAPICPIB pApicPib)
{
    return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, UINT32_C(0));
}


/**
 * Sets the vector in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to set.
 */
DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    ASMAtomicBitSet(pvPib, uVector);
}

#if 0 /* unused */
/**
 * Clears the vector in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to clear.
 */
DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    ASMAtomicBitClear(pvPib, uVector);
}
#endif /* unused */

#if 0 /* unused */
/**
 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
 * register.
 *
 * @param   pApicReg        The APIC 256-bit sparse register.
 * @param   idxFragment     The index of the 32-bit fragment in @a
 *                          pApicReg.
 * @param   u32Fragment     The 32-bit vector fragment to OR.
 */
DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
{
    Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
    ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
}
#endif /* unused */


#if 0 /* unused */
/**
 * Atomically AND's a fragment (32 vectors) into an APIC
 * 256-bit sparse register.
 *
 * @param   pApicReg        The APIC 256-bit sparse register.
 * @param   idxFragment     The index of the 32-bit fragment in @a
 *                          pApicReg.
 * @param   u32Fragment     The 32-bit vector fragment to AND.
 */
DECLINLINE(void) apicAndVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
{
    Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
    ASMAtomicAndU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
}
#endif /* unused */


/**
 * Reports and returns appropriate error code for invalid MSR accesses.
 *
 * @returns VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u32Reg      The MSR being accessed.
 * @param   enmAccess   The invalid-access type.
 */
static int apicMsrAccessError(PVMCPUCC pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
{
    static struct
    {
        const char *pszBefore;   /* The error message before printing the MSR index */
        const char *pszAfter;    /* The error message after printing the MSR index */
    } const s_aAccess[] =
    {
        /* enmAccess  pszBefore                        pszAfter */
        /* 0 */     { "read MSR",                      " while not in x2APIC mode" },
        /* 1 */     { "write MSR",                     " while not in x2APIC mode" },
        /* 2 */     { "read reserved/unknown MSR",     "" },
        /* 3 */     { "write reserved/unknown MSR",    "" },
        /* 4 */     { "read write-only MSR",           "" },
        /* 5 */     { "write read-only MSR",           "" },
        /* 6 */     { "read reserved bits of MSR",     "" },
        /* 7 */     { "write reserved bits of MSR",    "" },
        /* 8 */     { "write an invalid value to MSR", "" },
        /* 9 */     { "write MSR",                     " disallowed by configuration" },
        /* 10 */    { "read MSR",                      " disallowed by configuration" },
    };
    AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);

    size_t const i = enmAccess;
    Assert(i < RT_ELEMENTS(s_aAccess));
    if (pVCpu->apic.s.cLogMaxAccessError++ < 5)
        LogRel(("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg, s_aAccess[i].pszAfter));
    return VERR_CPUM_RAISE_GP_0;
}


/**
 * Gets the descriptive APIC mode.
 *
 * @returns The name.
 * @param   enmMode     The xAPIC mode.
 */
const char *apicGetModeName(APICMODE enmMode)
{
    switch (enmMode)
    {
        case APICMODE_DISABLED:  return "Disabled";
        case APICMODE_XAPIC:     return "xAPIC";
        case APICMODE_X2APIC:    return "x2APIC";
        default: break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive destination format name.
 *
 * @returns The destination format name.
 * @param   enmDestFormat   The destination format.
 */
const char *apicGetDestFormatName(XAPICDESTFORMAT enmDestFormat)
{
    switch (enmDestFormat)
    {
        case XAPICDESTFORMAT_FLAT:      return "Flat";
        case XAPICDESTFORMAT_CLUSTER:   return "Cluster";
        default: break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive delivery mode name.
 *
 * @returns The delivery mode name.
 * @param   enmDeliveryMode     The delivery mode.
 */
const char *apicGetDeliveryModeName(XAPICDELIVERYMODE enmDeliveryMode)
{
    switch (enmDeliveryMode)
    {
        case XAPICDELIVERYMODE_FIXED:        return "Fixed";
        case XAPICDELIVERYMODE_LOWEST_PRIO:  return "Lowest-priority";
        case XAPICDELIVERYMODE_SMI:          return "SMI";
        case XAPICDELIVERYMODE_NMI:          return "NMI";
        case XAPICDELIVERYMODE_INIT:         return "INIT";
        case XAPICDELIVERYMODE_STARTUP:      return "SIPI";
        case XAPICDELIVERYMODE_EXTINT:       return "ExtINT";
        default: break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive destination mode name.
 *
 * @returns The destination mode name.
 * @param   enmDestMode     The destination mode.
 */
const char *apicGetDestModeName(XAPICDESTMODE enmDestMode)
{
    switch (enmDestMode)
    {
        case XAPICDESTMODE_PHYSICAL:  return "Physical";
        case XAPICDESTMODE_LOGICAL:   return "Logical";
        default: break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive trigger mode name.
 *
 * @returns The trigger mode name.
 * @param   enmTriggerMode  The trigger mode.
 */
const char *apicGetTriggerModeName(XAPICTRIGGERMODE enmTriggerMode)
{
    switch (enmTriggerMode)
    {
        case XAPICTRIGGERMODE_EDGE:     return "Edge";
        case XAPICTRIGGERMODE_LEVEL:    return "Level";
        default: break;
    }
    return "Invalid";
}


/**
 * Gets the destination shorthand name.
 *
 * @returns The destination shorthand name.
 * @param   enmDestShorthand    The destination shorthand.
 */
const char *apicGetDestShorthandName(XAPICDESTSHORTHAND enmDestShorthand)
{
    switch (enmDestShorthand)
    {
        case XAPICDESTSHORTHAND_NONE:           return "None";
        case XAPICDESTSHORTHAND_SELF:           return "Self";
        case XAPIDDESTSHORTHAND_ALL_INCL_SELF:  return "All including self";
        case XAPICDESTSHORTHAND_ALL_EXCL_SELF:  return "All excluding self";
        default: break;
    }
    return "Invalid";
}


/**
 * Gets the timer mode name.
 *
 * @returns The timer mode name.
 * @param   enmTimerMode    The timer mode.
 */
const char *apicGetTimerModeName(XAPICTIMERMODE enmTimerMode)
{
    switch (enmTimerMode)
    {
        case XAPICTIMERMODE_ONESHOT:        return "One-shot";
        case XAPICTIMERMODE_PERIODIC:       return "Periodic";
        case XAPICTIMERMODE_TSC_DEADLINE:   return "TSC deadline";
        default: break;
    }
    return "Invalid";
}


/**
 * Gets the APIC mode given the base MSR value.
 *
 * @returns The APIC mode.
 * @param   uApicBaseMsr    The APIC Base MSR value.
 */
APICMODE apicGetMode(uint64_t uApicBaseMsr)
{
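    /* Bits 11:10 of IA32_APIC_BASE are the EN and EXTD bits: 0 = disabled,
       1 = invalid (EXTD without EN), 2 = xAPIC (EN only), 3 = x2APIC (EN+EXTD).
       See Intel spec. 10.12.1 "Detecting and Enabling x2APIC Mode". */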
    uint32_t const uMode   = (uApicBaseMsr >> 10) & UINT64_C(3);
    APICMODE const enmMode = (APICMODE)uMode;
#ifdef VBOX_STRICT
    /* Paranoia. */
    switch (uMode)
    {
        case APICMODE_DISABLED:
        case APICMODE_INVALID:
        case APICMODE_XAPIC:
        case APICMODE_X2APIC:
            break;
        default:
            AssertMsgFailed(("Invalid mode"));
    }
#endif
    return enmMode;
}


/**
 * @interface_method_impl{PDMAPICBACKEND,pfnIsEnabled}
 */
static DECLCALLBACK(bool) apicIsEnabled(PCVMCPUCC pVCpu)
{
    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    return RT_BOOL(pApicCpu->uApicBaseMsr & MSR_IA32_APICBASE_EN);
}


/**
 * Finds the most significant set bit in an APIC 256-bit sparse register.
 *
 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
 * @param   pReg            The APIC 256-bit sparse register.
 * @param   rcNotFound      What to return when no bit is set.
 */
static int apicGetHighestSetBitInReg(volatile const XAPIC256BITREG *pReg, int rcNotFound)
{
    ssize_t const  cFragments     = RT_ELEMENTS(pReg->u);
    unsigned const uFragmentShift = 5;
    AssertCompile(1 << uFragmentShift == sizeof(pReg->u[0].u32Reg) * 8);
    for (ssize_t i = cFragments - 1; i >= 0; i--)
    {
        uint32_t const uFragment = pReg->u[i].u32Reg;
        if (uFragment)
        {
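            /* ASMBitLastSetU32 returns a 1-based bit index, hence the decrement
               below; OR-ing in (i << uFragmentShift) rebases the fragment-local
               index to the 256-bit register, e.g. fragment 3, bit 4 -> vector 100. */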
            unsigned idxSetBit = ASMBitLastSetU32(uFragment);
            --idxSetBit;
            idxSetBit |= i << uFragmentShift;
            return idxSetBit;
        }
    }
    return rcNotFound;
}


/**
 * Reads a 32-bit register at a specified offset.
 *
 * @returns The value at the specified offset.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offReg      The offset of the register being read.
 */
static DECLCALLBACK(uint32_t) apicReadRaw32(PCVMCPUCC pVCpu, uint16_t offReg)
{
    PCXAPICPAGE pcXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    Assert(offReg < sizeof(*pcXApicPage) - sizeof(uint32_t));
    uint8_t const *pbXApic = (const uint8_t *)pcXApicPage;
    uint32_t const uValue  = *(const uint32_t *)(pbXApic + offReg);
    return uValue;
}


/**
 * Writes a 32-bit register at a specified offset.
 *
 * @param   pXApicPage      The xAPIC page.
 * @param   offReg          The offset of the register being written.
 * @param   uReg            The value of the register.
 */
DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
{
    Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
    uint8_t *pbXApic = (uint8_t *)pXApicPage;
    *(uint32_t *)(pbXApic + offReg) = uReg;
}


/**
 * Sets an error in the internal ESR of the specified APIC.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uError      The error.
 * @thread  Any.
 */
DECLINLINE(void) apicSetError(PVMCPUCC pVCpu, uint32_t uError)
{
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
}


/**
 * Clears all errors in the internal ESR.
 *
 * @returns The value of the internal ESR before clearing.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
DECLINLINE(uint32_t) apicClearAllErrors(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
}


/**
 * Signals the guest if a pending interrupt is ready to be serviced.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
static void apicSignalNextPendingIntr(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    if (pXApicPage->svr.u.fApicSoftwareEnable)
    {
        int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1 /* rcNotFound */);
        if (irrv >= 0)
        {
            Assert(irrv <= (int)UINT8_MAX);
            uint8_t const uVector = irrv;
            int const isrv = apicGetHighestSetBitInReg(&pXApicPage->isr, 0 /* rcNotFound */);
            Assert(isrv <= (int)UINT8_MAX);
            uint8_t const uIsrVec = isrv;

            /* uIsrVec reflects the highest interrupt vector currently serviced (i.e. in ISR),
             * or zero if there's none. We want to report a pending interrupt only if IRR > ISR but
             * regardless of TPR. Hence we can't look at the PPR value, since that also reflects TPR.
             * NB: The APIC emulation will know when ISR changes, but not necessarily when TPR does.
             */
            if (XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uIsrVec))
            {
                Log2(("APIC%u: apicSignalNextPendingIntr: Signalling pending interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
                apicSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
            }
            else
                Log2(("APIC%u: apicSignalNextPendingIntr: Nothing to signal yet. uVector=%#x uIsrVec=%#x\n", pVCpu->idCpu, uVector, uIsrVec));
        }
    }
    else
    {
        Log2(("APIC%u: apicSignalNextPendingIntr: APIC software-disabled, clearing pending interrupt\n", pVCpu->idCpu));
        apicClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
    }
}


/**
 * Sets the Spurious-Interrupt Vector Register (SVR).
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uSvr        The SVR value.
 */
static int apicSetSvr(PVMCPUCC pVCpu, uint32_t uSvr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    uint32_t   uValidMask = XAPIC_SVR_VALID;
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    if (pXApicPage->version.u.fEoiBroadcastSupression)
        uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;

    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uSvr & ~uValidMask))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);

    Log2(("APIC%u: apicSetSvr: uSvr=%#RX32\n", pVCpu->idCpu, uSvr));
    apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
    if (!pXApicPage->svr.u.fApicSoftwareEnable)
    {
        /** @todo CMCI. */
        pXApicPage->lvt_timer.u.u1Mask = 1;
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        pXApicPage->lvt_thermal.u.u1Mask = 1;
#endif
        pXApicPage->lvt_perf.u.u1Mask  = 1;
        pXApicPage->lvt_lint0.u.u1Mask = 1;
        pXApicPage->lvt_lint1.u.u1Mask = 1;
        pXApicPage->lvt_error.u.u1Mask = 1;
    }

    apicSignalNextPendingIntr(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Sends an interrupt to one or more APICs.
 *
 * @returns Strict VBox status code.
 * @param   pVM                 The cross context VM structure.
 * @param   pVCpu               The cross context virtual CPU structure, can be
 *                              NULL if the source of the interrupt is not an
 *                              APIC (for e.g. a bus).
 * @param   uVector             The interrupt vector.
 * @param   enmTriggerMode      The trigger mode.
 * @param   enmDeliveryMode     The delivery mode.
 * @param   pDestCpuSet         The destination CPU set.
 * @param   pfIntrAccepted      Where to store whether this interrupt was
 *                              accepted by the target APIC(s) or not.
 *                              Optional, can be NULL.
 * @param   uSrcTag             The interrupt source tag (debugging).
 * @param   rcRZ                The return code if the operation cannot be
 *                              performed in the current context.
 */
static VBOXSTRICTRC apicSendIntr(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
                                 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, bool *pfIntrAccepted,
                                 uint32_t uSrcTag, int rcRZ)
{
    AssertCompile(sizeof(pVM->apic.s) <= sizeof(pVM->apic.padding));
    AssertCompile(sizeof(pVCpu->apic.s) <= sizeof(pVCpu->apic.padding));
#ifdef IN_RING0
    AssertCompile(sizeof(pVM->apicr0.s) <= sizeof(pVM->apicr0.padding));
#endif
    VBOXSTRICTRC  rcStrict  = VINF_SUCCESS;
    VMCPUID const cCpus     = pVM->cCpus;
    bool          fAccepted = false;
    switch (enmDeliveryMode)
    {
        case XAPICDELIVERYMODE_FIXED:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    PVMCPUCC pItVCpu = pVM->CTX_SUFF(apCpus)[idCpu];
                    if (apicIsEnabled(pItVCpu))
                        fAccepted = apicPostInterrupt(pItVCpu, uVector, enmTriggerMode, false /* fAutoEoi */, uSrcTag);
                }
            break;
        }

        case XAPICDELIVERYMODE_LOWEST_PRIO:
        {
            VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
            AssertMsgBreak(idCpu < pVM->cCpus, ("APIC: apicSendIntr: No CPU found for lowest-priority delivery mode! idCpu=%u\n", idCpu));
            PVMCPUCC pVCpuDst = pVM->CTX_SUFF(apCpus)[idCpu];
            if (apicIsEnabled(pVCpuDst))
                fAccepted = apicPostInterrupt(pVCpuDst, uVector, enmTriggerMode, false /* fAutoEoi */, uSrcTag);
            else
                AssertMsgFailed(("APIC: apicSendIntr: Target APIC not enabled in lowest-priority delivery mode! idCpu=%u\n", idCpu));
            break;
        }

        case XAPICDELIVERYMODE_SMI:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Raising SMI on VCPU%u\n", idCpu));
                    apicSetInterruptFF(pVM->CTX_SUFF(apCpus)[idCpu], PDMAPICIRQ_SMI);
                    fAccepted = true;
                }
            break;
        }

        case XAPICDELIVERYMODE_NMI:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    PVMCPUCC pItVCpu = pVM->CTX_SUFF(apCpus)[idCpu];
                    if (apicIsEnabled(pItVCpu))
                    {
                        Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
                        apicSetInterruptFF(pItVCpu, PDMAPICIRQ_NMI);
                        fAccepted = true;
                    }
                }
            break;
        }

        case XAPICDELIVERYMODE_INIT:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Issuing INIT to VCPU%u\n", idCpu));
                    VMMR3SendInitIpi(pVM, idCpu);
                    fAccepted = true;
                }
#else
            /* We need to return to ring-3 to deliver the INIT. */
            rcStrict  = rcRZ;
            fAccepted = true;
#endif
            break;
        }

        case XAPICDELIVERYMODE_STARTUP:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Issuing SIPI to VCPU%u\n", idCpu));
                    VMMR3SendStartupIpi(pVM, idCpu, uVector);
                    fAccepted = true;
                }
#else
            /* We need to return to ring-3 to deliver the SIPI. */
            rcStrict  = rcRZ;
            fAccepted = true;
            Log2(("APIC: apicSendIntr: SIPI issued, returning to RZ. rc=%Rrc\n", rcRZ));
#endif
            break;
        }

        case XAPICDELIVERYMODE_EXTINT:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Raising EXTINT on VCPU%u\n", idCpu));
                    apicSetInterruptFF(pVM->CTX_SUFF(apCpus)[idCpu], PDMAPICIRQ_EXTINT);
                    fAccepted = true;
                }
            break;
        }

        default:
        {
            AssertMsgFailed(("APIC: apicSendIntr: Unsupported delivery mode %#x (%s)\n", enmDeliveryMode,
                             apicGetDeliveryModeName(enmDeliveryMode)));
            break;
        }
    }

    /*
     * If an illegal vector is programmed, set the 'send illegal vector' error here if the
     * interrupt is being sent by an APIC.
     *
     * The 'receive illegal vector' will be set on the target APIC when the interrupt
     * gets generated, see apicPostInterrupt().
     *
     * See Intel spec. 10.5.3 "Error Handling".
     */
    if (   rcStrict != rcRZ
        && pVCpu)
    {
        /*
         * Flag only errors when the delivery mode is fixed and not others.
         *
         * Ubuntu 10.04-3 amd64 live CD with 2 VCPUs gets upset as it sends an SIPI to the
         * 2nd VCPU with vector 6 and checks the ESR for no errors, see @bugref{8245#c86}.
         */
        /** @todo The spec says this for LVT, but not explicitly for ICR-lo;
         *        it probably applies there too. */
        if (enmDeliveryMode == XAPICDELIVERYMODE_FIXED)
        {
            if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
                apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
        }
    }

    if (pfIntrAccepted)
        *pfIntrAccepted = fAccepted;

    return rcStrict;
}


/**
 * Checks if this APIC belongs to a logical destination.
 *
 * @returns true if the APIC belongs to the logical
 *          destination, false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   fDest       The destination mask.
 *
 * @thread  Any.
 */
static bool apicIsLogicalDest(PVMCPUCC pVCpu, uint32_t fDest)
{
    if (XAPIC_IN_X2APIC_MODE(pVCpu))
    {
        /*
         * Flat logical mode is not supported in x2APIC mode.
         * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
         *    - High 16 bits is the cluster ID.
         *    - Low 16 bits: each bit represents a unique APIC within the cluster.
         */
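        /* In x2APIC mode the LDR is read-only and derived from the APIC ID:
           cluster ID = ID >> 4, logical ID = 1 << (ID & 0xf). E.g. APIC ID 0x21
           yields LDR 0x00020002. See Intel spec. 10.12.10.2 "Logical Destination
           Mode in x2APIC Mode". */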
        PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
        uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
        if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
            return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
        return false;
    }

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
     * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
    if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
        return true;

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
    if (enmDestFormat == XAPICDESTFORMAT_FLAT)
    {
        /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
        uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
    }

    /*
     * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
     *    - High 4 bits is the cluster ID.
     *    - Low 4 bits: each bit represents a unique APIC within the cluster.
     */
    Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
    uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
    if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
    return false;
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
}


/**
 * Figures out the set of destination CPUs for a given destination mode, format
 * and delivery mode setting.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fDestMask       The destination mask.
 * @param   fBroadcastMask  The broadcast mask.
 * @param   enmDestMode     The destination mode.
 * @param   enmDeliveryMode The delivery mode.
 * @param   pDestCpuSet     The destination CPU set to update.
 */
static void apicGetDestCpuSet(PVMCC pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
                              XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
{
    VMCPUSET_EMPTY(pDestCpuSet);

    /*
     * Physical destination mode only supports either a broadcast or a single target.
     *    - Broadcast with lowest-priority delivery mode is not supported[1]; we deliver it
     *      as a regular broadcast like in fixed delivery mode.
     *    - For a single target, lowest-priority delivery mode makes no sense. We deliver
     *      to the target like in fixed delivery mode.
     *
     * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     */
    if (   enmDestMode == XAPICDESTMODE_PHYSICAL
        && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
        enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
    }

    uint32_t const cCpus = pVM->cCpus;
    if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        VMCPUID idCpuLowestTpr = NIL_VMCPUID;
        uint8_t u8LowestTpr    = UINT8_C(0xff);
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPUCC pVCpuDst = pVM->CTX_SUFF(apCpus)[idCpu];
            if (apicIsLogicalDest(pVCpuDst, fDestMask))
            {
                PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDst);
                uint8_t const u8Tpr    = pXApicPage->tpr.u8Tpr;   /* PAV */

                /*
                 * If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
                 * Hence the use of "<=" in the check below.
                 * See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration".
                 */
                if (u8Tpr <= u8LowestTpr)
                {
                    u8LowestTpr    = u8Tpr;
                    idCpuLowestTpr = idCpu;
                }
            }
        }
        if (idCpuLowestTpr != NIL_VMCPUID)
            VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
        return;
    }

    /*
     * x2APIC:
     *    - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
     * xAPIC:
     *    - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
     *    - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
     *
     * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
     * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     * [3] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    if ((fDestMask & fBroadcastMask) == fBroadcastMask)
    {
        VMCPUSET_FILL(pDestCpuSet);
        return;
    }

    if (enmDestMode == XAPICDESTMODE_PHYSICAL)
    {
        /* The destination mask is interpreted as the physical APIC ID of a single target. */
#if 1
        /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
        if (RT_LIKELY(fDestMask < cCpus))
            VMCPUSET_ADD(pDestCpuSet, fDestMask);
#else
        /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPUCC pVCpuDst = &pVM->aCpus[idCpu];
            if (XAPIC_IN_X2APIC_MODE(pVCpuDst))
            {
                PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDst);
                if (pX2ApicPage->id.u32ApicId == fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDst->idCpu);
            }
            else
            {
                PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDst);
                if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDst->idCpu);
            }
        }
#endif
    }
    else
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);

        /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
        if (RT_UNLIKELY(!fDestMask))
            return;

        /* The destination mask is interpreted as a bitmap of software-programmable logical APIC ID of the target APICs. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPUCC pVCpuDst = pVM->CTX_SUFF(apCpus)[idCpu];
            if (apicIsLogicalDest(pVCpuDst, fDestMask))
                VMCPUSET_ADD(pDestCpuSet, pVCpuDst->idCpu);
        }
    }
}


/**
 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
 * Command Register (ICR).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rcRZ        The return code if the operation cannot be
 *                      performed in the current context.
 */
DECLINLINE(VBOXSTRICTRC) apicSendIpi(PVMCPUCC pVCpu, int rcRZ)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    XAPICDELIVERYMODE const  enmDeliveryMode  = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
    XAPICDESTMODE const      enmDestMode      = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
    XAPICINITLEVEL const     enmInitLevel     = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
    XAPICTRIGGERMODE const   enmTriggerMode   = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
    XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
    uint8_t const            uVector          = pXApicPage->icr_lo.u.u8Vector;

    PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
    uint32_t const fDest = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;
    Log5(("apicSendIpi: delivery=%u mode=%u init=%u trigger=%u short=%u vector=%#x fDest=%#x\n",
          enmDeliveryMode, enmDestMode, enmInitLevel, enmTriggerMode, enmDestShorthand, uVector, fDest));

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * INIT Level De-assert is not supported on Pentium 4 and Xeon processors.
     * Apparently, this also applies to NMI, SMI, lowest-priority and fixed delivery modes,
     * see @bugref{8245#c116}.
     *
     * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)" for a table of valid ICR combinations.
     */
    if (   enmTriggerMode == XAPICTRIGGERMODE_LEVEL
        && enmInitLevel   == XAPICINITLEVEL_DEASSERT
        && (   enmDeliveryMode == XAPICDELIVERYMODE_FIXED
            || enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO
            || enmDeliveryMode == XAPICDELIVERYMODE_SMI
            || enmDeliveryMode == XAPICDELIVERYMODE_NMI
            || enmDeliveryMode == XAPICDELIVERYMODE_INIT))
    {
        Log2(("APIC%u: %s level de-assert unsupported, ignoring!\n", pVCpu->idCpu, apicGetDeliveryModeName(enmDeliveryMode)));
        return VINF_SUCCESS;
    }
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif

    /*
     * The destination and delivery modes are ignored/by-passed when a destination shorthand is specified.
     * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
     */
    VMCPUSET DestCpuSet;
    switch (enmDestShorthand)
    {
        case XAPICDESTSHORTHAND_NONE:
        {
            PVMCC pVM = pVCpu->CTX_SUFF(pVM);
            uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
            apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_SELF:
        {
            VMCPUSET_EMPTY(&DestCpuSet);
            VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
            break;
        }

        case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
            break;
        }
    }

    return apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
                        NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
}


/**
 * Sets the Interrupt Command Register (ICR) high dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uIcrHi      The ICR high dword.
 */
static VBOXSTRICTRC apicSetIcrHi(PVMCPUCC pVCpu, uint32_t uIcrHi)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
    STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrHiWrite);
    Log2(("APIC%u: apicSetIcrHi: uIcrHi=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_hi.all.u32IcrHi));

    return VINF_SUCCESS;
}


/**
 * Sets the Interrupt Command Register (ICR) low dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uIcrLo          The ICR low dword.
 * @param   rcRZ            The return code if the operation cannot be performed
 *                          in the current context.
 * @param   fUpdateStat     Whether to update the ICR low write statistics
 *                          counter.
 */
static VBOXSTRICTRC apicSetIcrLo(PVMCPUCC pVCpu, uint32_t uIcrLo, int rcRZ, bool fUpdateStat)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR_VALID;
    Log2(("APIC%u: apicSetIcrLo: uIcrLo=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_lo.all.u32IcrLo));

    if (fUpdateStat)
        STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrLoWrite);
    RT_NOREF(fUpdateStat);

    return apicSendIpi(pVCpu, rcRZ);
}


/**
 * Sets the Interrupt Command Register (ICR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u64Icr      The ICR (High and Low combined).
 * @param   rcRZ        The return code if the operation cannot be performed
 *                      in the current context.
 *
 * @remarks This function is used by both x2APIC interface and the Hyper-V
 *          interface, see PDMApicHvSetIcr. The Hyper-V spec isn't clear what
 *          happens when invalid bits are set. For the time being, it will
 *          \#GP like a regular x2APIC access.
 */
static DECLCALLBACK(VBOXSTRICTRC) apicSetIcr(PVMCPUCC pVCpu, uint64_t u64Icr, int rcRZ)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* Validate. */
    uint32_t const uLo = RT_LO_U32(u64Icr);
    if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR_VALID)))
    {
        /* Update high dword first, then update the low dword which sends the IPI. */
        PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
        pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(u64Icr);
        STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrFullWrite);
        return apicSetIcrLo(pVCpu, uLo, rcRZ, false /* fUpdateStat */);
    }
    return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
}


/**
 * Sets the Error Status Register (ESR).
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uEsr        The ESR value.
 */
static int apicSetEsr(PVMCPUCC pVCpu, uint32_t uEsr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    Log2(("APIC%u: apicSetEsr: uEsr=%#RX32\n", pVCpu->idCpu, uEsr));

    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uEsr & ~XAPIC_ESR_WO_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);

    /*
     * Writes to the ESR cause the internal state to be updated in the register,
     * clearing the original state. See AMD spec. 16.4.6 "APIC Error Interrupts".
     */
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Updates the Processor Priority Register (PPR).
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
static void apicUpdatePpr(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* See Intel spec 10.8.3.1 "Task and Processor Priorities". */
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    uint8_t const uIsrv = apicGetHighestSetBitInReg(&pXApicPage->isr, 0 /* rcNotFound */);
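    /* The PPR is the higher of the TPR and the priority class (bits 7:4) of
       the highest in-service vector; e.g. TPR=0x45 with ISRV=0x61 yields
       PPR=0x60, while TPR=0x65 with ISRV=0x61 yields PPR=0x65. */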
    uint8_t uPpr;
    if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
        uPpr = pXApicPage->tpr.u8Tpr;
    else
        uPpr = XAPIC_PPR_GET_PP(uIsrv);
    pXApicPage->ppr.u8Ppr = uPpr;
}


/**
 * Gets the Processor Priority Register (PPR).
 *
 * @returns The PPR value.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
static uint8_t apicGetPpr(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVCpu->apic.s.StatTprRead);

    /*
     * With virtualized APIC registers or with TPR virtualization, the hardware may
     * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
     * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
     *
     * In all other instances, whenever the TPR or ISR changes, we need to update the PPR
     * as well (e.g. like we do manually in apicInitIpi and by calling apicUpdatePpr).
     */
    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    if (pApic->fVirtApicRegsEnabled)        /** @todo re-think this */
        apicUpdatePpr(pVCpu);
    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    return pXApicPage->ppr.u8Ppr;
}


/**
 * Sets the Task Priority Register (TPR).
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu                   The cross context virtual CPU structure.
 * @param   uTpr                    The TPR value.
 * @param   fForceX2ApicBehaviour   Pretend the APIC is in x2APIC mode during
 *                                  this write.
 */
static int apicSetTprEx(PVMCPUCC pVCpu, uint32_t uTpr, bool fForceX2ApicBehaviour)
{
    VMCPU_ASSERT_EMT(pVCpu);

    Log2(("APIC%u: apicSetTprEx: uTpr=%#RX32\n", pVCpu->idCpu, uTpr));
    STAM_COUNTER_INC(&pVCpu->apic.s.StatTprWrite);

    bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
    if (   fX2ApicMode
        && (uTpr & ~XAPIC_TPR_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->tpr.u8Tpr = uTpr;
    apicUpdatePpr(pVCpu);
    apicSignalNextPendingIntr(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Sets the End-Of-Interrupt (EOI) register.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu                   The cross context virtual CPU structure.
 * @param   uEoi                    The EOI value.
 * @param   fForceX2ApicBehaviour   Pretend the APIC is in x2APIC mode during
 *                                  this write.
 */
static DECLCALLBACK(VBOXSTRICTRC) apicSetEoi(PVMCPUCC pVCpu, uint32_t uEoi, bool fForceX2ApicBehaviour)
{
    VMCPU_ASSERT_EMT(pVCpu);

    Log2(("APIC%u: apicSetEoi: uEoi=%#RX32\n", pVCpu->idCpu, uEoi));
    STAM_COUNTER_INC(&pVCpu->apic.s.StatEoiWrite);

    bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
    if (   fX2ApicMode
        && (uEoi & ~XAPIC_EOI_WO_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    int isrv = apicGetHighestSetBitInReg(&pXApicPage->isr, -1 /* rcNotFound */);
    if (isrv >= 0)
    {
        /*
         * Broadcast the EOI to the I/O APIC(s).
         *
         * We'll handle the EOI broadcast first as there is a tiny chance we get rescheduled to
         * ring-3 due to contention on the I/O APIC lock. This way we don't mess with the rest
         * of the APIC state and simply restart the EOI write operation from ring-3.
         */
        Assert(isrv <= (int)UINT8_MAX);
        uint8_t const uVector      = isrv;
        bool const fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
        if (fLevelTriggered)
        {
            PDMIoApicBroadcastEoi(pVCpu->CTX_SUFF(pVM), uVector);

            /*
             * Clear the vector from the TMR.
             *
             * The broadcast to I/O APIC can re-trigger new interrupts to arrive via the bus. However,
             * apicUpdatePendingInterrupts() which updates TMR can only be done from EMT which we
             * currently are on, so no possibility of concurrent updates.
             */
            apicClearVectorInReg(&pXApicPage->tmr, uVector);

            /*
             * Clear the remote IRR bit for level-triggered, fixed mode LINT0 interrupt.
             * The LINT1 pin does not support level-triggered interrupts.
             * See Intel spec. 10.5.1 "Local Vector Table".
             */
            uint32_t const uLvtLint0 = pXApicPage->lvt_lint0.all.u32LvtLint0;
            if (   XAPIC_LVT_GET_REMOTE_IRR(uLvtLint0)
                && XAPIC_LVT_GET_VECTOR(uLvtLint0) == uVector
                && XAPIC_LVT_GET_DELIVERY_MODE(uLvtLint0) == XAPICDELIVERYMODE_FIXED)
            {
                ASMAtomicAndU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, ~XAPIC_LVT_REMOTE_IRR);
                Log2(("APIC%u: apicSetEoi: Cleared remote-IRR for LINT0. uVector=%#x\n", pVCpu->idCpu, uVector));
            }

            Log2(("APIC%u: apicSetEoi: Cleared level triggered interrupt from TMR. uVector=%#x\n", pVCpu->idCpu, uVector));
        }

        /*
         * Mark interrupt as serviced, update the PPR and signal pending interrupts.
         */
        Log2(("APIC%u: apicSetEoi: Clearing interrupt from ISR. uVector=%#x\n", pVCpu->idCpu, uVector));
        apicClearVectorInReg(&pXApicPage->isr, uVector);
        apicUpdatePpr(pVCpu);
        apicSignalNextPendingIntr(pVCpu);
    }
    else
    {
#ifdef DEBUG_ramshankar
        /** @todo Figure out if this is done intentionally by guests or is a bug
         *        in our emulation. Happened with Win10 SMP VM during reboot after
         *        installation of guest additions with 3D support. */
        AssertMsgFailed(("APIC%u: apicSetEoi: Failed to find any ISR bit\n", pVCpu->idCpu));
#endif
    }

    return VINF_SUCCESS;
}


/**
 * Sets the Logical Destination Register (LDR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uLdr        The LDR value.
 *
 * @remarks LDR is read-only in x2APIC mode.
 */
static VBOXSTRICTRC apicSetLdr(PVMCPUCC pVCpu, uint32_t uLdr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu) || pApic->fHyperVCompatMode); RT_NOREF_PV(pApic);

    Log2(("APIC%u: apicSetLdr: uLdr=%#RX32\n", pVCpu->idCpu, uLdr));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR_VALID);
    STAM_COUNTER_INC(&pVCpu->apic.s.StatLdrWrite);
    return VINF_SUCCESS;
}


/**
 * Sets the Destination Format Register (DFR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uDfr        The DFR value.
 *
 * @remarks DFR is not available in x2APIC mode.
 */
static VBOXSTRICTRC apicSetDfr(PVMCPUCC pVCpu, uint32_t uDfr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));

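    /* Only the model field (bits 31:28) is meaningful; all other DFR bits are
       reserved and must read as 1. Model 0xF selects flat mode, 0x0 cluster
       mode. See Intel spec. 10.6.2.2 "Logical Destination Mode". */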
    uDfr &= XAPIC_DFR_VALID;
    uDfr |= XAPIC_DFR_RSVD_MB1;

    Log2(("APIC%u: apicSetDfr: uDfr=%#RX32\n", pVCpu->idCpu, uDfr));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr);
    STAM_COUNTER_INC(&pVCpu->apic.s.StatDfrWrite);
    return VINF_SUCCESS;
}


/**
 * Sets the Timer Divide Configuration Register (DCR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uTimerDcr   The timer DCR value.
 */
static VBOXSTRICTRC apicSetTimerDcr(PVMCPUCC pVCpu, uint32_t uTimerDcr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uTimerDcr & ~XAPIC_TIMER_DCR_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);

    Log2(("APIC%u: apicSetTimerDcr: uTimerDcr=%#RX32\n", pVCpu->idCpu, uTimerDcr));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
    STAM_COUNTER_INC(&pVCpu->apic.s.StatDcrWrite);
    return VINF_SUCCESS;
}


/**
 * Gets the timer's Current Count Register (CCR).
 *
 * @returns VBox status code.
 * @param   pDevIns     The device instance.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rcBusy      The busy return code for the timer critical section.
 * @param   puValue     Where to store the LVT timer CCR.
 */
static VBOXSTRICTRC apicGetTimerCcr(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, int rcBusy, uint32_t *puValue)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(puValue);

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    *puValue = 0;

    /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
    if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
        return VINF_SUCCESS;

    /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
    uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
    if (!uInitialCount)
        return VINF_SUCCESS;

    /*
     * Reading the virtual-sync clock requires locking its timer because it's not
     * a simple atomic operation, see tmVirtualSyncGetEx().
     *
     * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
     */
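    /* The CCR is derived on demand rather than counted down: CCR = ICR minus
       (elapsed ticks >> DCR shift), clamped at 0. E.g. ICR=1000 with 4096
       elapsed ticks and a divide-by-8 shift of 3 gives CCR = 1000 - 512 = 488. */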
    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    TMTIMERHANDLE hTimer = pApicCpu->hTimer;

    VBOXSTRICTRC rc = PDMDevHlpTimerLockClock(pDevIns, hTimer, rcBusy);
    if (rc == VINF_SUCCESS)
    {
        /* If the current-count register is 0, it implies the timer expired. */
        uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
        if (uCurrentCount)
        {
            uint64_t const cTicksElapsed = PDMDevHlpTimerGet(pDevIns, hTimer) - pApicCpu->u64TimerInitial;
            PDMDevHlpTimerUnlockClock(pDevIns, hTimer);
            uint8_t  const uTimerShift = apicGetTimerShift(pXApicPage);
            uint64_t const uDelta      = cTicksElapsed >> uTimerShift;
            if (uInitialCount > uDelta)
                *puValue = uInitialCount - uDelta;
        }
        else
            PDMDevHlpTimerUnlockClock(pDevIns, hTimer);
    }
    return rc;
}


/**
 * Sets the timer's Initial-Count Register (ICR).
 *
 * @returns Strict VBox status code.
 * @param   pDevIns         The device instance.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   rcBusy          The busy return code for the timer critical section.
 * @param   uInitialCount   The timer ICR.
 */
static VBOXSTRICTRC apicSetTimerIcr(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, int rcBusy, uint32_t uInitialCount)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PAPIC      pApic      = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    PAPICCPU   pApicCpu   = VMCPU_TO_APICCPU(pVCpu);
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);

    Log2(("APIC%u: apicSetTimerIcr: uInitialCount=%#RX32\n", pVCpu->idCpu, uInitialCount));
    STAM_COUNTER_INC(&pApicCpu->StatTimerIcrWrite);

    /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
    if (   pApic->fSupportsTscDeadline
        && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
        return VINF_SUCCESS;

    /*
     * The timer CCR may be modified by apicR3TimerCallback() in parallel,
     * so obtain the lock -before- updating it here to be consistent with the
     * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
     */
    TMTIMERHANDLE hTimer = pApicCpu->hTimer;
    VBOXSTRICTRC rc = PDMDevHlpTimerLockClock(pDevIns, hTimer, rcBusy);
    if (rc == VINF_SUCCESS)
    {
        pXApicPage->timer_icr.u32InitialCount = uInitialCount;
        pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
        if (uInitialCount)
            apicStartTimer(pVCpu, uInitialCount);
        else
            apicStopTimer(pVCpu);
        PDMDevHlpTimerUnlockClock(pDevIns, hTimer);
    }
    return rc;
}


/**
 * Sets an LVT entry.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offLvt      The LVT entry offset in the xAPIC page.
 * @param   uLvt        The LVT value to set.
 */
static VBOXSTRICTRC apicSetLvtEntry(PVMCPUCC pVCpu, uint16_t offLvt, uint32_t uLvt)
{
    VMCPU_ASSERT_EMT(pVCpu);

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    AssertMsg(   offLvt == XAPIC_OFF_LVT_TIMER
              || offLvt == XAPIC_OFF_LVT_THERMAL
              || offLvt == XAPIC_OFF_LVT_PERF
              || offLvt == XAPIC_OFF_LVT_LINT0
              || offLvt == XAPIC_OFF_LVT_LINT1
              || offLvt == XAPIC_OFF_LVT_ERROR,
              ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#RX16, uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));

    /*
     * If TSC-deadline mode isn't supported, ignore the bit in xAPIC mode
     * and raise #GP(0) in x2APIC mode.
     */
    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    if (offLvt == XAPIC_OFF_LVT_TIMER)
    {
        STAM_COUNTER_INC(&pVCpu->apic.s.StatLvtTimerWrite);
        if (   !pApic->fSupportsTscDeadline
            && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
        {
            if (XAPIC_IN_X2APIC_MODE(pVCpu))
                return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
            uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
            /** @todo TSC-deadline timer mode transition */
        }
    }

    /*
     * Validate rest of the LVT bits.
     */
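    /* LVT registers are spaced 16 bytes apart in the xAPIC page, so the index
       into g_au32LvtValidMasks is simply (offset - LVT base) / 16. */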
    uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
    AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);

    /*
     * For x2APIC, disallow setting of invalid/reserved bits.
     * For xAPIC, mask out invalid/reserved bits (i.e. ignore them).
     */
    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
        return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);

    uLvt &= g_au32LvtValidMasks[idxLvt];

    /*
     * In the software-disabled state, LVT mask-bit must remain set and attempts to clear the mask
     * bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
     */
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    if (!pXApicPage->svr.u.fApicSoftwareEnable)
        uLvt |= XAPIC_LVT_MASK;

    /*
     * It is unclear whether we should signal a 'send illegal vector' error here and ignore updating
     * the LVT entry when the delivery mode is 'fixed'[1] or update it in addition to signalling the
     * error or not signal the error at all. For now, we'll allow setting illegal vectors into the LVT
     * but set the 'send illegal vector' error here. The 'receive illegal vector' error will be set if
     * the interrupt for the vector happens to be generated, see apicPostInterrupt().
     *
     * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
     */
    if (RT_UNLIKELY(   XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
                    && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
        apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);

    Log2(("APIC%u: apicSetLvtEntry: offLvt=%#RX16 uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));

    apicWriteRaw32(pXApicPage, offLvt, uLvt);
    return VINF_SUCCESS;
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
}


#if 0
/**
 * Sets an LVT entry in the extended LVT range.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offLvt      The LVT entry offset in the xAPIC page.
 * @param   uLvt        The LVT value to set.
 */
static int apicSetLvtExtEntry(PVMCPUCC pVCpu, uint16_t offLvt, uint32_t uLvt)
{
    VMCPU_ASSERT_EMT(pVCpu);
    AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvtExtEntry: invalid offset %#RX16\n", pVCpu->idCpu, offLvt));

    /** @todo support CMCI. */
    return VERR_NOT_IMPLEMENTED;
}
#endif


/**
 * Hints TM about the APIC timer frequency.
 *
 * @param   pDevIns         The device instance.
 * @param   pApicCpu        The APIC CPU state.
 * @param   uInitialCount   The new initial count.
 * @param   uTimerShift     The new timer shift.
 * @thread  Any.
 */
void apicHintTimerFreq(PPDMDEVINS pDevIns, PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
{
    Assert(pApicCpu);

    if (   pApicCpu->uHintedTimerInitialCount != uInitialCount
        || pApicCpu->uHintedTimerShift        != uTimerShift)
    {
        uint32_t uHz;
        if (uInitialCount)
        {
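            /* The timer period in source-clock ticks is ICR << shift, so the
               hinted frequency is clock / (ICR << shift); e.g. a 1 GHz clock
               with ICR=100000 and a shift of 3 hints 1250 Hz. */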
1626 uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
1627 uHz = PDMDevHlpTimerGetFreq(pDevIns, pApicCpu->hTimer) / cTicksPerPeriod;
1628 }
1629 else
1630 uHz = 0;
1631
1632 PDMDevHlpTimerSetFrequencyHint(pDevIns, pApicCpu->hTimer, uHz);
1633 pApicCpu->uHintedTimerInitialCount = uInitialCount;
1634 pApicCpu->uHintedTimerShift = uTimerShift;
1635 }
1636}
1637
1638
1639/**
1640 * Gets the Interrupt Command Register (ICR) without performing any interface
1641 * checks.
1642 *
1643 * @returns The ICR value.
1644 * @param pVCpu The cross context virtual CPU structure.
1645 */
1646static DECLCALLBACK(uint64_t) apicGetIcrNoCheck(PVMCPUCC pVCpu)
1647{
1648 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
1649 uint64_t const uHi = pX2ApicPage->icr_hi.u32IcrHi;
1650 uint64_t const uLo = pX2ApicPage->icr_lo.all.u32IcrLo;
1651 uint64_t const uIcr = RT_MAKE_U64(uLo, uHi);
1652 return uIcr;
1653}
1654
1655
1656/**
1657 * Reads an APIC register.
1658 *
1659 * @returns VBox status code.
1660 * @param pDevIns The device instance.
1661 * @param pVCpu The cross context virtual CPU structure.
1662 * @param offReg The offset of the register being read.
1663 * @param puValue Where to store the register value.
1664 */
1665DECLINLINE(VBOXSTRICTRC) apicReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
1666{
1667 VMCPU_ASSERT_EMT(pVCpu);
1668 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1669
1670 uint32_t uValue = 0;
1671 VBOXSTRICTRC rc = VINF_SUCCESS;
1672 switch (offReg)
1673 {
1674 case XAPIC_OFF_ID:
1675 case XAPIC_OFF_VERSION:
1676 case XAPIC_OFF_TPR:
1677 case XAPIC_OFF_EOI:
1678 case XAPIC_OFF_RRD:
1679 case XAPIC_OFF_LDR:
1680 case XAPIC_OFF_DFR:
1681 case XAPIC_OFF_SVR:
1682 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1683 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1684 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1685 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1686 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1687 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1688 case XAPIC_OFF_ESR:
1689 case XAPIC_OFF_ICR_LO:
1690 case XAPIC_OFF_ICR_HI:
1691 case XAPIC_OFF_LVT_TIMER:
1692#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1693 case XAPIC_OFF_LVT_THERMAL:
1694#endif
1695 case XAPIC_OFF_LVT_PERF:
1696 case XAPIC_OFF_LVT_LINT0:
1697 case XAPIC_OFF_LVT_LINT1:
1698 case XAPIC_OFF_LVT_ERROR:
1699 case XAPIC_OFF_TIMER_ICR:
1700 case XAPIC_OFF_TIMER_DCR:
1701 {
1702 Assert( !XAPIC_IN_X2APIC_MODE(pVCpu)
1703 || ( offReg != XAPIC_OFF_DFR
1704 && offReg != XAPIC_OFF_ICR_HI
1705 && offReg != XAPIC_OFF_EOI));
1706 uValue = apicReadRaw32(pVCpu, offReg);
1707 Log2(("APIC%u: apicReadRegister: offReg=%#x uValue=%#x\n", pVCpu->idCpu, offReg, uValue));
1708 break;
1709 }
1710
1711 case XAPIC_OFF_PPR:
1712 {
1713 uValue = apicGetPpr(pVCpu);
1714 break;
1715 }
1716
1717 case XAPIC_OFF_TIMER_CCR:
1718 {
1719 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1720 rc = apicGetTimerCcr(pDevIns, pVCpu, VINF_IOM_R3_MMIO_READ, &uValue);
1721 break;
1722 }
1723
1724 case XAPIC_OFF_APR:
1725 {
1726#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1727 /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
1728 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1729#else
1730# error "Implement Pentium and P6 family APIC architectures"
1731#endif
1732 break;
1733 }
1734
1735 default:
1736 {
1737 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1738 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "VCPU[%u]: offReg=%#RX16\n", pVCpu->idCpu, offReg);
1739 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1740 break;
1741 }
1742 }
1743
1744 *puValue = uValue;
1745 return rc;
1746}
1747
1748
1749/**
1750 * Writes an APIC register.
1751 *
1752 * @returns Strict VBox status code.
1753 * @param pDevIns The device instance.
1754 * @param pVCpu The cross context virtual CPU structure.
1755 * @param offReg The offset of the register being written.
1756 * @param uValue The register value.
1757 */
1758DECLINLINE(VBOXSTRICTRC) apicWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
1759{
1760 VMCPU_ASSERT_EMT(pVCpu);
1761 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1762 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1763
1764 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1765 switch (offReg)
1766 {
1767 case XAPIC_OFF_TPR:
1768 {
1769 rcStrict = apicSetTprEx(pVCpu, uValue, false /* fForceX2ApicBehaviour */);
1770 break;
1771 }
1772
1773 case XAPIC_OFF_LVT_TIMER:
1774#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1775 case XAPIC_OFF_LVT_THERMAL:
1776#endif
1777 case XAPIC_OFF_LVT_PERF:
1778 case XAPIC_OFF_LVT_LINT0:
1779 case XAPIC_OFF_LVT_LINT1:
1780 case XAPIC_OFF_LVT_ERROR:
1781 {
1782 rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
1783 break;
1784 }
1785
1786 case XAPIC_OFF_TIMER_ICR:
1787 {
1788 rcStrict = apicSetTimerIcr(pDevIns, pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
1789 break;
1790 }
1791
1792 case XAPIC_OFF_EOI:
1793 {
1794 rcStrict = apicSetEoi(pVCpu, uValue, false /* fForceX2ApicBehaviour */);
1795 break;
1796 }
1797
1798 case XAPIC_OFF_LDR:
1799 {
1800 rcStrict = apicSetLdr(pVCpu, uValue);
1801 break;
1802 }
1803
1804 case XAPIC_OFF_DFR:
1805 {
1806 rcStrict = apicSetDfr(pVCpu, uValue);
1807 break;
1808 }
1809
1810 case XAPIC_OFF_SVR:
1811 {
1812 rcStrict = apicSetSvr(pVCpu, uValue);
1813 break;
1814 }
1815
1816 case XAPIC_OFF_ICR_LO:
1817 {
1818 rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE, true /* fUpdateStat */);
1819 break;
1820 }
1821
1822 case XAPIC_OFF_ICR_HI:
1823 {
1824 rcStrict = apicSetIcrHi(pVCpu, uValue);
1825 break;
1826 }
1827
1828 case XAPIC_OFF_TIMER_DCR:
1829 {
1830 rcStrict = apicSetTimerDcr(pVCpu, uValue);
1831 break;
1832 }
1833
1834 case XAPIC_OFF_ESR:
1835 {
1836 rcStrict = apicSetEsr(pVCpu, uValue);
1837 break;
1838 }
1839
1840 case XAPIC_OFF_APR:
1841 case XAPIC_OFF_RRD:
1842 {
1843#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1844 /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
1845#else
1846# error "Implement Pentium and P6 family APIC architectures"
1847#endif
1848 break;
1849 }
1850
1851 /* Read-only, write ignored: */
1852 case XAPIC_OFF_VERSION:
1853 case XAPIC_OFF_ID:
1854 break;
1855
1856 /* Unavailable/reserved in xAPIC mode: */
1857 case X2APIC_OFF_SELF_IPI:
1858 /* Read-only registers: */
1859 case XAPIC_OFF_PPR:
1860 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1861 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1862 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1863 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1864 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1865 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1866 case XAPIC_OFF_TIMER_CCR:
1867 default:
1868 {
1869 rcStrict = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "APIC%u: offReg=%#RX16\n", pVCpu->idCpu, offReg);
1870 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1871 break;
1872 }
1873 }
1874
1875 return rcStrict;
1876}
1877
1878
1879/**
1880 * @interface_method_impl{PDMAPICBACKEND,pfnReadMsr}
1881 */
1882static DECLCALLBACK(VBOXSTRICTRC) apicReadMsr(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1883{
1884 /*
1885 * Validate.
1886 */
1887 VMCPU_ASSERT_EMT(pVCpu);
1888 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1889 Assert(pu64Value);
1890
1891 /*
1892 * Is the APIC enabled?
1893 */
1894 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1895 if (apicIsEnabled(pVCpu))
1896 { /* likely */ }
1897 else
1898 return apicMsrAccessError(pVCpu, u32Reg, pApic->enmMaxMode == PDMAPICMODE_NONE ?
1899 APICMSRACCESS_READ_DISALLOWED_CONFIG : APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1900
1901#ifndef IN_RING3
1902 if (pApic->CTXALLMID(f,Enabled))
1903 { /* likely */}
1904 else
1905 return VINF_CPUM_R3_MSR_READ;
1906#endif
1907
1908 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrRead));
1909
1910 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1911 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
1912 || pApic->fHyperVCompatMode))
1913 {
1914 switch (u32Reg)
1915 {
1916 /* Special handling for x2APIC: */
1917 case MSR_IA32_X2APIC_ICR:
1918 {
1919 *pu64Value = apicGetIcrNoCheck(pVCpu);
1920 break;
1921 }
1922
1923 /* Special handling, compatible with xAPIC: */
1924 case MSR_IA32_X2APIC_TIMER_CCR:
1925 {
1926 uint32_t uValue;
1927 rcStrict = apicGetTimerCcr(VMCPU_TO_DEVINS(pVCpu), pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1928 *pu64Value = uValue;
1929 break;
1930 }
1931
1932 /* Special handling, compatible with xAPIC: */
1933 case MSR_IA32_X2APIC_PPR:
1934 {
1935 *pu64Value = apicGetPpr(pVCpu);
1936 break;
1937 }
1938
1939 /* Raw read, compatible with xAPIC: */
1940 case MSR_IA32_X2APIC_ID:
1941 {
1942 STAM_COUNTER_INC(&pVCpu->apic.s.StatIdMsrRead);
1943 /* Horrible macOS hack (sample rdmsr address: 0008:ffffff801686f21a). */
1944 if ( !pApic->fMacOSWorkaround
1945 || pVCpu->cpum.GstCtx.cs.Sel != 8
1946 || pVCpu->cpum.GstCtx.rip < UINT64_C(0xffffff8000000000))
1947 { /* likely */ }
1948 else
1949 {
1950 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
1951 uint32_t const idApic = pX2ApicPage->id.u32ApicId;
1952 *pu64Value = (idApic << 24) | idApic;
1953 Log(("APIC: Applying macOS hack to MSR_IA32_X2APIC_ID: %#RX64\n", *pu64Value));
1954 break;
1955 }
1956 RT_FALL_THRU();
1957 }
1958 case MSR_IA32_X2APIC_VERSION:
1959 case MSR_IA32_X2APIC_TPR:
1960 case MSR_IA32_X2APIC_LDR:
1961 case MSR_IA32_X2APIC_SVR:
1962 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1963 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1964 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1965 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1966 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1967 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1968 case MSR_IA32_X2APIC_ESR:
1969 case MSR_IA32_X2APIC_LVT_TIMER:
1970 case MSR_IA32_X2APIC_LVT_THERMAL:
1971 case MSR_IA32_X2APIC_LVT_PERF:
1972 case MSR_IA32_X2APIC_LVT_LINT0:
1973 case MSR_IA32_X2APIC_LVT_LINT1:
1974 case MSR_IA32_X2APIC_LVT_ERROR:
1975 case MSR_IA32_X2APIC_TIMER_ICR:
1976 case MSR_IA32_X2APIC_TIMER_DCR:
1977 {
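                /* Assuming X2APIC_GET_XAPIC_OFF implements the architectural mapping
                   offset = (MSR - 0x800) << 4, e.g. MSR_IA32_X2APIC_ESR (0x828) maps to
                   xAPIC offset 0x280. */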
1978 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1979 *pu64Value = apicReadRaw32(pVCpu, offReg);
1980 break;
1981 }
1982
1983 /* Write-only MSRs: */
1984 case MSR_IA32_X2APIC_SELF_IPI:
1985 case MSR_IA32_X2APIC_EOI:
1986 {
1987 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1988 break;
1989 }
1990
1991 /*
1992 * Windows guests using Hyper-V x2APIC MSR compatibility mode try to read the "high"
1993 * LDR bits using this invalid MSR index (0x80E), which is quite absurd as the LDR
1994 * is a 32-bit register. See @bugref{8382#c175}.
1995 */
1996 case MSR_IA32_X2APIC_LDR + 1:
1997 {
1998 if (pApic->fHyperVCompatMode)
1999 *pu64Value = 0;
2000 else
2001 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
2002 break;
2003 }
2004
2005 /* Reserved MSRs: */
2006 case MSR_IA32_X2APIC_LVT_CMCI:
2007 default:
2008 {
2009 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
2010 break;
2011 }
2012 }
2013 }
2014 else
2015 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
2016
2017 return rcStrict;
2018}
2019
2020
2021/**
2022 * @interface_method_impl{PDMAPICBACKEND,pfnWriteMsr}
2023 */
2024static DECLCALLBACK(VBOXSTRICTRC) apicWriteMsr(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t u64Value)
2025{
2026 /*
2027 * Validate.
2028 */
2029 VMCPU_ASSERT_EMT(pVCpu);
2030 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
2031
2032 /*
2033 * Is the APIC enabled?
2034 */
2035 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2036 if (apicIsEnabled(pVCpu))
2037 { /* likely */ }
2038 else
2039 return apicMsrAccessError(pVCpu, u32Reg, pApic->enmMaxMode == PDMAPICMODE_NONE ?
2040 APICMSRACCESS_WRITE_DISALLOWED_CONFIG : APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2041
2042#ifndef IN_RING3
2043 if (pApic->CTXALLMID(f,Enabled))
2044 { /* likely */ }
2045 else
2046 return VINF_CPUM_R3_MSR_WRITE;
2047#endif
2048
2049 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrWrite));
2050
2051 /*
2052 * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
2053 * accesses where they are ignored. Hence, we need to validate each register before
2054 * invoking the generic/xAPIC write functions.
2055 *
2056 * Bits 63:32 of all registers except the ICR are reserved; we'll handle this common
2057 * case first and validate the remaining bits on a per-register basis.
2058 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
2059 */
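    /* E.g. (illustrative): a 'wrmsr' of 0x100000000 to MSR_IA32_X2APIC_TPR trips the check
       below and raises #GP(0), whereas such bits simply cannot be expressed through the
       32-bit xAPIC MMIO interface. */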
2060 if ( u32Reg != MSR_IA32_X2APIC_ICR
2061 && RT_HI_U32(u64Value))
2062 return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);
2063
2064 uint32_t u32Value = RT_LO_U32(u64Value);
2065 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2066 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
2067 || pApic->fHyperVCompatMode))
2068 {
2069 switch (u32Reg)
2070 {
2071 case MSR_IA32_X2APIC_TPR:
2072 {
2073 rcStrict = apicSetTprEx(pVCpu, u32Value, false /* fForceX2ApicBehaviour */);
2074 break;
2075 }
2076
2077 case MSR_IA32_X2APIC_ICR:
2078 {
2079 rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
2080 break;
2081 }
2082
2083 case MSR_IA32_X2APIC_SVR:
2084 {
2085 rcStrict = apicSetSvr(pVCpu, u32Value);
2086 break;
2087 }
2088
2089 case MSR_IA32_X2APIC_ESR:
2090 {
2091 rcStrict = apicSetEsr(pVCpu, u32Value);
2092 break;
2093 }
2094
2095 case MSR_IA32_X2APIC_TIMER_DCR:
2096 {
2097 rcStrict = apicSetTimerDcr(pVCpu, u32Value);
2098 break;
2099 }
2100
2101 case MSR_IA32_X2APIC_LVT_TIMER:
2102 case MSR_IA32_X2APIC_LVT_THERMAL:
2103 case MSR_IA32_X2APIC_LVT_PERF:
2104 case MSR_IA32_X2APIC_LVT_LINT0:
2105 case MSR_IA32_X2APIC_LVT_LINT1:
2106 case MSR_IA32_X2APIC_LVT_ERROR:
2107 {
2108 rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
2109 break;
2110 }
2111
2112 case MSR_IA32_X2APIC_TIMER_ICR:
2113 {
2114 rcStrict = apicSetTimerIcr(VMCPU_TO_DEVINS(pVCpu), pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
2115 break;
2116 }
2117
2118 /* Write-only MSRs: */
2119 case MSR_IA32_X2APIC_SELF_IPI:
2120 {
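                /* Bits 7:0 of the value carry the vector; e.g. (illustrative) a guest
                   'wrmsr 0x83F, 0x45' posts edge-triggered vector 0x45 to this VCPU. */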
2121 uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
2122 apicPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE, false /* fAutoEoi */, 0 /* uSrcTag */);
2123 rcStrict = VINF_SUCCESS;
2124 break;
2125 }
2126
2127 case MSR_IA32_X2APIC_EOI:
2128 {
2129 rcStrict = apicSetEoi(pVCpu, u32Value, false /* fForceX2ApicBehaviour */);
2130 break;
2131 }
2132
2133 /*
2134 * Windows guests using Hyper-V x2APIC MSR compatibility mode try to write the "high"
2135 * LDR bits using this invalid MSR index (0x80E), which is quite absurd as the LDR
2136 * is a 32-bit register. The write value was 0xffffffff on a Windows 8.1 64-bit guest.
2137 * We can safely ignore this nonsense; see @bugref{8382#c7}.
2138 */
2139 case MSR_IA32_X2APIC_LDR + 1:
2140 {
2141 if (pApic->fHyperVCompatMode)
2142 rcStrict = VINF_SUCCESS;
2143 else
2144 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2145 break;
2146 }
2147
2148 /* Special treatment (read-only normally, but not with Hyper-V): */
2149 case MSR_IA32_X2APIC_LDR:
2150 {
2151 if (pApic->fHyperVCompatMode)
2152 {
2153 rcStrict = apicSetLdr(pVCpu, u32Value);
2154 break;
2155 }
2156 }
2157 RT_FALL_THRU();
2158 /* Read-only MSRs: */
2159 case MSR_IA32_X2APIC_ID:
2160 case MSR_IA32_X2APIC_VERSION:
2161 case MSR_IA32_X2APIC_PPR:
2162 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
2163 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
2164 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
2165 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
2166 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
2167 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
2168 case MSR_IA32_X2APIC_TIMER_CCR:
2169 {
2170 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
2171 break;
2172 }
2173
2174 /* Reserved MSRs: */
2175 case MSR_IA32_X2APIC_LVT_CMCI:
2176 default:
2177 {
2178 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2179 break;
2180 }
2181 }
2182 }
2183 else
2184 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);
2185
2186 return rcStrict;
2187}
2188
2189
2190/**
2191 * Resets the APIC base MSR.
2192 *
2193 * @param pVCpu The cross context virtual CPU structure.
2194 */
2195static void apicResetBaseMsr(PVMCPUCC pVCpu)
2196{
2197 /*
2198 * Initialize the APIC base MSR. The APIC enable-bit is set upon power-up or reset[1].
2199 *
2200 * A Reset (in xAPIC and x2APIC mode) brings up the local APIC in xAPIC mode.
2201 * An INIT IPI does -not- cause a transition between xAPIC and x2APIC mode[2].
2202 *
2203 * [1] See AMD spec. 14.1.3 "Processor Initialization State"
2204 * [2] See Intel spec. 10.12.5.1 "x2APIC States".
2205 */
2206 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2207
2208 /* Construct. */
2209 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2210 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2211 uint64_t uApicBaseMsr = MSR_IA32_APICBASE_ADDR;
2212 if (pVCpu->idCpu == 0)
2213 uApicBaseMsr |= MSR_IA32_APICBASE_BSP;
2214
2215 /* If the VM was configured with no APIC, don't enable xAPIC mode, obviously. */
2216 if (pApic->enmMaxMode != PDMAPICMODE_NONE)
2217 {
2218 uApicBaseMsr |= MSR_IA32_APICBASE_EN;
2219
2220 /*
2221 * While coming out of a reset the APIC is enabled and in xAPIC mode. If software had previously
2222 * disabled the APIC (which results in the CPUID bit being cleared as well) we re-enable it here.
2223 * See Intel spec. 10.12.5.1 "x2APIC States".
2224 */
2225 if (CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/) == false)
2226 LogRel(("APIC%u: Resetting mode to xAPIC\n", pVCpu->idCpu));
2227 }
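    /* Illustrative end state, assuming the architectural IA32_APIC_BASE layout (BSP=bit 8,
       EN=bit 11, default base 0xfee00000): VCPU 0 on an APIC-enabled VM commits 0xfee00900. */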
2228
2229 /* Commit. */
2230 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uApicBaseMsr);
2231}
2232
2233
2234/**
2235 * @interface_method_impl{PDMAPICBACKEND,pfnInitIpi}
2236 */
2237static DECLCALLBACK(void) apicInitIpi(PVMCPUCC pVCpu)
2238{
2239 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2240 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2241
2242 /*
2243 * See Intel spec. 10.4.7.3 "Local APIC State After an INIT Reset (Wait-for-SIPI State)"
2244 * and AMD spec 16.3.2 "APIC Registers".
2245 *
2246 * The reason we don't simply zero out the entire APIC page and only set the non-zero members
2247 * is that some registers are not touched by the INIT IPI (e.g. the version register), since
2248 * this operation is only a subset of the full reset.
2249 */
2250 RT_ZERO(pXApicPage->irr);
2252 RT_ZERO(pXApicPage->isr);
2253 RT_ZERO(pXApicPage->tmr);
2254 RT_ZERO(pXApicPage->icr_hi);
2255 RT_ZERO(pXApicPage->icr_lo);
2256 RT_ZERO(pXApicPage->ldr);
2257 RT_ZERO(pXApicPage->tpr);
2258 RT_ZERO(pXApicPage->ppr);
2259 RT_ZERO(pXApicPage->timer_icr);
2260 RT_ZERO(pXApicPage->timer_ccr);
2261 RT_ZERO(pXApicPage->timer_dcr);
2262
2263 pXApicPage->dfr.u.u4Model = XAPICDESTFORMAT_FLAT;
2264 pXApicPage->dfr.u.u28ReservedMb1 = UINT32_C(0xfffffff);
2265
2266 /** @todo CMCI. */
2267
2268 RT_ZERO(pXApicPage->lvt_timer);
2269 pXApicPage->lvt_timer.u.u1Mask = 1;
2270
2271#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
2272 RT_ZERO(pXApicPage->lvt_thermal);
2273 pXApicPage->lvt_thermal.u.u1Mask = 1;
2274#endif
2275
2276 RT_ZERO(pXApicPage->lvt_perf);
2277 pXApicPage->lvt_perf.u.u1Mask = 1;
2278
2279 RT_ZERO(pXApicPage->lvt_lint0);
2280 pXApicPage->lvt_lint0.u.u1Mask = 1;
2281
2282 RT_ZERO(pXApicPage->lvt_lint1);
2283 pXApicPage->lvt_lint1.u.u1Mask = 1;
2284
2285 RT_ZERO(pXApicPage->lvt_error);
2286 pXApicPage->lvt_error.u.u1Mask = 1;
2287
2288 RT_ZERO(pXApicPage->svr);
2289 pXApicPage->svr.u.u8SpuriousVector = 0xff;
2290
2291 /* The self-IPI register is reset to 0. See Intel spec. 10.12.5.1 "x2APIC States" */
2292 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2293 RT_ZERO(pX2ApicPage->self_ipi);
2294
2295 /* Clear the pending-interrupt bitmaps. */
2296 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2297 RT_BZERO(&pApicCpu->ApicPibLevel, sizeof(APICPIB));
2298 RT_BZERO(pApicCpu->CTX_SUFF(pvApicPib), sizeof(APICPIB));
2299
2300 /* Clear the interrupt line states for LINT0 and LINT1 pins. */
2301 pApicCpu->fActiveLint0 = false;
2302 pApicCpu->fActiveLint1 = false;
2303}
2304
2305
2306/**
2307 * Initializes per-VCPU APIC to the state following a power-up or hardware
2308 * reset.
2309 *
2310 * @param pVCpu The cross context virtual CPU structure.
2311 * @param fResetApicBaseMsr Whether to reset the APIC base MSR.
2312 */
2313void apicResetCpu(PVMCPUCC pVCpu, bool fResetApicBaseMsr)
2314{
2315 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2316
2317 LogFlow(("APIC%u: apicR3ResetCpu: fResetApicBaseMsr=%RTbool\n", pVCpu->idCpu, fResetApicBaseMsr));
2318
2319#ifdef VBOX_STRICT
2320 /* Verify that the initial APIC ID reported via CPUID matches our VMCPU ID assumption. */
2321 uint32_t uEax, uEbx, uEcx, uEdx;
2322 uEax = uEbx = uEcx = uEdx = UINT32_MAX;
2323 CPUMGetGuestCpuId(pVCpu, 1, 0, -1 /*f64BitMode*/, &uEax, &uEbx, &uEcx, &uEdx);
2324 Assert(((uEbx >> 24) & 0xff) == pVCpu->idCpu);
2325#endif
2326
2327 /*
2328 * The state following a power-up or reset is a superset of the INIT state.
2329 * See Intel spec. 10.4.7.3 "Local APIC State After an INIT Reset ('Wait-for-SIPI' State)"
2330 */
2331 apicInitIpi(pVCpu);
2332
2333 /*
2334 * The APIC version register is read-only, so just initialize it here.
2335 * It is not clear from the specs where exactly it is initialized.
2336 * The version determines the number of LVT entries and size of the APIC ID (8 bits for P4).
2337 */
2338 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2339#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
2340 pXApicPage->version.u.u8MaxLvtEntry = XAPIC_MAX_LVT_ENTRIES_P4 - 1;
2341 pXApicPage->version.u.u8Version = XAPIC_HARDWARE_VERSION_P4;
2342 AssertCompile(sizeof(pXApicPage->id.u8ApicId) >= XAPIC_APIC_ID_BIT_COUNT_P4 / 8);
2343#else
2344# error "Implement Pentium and P6 family APIC architectures"
2345#endif
2346
2347 /** @todo It isn't clear in the spec. where exactly the default base address
2348 * is (re)initialized, atm we do it here in Reset. */
2349 if (fResetApicBaseMsr)
2350 apicResetBaseMsr(pVCpu);
2351
2352 /*
2353 * Initialize the APIC ID register to xAPIC format.
2354 */
2355 RT_BZERO(&pXApicPage->id, sizeof(pXApicPage->id));
2356 pXApicPage->id.u8ApicId = pVCpu->idCpu;
2357}
2358
2359
2360/**
2361 * @interface_method_impl{PDMAPICBACKEND,pfnSetBaseMsr}
2362 */
2363static DECLCALLBACK(int) apicSetBaseMsr(PVMCPUCC pVCpu, uint64_t u64BaseMsr)
2364{
2365 Assert(pVCpu);
2366
2367 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2368 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2369 APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
2370 APICMODE enmNewMode = apicGetMode(u64BaseMsr);
2371 uint64_t uBaseMsr = pApicCpu->uApicBaseMsr;
2372
2373 Log2(("APIC%u: apicSetBaseMsr: u64BaseMsr=%#RX64 enmNewMode=%s enmOldMode=%s\n", pVCpu->idCpu, u64BaseMsr,
2374 apicGetModeName(enmNewMode), apicGetModeName(enmOldMode)));
2375
2376 /*
2377 * We do not support re-mapping the APIC base address because:
2378 * - We'll have to manage all the mappings ourselves in the APIC (reference counting based unmapping etc.)
2379 * i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
2380 * - It's unclear how/if IOM can fall back to handling regions as regular memory (if the MMIO
2381 * region remains mapped but doesn't belong to the called VCPU's APIC).
2382 */
2383 /** @todo Handle per-VCPU APIC base relocation. */
2384 if (MSR_IA32_APICBASE_GET_ADDR(uBaseMsr) != MSR_IA32_APICBASE_ADDR)
2385 {
2386 if (pVCpu->apic.s.cLogMaxSetApicBaseAddr++ < 5)
2387 LogRel(("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
2388 MSR_IA32_APICBASE_GET_ADDR(uBaseMsr)));
2389 return VERR_CPUM_RAISE_GP_0;
2390 }
2391
2392 /* Don't allow enabling xAPIC/x2APIC if the VM is configured with the APIC disabled. */
2393 if (pApic->enmMaxMode == PDMAPICMODE_NONE)
2394 {
2395 LogRel(("APIC%u: Disallowing APIC base MSR write as the VM is configured with APIC disabled!\n", pVCpu->idCpu));
2396 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_DISALLOWED_CONFIG);
2397 }
2398
2399 /*
2400 * Act on state transition.
2401 */
2402 if (enmNewMode != enmOldMode)
2403 {
2404 switch (enmNewMode)
2405 {
2406 case APICMODE_DISABLED:
2407 {
2408 /*
2409 * The APIC state needs to be reset (especially the APIC ID as x2APIC APIC ID bit layout
2410 * is different). We can start with a clean slate identical to the state after a power-up/reset.
2411 *
2412 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
2413 *
2414 * We'll also manually manage the APIC base MSR here. We want a single point of commit
2415 * at the end of this function rather than updating it in apicR3ResetCpu. This means we also
2416 * need to update the CPUID leaf ourselves.
2417 */
2418 apicResetCpu(pVCpu, false /* fResetApicBaseMsr */);
2419 uBaseMsr &= ~(MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD);
2420 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false /*fVisible*/);
2421 LogRel(("APIC%u: Switched mode to disabled\n", pVCpu->idCpu));
2422 break;
2423 }
2424
2425 case APICMODE_XAPIC:
2426 {
2427 if (enmOldMode != APICMODE_DISABLED)
2428 {
2429 LogRel(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
2430 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2431 }
2432
2433 uBaseMsr |= MSR_IA32_APICBASE_EN;
2434 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/);
2435 LogRel(("APIC%u: Switched mode to xAPIC\n", pVCpu->idCpu));
2436 break;
2437 }
2438
2439 case APICMODE_X2APIC:
2440 {
2441 if (pApic->enmMaxMode != PDMAPICMODE_X2APIC)
2442 {
2443 LogRel(("APIC%u: Disallowing transition to x2APIC mode as the VM is configured with the x2APIC disabled!\n",
2444 pVCpu->idCpu));
2445 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2446 }
2447
2448 if (enmOldMode != APICMODE_XAPIC)
2449 {
2450 LogRel(("APIC%u: Can only transition to x2APIC state from xAPIC state\n", pVCpu->idCpu));
2451 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2452 }
2453
2454 uBaseMsr |= MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD;
2455
2456 /*
2457 * The APIC ID needs updating when entering x2APIC mode.
2458 * An APIC ID written by software in xAPIC mode isn't preserved.
2459 * The APIC ID becomes read-only to software in x2APIC mode.
2460 *
2461 * See Intel spec. 10.12.5.1 "x2APIC States".
2462 */
2463 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2464 RT_BZERO(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
2465 pX2ApicPage->id.u32ApicId = pVCpu->idCpu;
2466
2467 /*
2468 * LDR initialization occurs when entering x2APIC mode.
2469 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
2470 */
2471 pX2ApicPage->ldr.u32LogicalApicId = ((pX2ApicPage->id.u32ApicId & UINT32_C(0xffff0)) << 16)
2472 | (UINT32_C(1) << (pX2ApicPage->id.u32ApicId & UINT32_C(0xf)));
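                /* Worked example (illustrative): for x2APIC IDs below 16 the cluster bits are
                   all zero, so e.g. ID 5 yields LDR = 1 << 5 = 0x20 (cluster 0, logical bit 5). */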
2473
2474 LogRel(("APIC%u: Switched mode to x2APIC\n", pVCpu->idCpu));
2475 break;
2476 }
2477
2478 case APICMODE_INVALID:
2479 default:
2480 {
2481 Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
2482 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2483 }
2484 }
2485 }
2486
2487 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
2488 return VINF_SUCCESS;
2489}
2490
2491
2492/**
2493 * @interface_method_impl{PDMAPICBACKEND,pfnGetBaseMsrNoCheck}
2494 */
2495static DECLCALLBACK(uint64_t) apicGetBaseMsrNoCheck(PCVMCPUCC pVCpu)
2496{
2497 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2498 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2499 return pApicCpu->uApicBaseMsr;
2500}
2501
2502
2503/**
2504 * @interface_method_impl{PDMAPICBACKEND,pfnGetBaseMsr}
2505 */
2506static DECLCALLBACK(VBOXSTRICTRC) apicGetBaseMsr(PVMCPUCC pVCpu, uint64_t *pu64Value)
2507{
2508 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2509
2510 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2511 if (pApic->enmMaxMode != PDMAPICMODE_NONE)
2512 {
2513 *pu64Value = apicGetBaseMsrNoCheck(pVCpu);
2514 return VINF_SUCCESS;
2515 }
2516
2517 if (pVCpu->apic.s.cLogMaxGetApicBaseAddr++ < 5)
2518 LogRel(("APIC%u: Reading APIC base MSR (%#x) when there is no APIC -> #GP(0)\n", pVCpu->idCpu, MSR_IA32_APICBASE));
2519 return VERR_CPUM_RAISE_GP_0;
2520}
2521
2522
2523/**
2524 * @interface_method_impl{PDMAPICBACKEND,pfnSetTpr}
2525 */
2526static DECLCALLBACK(int) apicSetTpr(PVMCPUCC pVCpu, uint8_t u8Tpr, bool fForceX2ApicBehaviour)
2527{
2528 if (apicIsEnabled(pVCpu))
2529 return apicSetTprEx(pVCpu, u8Tpr, fForceX2ApicBehaviour);
2530 return VERR_PDM_NO_APIC_INSTANCE;
2531}
2532
2533
2534/**
2535 * Gets the highest priority pending interrupt.
2536 *
2537 * @returns true if any interrupt is pending, false otherwise.
2538 * @param pVCpu The cross context virtual CPU structure.
2539 * @param pu8PendingIntr Where to store the interrupt vector if the
2540 * interrupt is pending (optional, can be NULL).
2541 */
2542static bool apicGetHighestPendingInterrupt(PCVMCPUCC pVCpu, uint8_t *pu8PendingIntr)
2543{
2544 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2545 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2546 if (irrv >= 0)
2547 {
2548 Assert(irrv <= (int)UINT8_MAX);
2549 if (pu8PendingIntr)
2550 *pu8PendingIntr = (uint8_t)irrv;
2551 return true;
2552 }
2553 return false;
2554}
2555
2556
2557/**
2558 * @interface_method_impl{PDMAPICBACKEND,pfnGetTpr}
2559 */
2560static DECLCALLBACK(int) apicGetTpr(PCVMCPUCC pVCpu, uint8_t *pu8Tpr, bool *pfPending, uint8_t *pu8PendingIntr)
2561{
2562 VMCPU_ASSERT_EMT(pVCpu);
2563 if (apicIsEnabled(pVCpu))
2564 {
2565 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2566 if (pfPending)
2567 {
2568 /*
2569 * Just return whatever the highest pending interrupt is in the IRR.
2570 * The caller is responsible for figuring out if it's masked by the TPR etc.
2571 */
2572 *pfPending = apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
2573 }
2574
2575 *pu8Tpr = pXApicPage->tpr.u8Tpr;
2576 return VINF_SUCCESS;
2577 }
2578
2579 *pu8Tpr = 0;
2580 return VERR_PDM_NO_APIC_INSTANCE;
2581}
2582
2583
2584/**
2585 * @interface_method_impl{PDMAPICBACKEND,pfnGetTimerFreq}
2586 */
2587static DECLCALLBACK(int) apicGetTimerFreq(PVMCC pVM, uint64_t *pu64Value)
2588{
2589 /*
2590 * Validate.
2591 */
2592 Assert(pVM);
2593 AssertPtrReturn(pu64Value, VERR_INVALID_PARAMETER);
2594
2595 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[0];
2596 if (apicIsEnabled(pVCpu))
2597 {
2598 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2599 *pu64Value = PDMDevHlpTimerGetFreq(VMCPU_TO_DEVINS(pVCpu), pApicCpu->hTimer);
2600 return VINF_SUCCESS;
2601 }
2602 return VERR_PDM_NO_APIC_INSTANCE;
2603}
2604
2605
2606/**
2607 * @interface_method_impl{PDMAPICBACKEND,pfnBusDeliver}
2608 */
2609static DECLCALLBACK(int) apicBusDeliver(PVMCC pVM, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
2610 uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uSrcTag)
2611{
2612 NOREF(uPolarity);
2613
2614 /*
2615 * If the APIC isn't enabled, do nothing and pretend success.
2616 */
2617 if (apicIsEnabled(pVM->CTX_SUFF(apCpus)[0]))
2618 { /* likely */ }
2619 else
2620 return VINF_SUCCESS;
2621
2622 /*
2623 * The destination field (mask) in the IO APIC redirection table entry is 8 bits wide.
2624 * Hence, the broadcast mask is 0xff.
2625 * See IO APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirectable Table Registers".
2626 */
2627 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)uTriggerMode;
2628 XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
2629 XAPICDESTMODE enmDestMode = (XAPICDESTMODE)uDestMode;
2630 uint32_t fDestMask = uDest;
2631 uint32_t fBroadcastMask = UINT32_C(0xff);
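    /* E.g. (illustrative): an IO APIC RTE with destination 0xff in logical destination
       mode addresses every local APIC, i.e. a broadcast. */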
2632
2633 Log2(("APIC: apicBusDeliver: fDestMask=%#x enmDestMode=%s enmTriggerMode=%s enmDeliveryMode=%s uVector=%#x uSrcTag=%#x\n",
2634 fDestMask, apicGetDestModeName(enmDestMode), apicGetTriggerModeName(enmTriggerMode),
2635 apicGetDeliveryModeName(enmDeliveryMode), uVector, uSrcTag));
2636
2637 bool fIntrAccepted;
2638 VMCPUSET DestCpuSet;
2639 apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
2640 VBOXSTRICTRC rcStrict = apicSendIntr(pVM, NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2641 &fIntrAccepted, uSrcTag, VINF_SUCCESS /* rcRZ */);
2642 if (fIntrAccepted)
2643 return VBOXSTRICTRC_VAL(rcStrict);
2644 return VERR_APIC_INTR_DISCARDED;
2645}
2646
2647
2648/**
2649 * @interface_method_impl{PDMAPICBACKEND,pfnSetLocalInterrupt}
2650 */
2651static DECLCALLBACK(VBOXSTRICTRC) apicSetLocalInterrupt(PVMCPUCC pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
2652{
2653 AssertReturn(u8Pin <= 1, VERR_INVALID_PARAMETER);
2654 AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);
2655
2656 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2657
2658 /* If the APIC is enabled, the interrupt is subject to LVT programming. */
2659 if (apicIsEnabled(pVCpu))
2660 {
2661 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2662
2663 /* Pick the LVT entry corresponding to the interrupt pin. */
2664 static const uint16_t s_au16LvtOffsets[] =
2665 {
2666 XAPIC_OFF_LVT_LINT0,
2667 XAPIC_OFF_LVT_LINT1
2668 };
2669 Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
2670 uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
2671 uint32_t const uLvt = apicReadRaw32(pVCpu, offLvt);
2672
2673 /* If software hasn't masked the interrupt in the LVT entry, proceed with interrupt processing. */
2674 if (!XAPIC_LVT_IS_MASKED(uLvt))
2675 {
2676 XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
2677 XAPICTRIGGERMODE enmTriggerMode = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);
2678
2679 switch (enmDeliveryMode)
2680 {
2681 case XAPICDELIVERYMODE_INIT:
2682 {
2683 /** @todo won't work in R0/RC because callers don't care about rcRZ. */
2684 AssertMsgFailed(("INIT through LINT0/LINT1 is not yet supported\n"));
2685 }
2686 RT_FALL_THRU();
2687 case XAPICDELIVERYMODE_FIXED:
2688 {
2689 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2690 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2691 bool fActive = RT_BOOL(u8Level & 1);
2692 bool volatile *pfActiveLine = u8Pin == 0 ? &pApicCpu->fActiveLint0 : &pApicCpu->fActiveLint1;
2693 /** @todo Polarity is busted elsewhere, we need to fix that
2694 * first. See @bugref{8386#c7}. */
2695#if 0
2696 uint8_t const u8Polarity = XAPIC_LVT_GET_POLARITY(uLvt);
2697 fActive ^= u8Polarity;
2698#endif
2699 if (!fActive)
2700 {
2701 ASMAtomicCmpXchgBool(pfActiveLine, false, true);
2702 break;
2703 }
2704
2705 /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
2706 if (offLvt == XAPIC_OFF_LVT_LINT1)
2707 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
2708 /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
2709 delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
2710 use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
2711 means. */
2712
2713 bool fSendIntr;
2714 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2715 {
2716 /* Recognize and send the interrupt only on an edge transition. */
2717 fSendIntr = ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2718 }
2719 else
2720 {
2721 /* For level-triggered interrupts, redundant interrupts are not a problem. */
2722 Assert(enmTriggerMode == XAPICTRIGGERMODE_LEVEL);
2723 ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2724
2725 /* Only if the remote IRR isn't already set, set it and send the interrupt. */
2726 if (!(pXApicPage->lvt_lint0.all.u32LvtLint0 & XAPIC_LVT_REMOTE_IRR))
2727 {
2728 Assert(offLvt == XAPIC_OFF_LVT_LINT0);
2729 ASMAtomicOrU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, XAPIC_LVT_REMOTE_IRR);
2730 fSendIntr = true;
2731 }
2732 else
2733 fSendIntr = false;
2734 }
2735
2736 if (fSendIntr)
2737 {
2738 VMCPUSET DestCpuSet;
2739 VMCPUSET_EMPTY(&DestCpuSet);
2740 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2741 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode,
2742 &DestCpuSet, NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
2743 }
2744 break;
2745 }
2746
2747 case XAPICDELIVERYMODE_SMI:
2748 case XAPICDELIVERYMODE_NMI:
2749 {
2750 VMCPUSET DestCpuSet;
2751 VMCPUSET_EMPTY(&DestCpuSet);
2752 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2753 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2754 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2755 NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
2756 break;
2757 }
2758
2759 case XAPICDELIVERYMODE_EXTINT:
2760 {
2761 Log2(("APIC%u: apicSetLocalInterrupt: %s ExtINT through LINT%u\n", pVCpu->idCpu,
2762 u8Level ? "Raising" : "Lowering", u8Pin));
2763 if (u8Level)
2764 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2765 else
2766 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2767 break;
2768 }
2769
2770 /* Reserved/unknown delivery modes: */
2771 case XAPICDELIVERYMODE_LOWEST_PRIO:
2772 case XAPICDELIVERYMODE_STARTUP:
2773 default:
2774 {
2775 AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x (%s) on LINT%d\n", pVCpu->idCpu,
2776 enmDeliveryMode, apicGetDeliveryModeName(enmDeliveryMode), u8Pin));
2777 rcStrict = VERR_INTERNAL_ERROR_3;
2778 break;
2779 }
2780 }
2781 }
2782 }
2783 else
2784 {
2785 /* The APIC is hardware disabled. The CPU behaves as though there is no on-chip APIC. */
2786 if (u8Pin == 0)
2787 {
2788 /* LINT0 behaves as an external interrupt pin. */
2789 Log2(("APIC%u: apicSetLocalInterrupt: APIC hardware-disabled, %s INTR\n", pVCpu->idCpu,
2790 u8Level ? "raising" : "lowering"));
2791 if (u8Level)
2792 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2793 else
2794 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2795 }
2796 else
2797 {
2798 /* LINT1 behaves as NMI. */
2799 Log2(("APIC%u: apicSetLocalInterrupt: APIC hardware-disabled, raising NMI\n", pVCpu->idCpu));
2800 apicSetInterruptFF(pVCpu, PDMAPICIRQ_NMI);
2801 }
2802 }
2803
2804 return rcStrict;
2805}
2806
2807
2808/**
2809 * @interface_method_impl{PDMAPICBACKEND,pfnGetInterrupt}
2810 */
2811static DECLCALLBACK(int) apicGetInterrupt(PVMCPUCC pVCpu, uint8_t *pu8Vector, uint32_t *puSrcTag)
2812{
2813 VMCPU_ASSERT_EMT(pVCpu);
2814 Assert(pu8Vector);
2815
2816 LogFlow(("APIC%u: apicGetInterrupt:\n", pVCpu->idCpu));
2817
2818 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2819 bool const fApicHwEnabled = apicIsEnabled(pVCpu);
2820 if ( fApicHwEnabled
2821 && pXApicPage->svr.u.fApicSoftwareEnable)
2822 {
2823 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2824 if (RT_LIKELY(irrv >= 0))
2825 {
2826 Assert(irrv <= (int)UINT8_MAX);
2827 uint8_t const uVector = irrv;
2828
2829 /*
2830 * This can happen if the APIC receives an interrupt when the CPU has interrupts
2831 * disabled but the TPR is raised by the guest before re-enabling interrupts.
2832 */
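            /* E.g. (illustrative): with TPR=0x50 (priority class 5), a pending vector 0x52
               (also class 5) is masked here, while vector 0x61 (class 6) passes this check. */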
2833 uint8_t const uTpr = pXApicPage->tpr.u8Tpr;
2834 if ( uTpr > 0
2835 && XAPIC_TPR_GET_TP(uVector) <= XAPIC_TPR_GET_TP(uTpr))
2836 {
2837 Log2(("APIC%u: apicGetInterrupt: Interrupt masked. uVector=%#x uTpr=%#x SpuriousVector=%#x\n", pVCpu->idCpu,
2838 uVector, uTpr, pXApicPage->svr.u.u8SpuriousVector));
2839 *pu8Vector = uVector;
2840 *puSrcTag = 0;
2841 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByTpr);
2842 return VERR_APIC_INTR_MASKED_BY_TPR;
2843 }
2844
2845 /*
2846 * The PPR should be up-to-date at this point through apicSetEoi().
2847 * We're on EMT so no parallel updates possible.
2848 * Subject the pending vector to PPR prioritization.
2849 */
2850 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
2851 if ( !uPpr
2852 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
2853 {
2854 apicClearVectorInReg(&pXApicPage->irr, uVector);
2855 apicSetVectorInReg(&pXApicPage->isr, uVector);
2856 apicUpdatePpr(pVCpu);
2857 apicSignalNextPendingIntr(pVCpu);
2858
2859 /* Retrieve the interrupt source tag associated with this interrupt. */
2860 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2861 AssertCompile(RT_ELEMENTS(pApicCpu->auSrcTags) > UINT8_MAX);
2862 *puSrcTag = pApicCpu->auSrcTags[uVector];
2863 pApicCpu->auSrcTags[uVector] = 0;
2864
2865 Log2(("APIC%u: apicGetInterrupt: Valid Interrupt. uVector=%#x uSrcTag=%#x\n", pVCpu->idCpu, uVector, *puSrcTag));
2866 *pu8Vector = uVector;
2867 return VINF_SUCCESS;
2868 }
2869
2870 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByPpr);
2871 Log2(("APIC%u: apicGetInterrupt: Interrupt's priority is not higher than the PPR. uVector=%#x PPR=%#x\n",
2872 pVCpu->idCpu, uVector, uPpr));
2873 }
2874 else
2875 Log2(("APIC%u: apicGetInterrupt: No pending bits in IRR\n", pVCpu->idCpu));
2876 }
2877 else
2878 Log2(("APIC%u: apicGetInterrupt: APIC %s disabled\n", pVCpu->idCpu, !fApicHwEnabled ? "hardware" : "software"));
2879
2880 *pu8Vector = 0;
2881 *puSrcTag = 0;
2882 return VERR_APIC_INTR_NOT_PENDING;
2883}
2884
2885
2886/**
2887 * @callback_method_impl{FNIOMMMIONEWREAD}
2888 */
2889DECLCALLBACK(VBOXSTRICTRC) apicReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
2890{
2891 NOREF(pvUser);
2892 Assert(!(off & 0xf));
2893 Assert(cb == 4); RT_NOREF_PV(cb);
2894
2895 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2896 uint16_t offReg = off & 0xff0;
2897 uint32_t uValue = 0;
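    /* xAPIC registers sit on 16-byte boundaries within the 4K page; e.g. (illustrative)
       an access at physical 0xfee00080 yields offReg 0x080, the TPR. */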
2898
2899 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioRead));
2900
2901 VBOXSTRICTRC rc = VBOXSTRICTRC_VAL(apicReadRegister(pDevIns, pVCpu, offReg, &uValue));
2902 *(uint32_t *)pv = uValue;
2903
2904 Log2(("APIC%u: apicReadMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2905 return rc;
2906}
2907
2908
2909/**
2910 * @callback_method_impl{FNIOMMMIONEWWRITE}
2911 */
2912DECLCALLBACK(VBOXSTRICTRC) apicWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
2913{
2914 NOREF(pvUser);
2915 Assert(!(off & 0xf));
2916 Assert(cb == 4); RT_NOREF_PV(cb);
2917
2918 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2919 uint16_t offReg = off & 0xff0;
2920 uint32_t uValue = *(uint32_t *)pv;
2921
2922 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioWrite));
2923
2924 Log2(("APIC%u: apicWriteMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2925
2926 return apicWriteRegister(pDevIns, pVCpu, offReg, uValue);
2927}
2928
2929
2930/**
2931 * Sets the interrupt pending force-flag and pokes the EMT if required.
2932 *
2933 * @param pVCpu The cross context virtual CPU structure.
2934 * @param enmType The IRQ type.
2935 */
2936static void apicSetInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType)
2937{
2938#ifdef IN_RING3
2939 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
2940 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
2941#endif
2942
2943 switch (enmType)
2944 {
2945 case PDMAPICIRQ_HARDWARE:
2946 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2947 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
2948 break;
2949 case PDMAPICIRQ_UPDATE_PENDING: VMCPU_FF_SET(pVCpu, VMCPU_FF_UPDATE_APIC); break;
2950 case PDMAPICIRQ_NMI: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI); break;
2951 case PDMAPICIRQ_SMI: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI); break;
2952 case PDMAPICIRQ_EXTINT: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC); break;
2953 default:
2954 AssertMsgFailed(("enmType=%d\n", enmType));
2955 break;
2956 }
2957
2958 /*
2959 * We need to wake up the target CPU if we're not on EMT.
2960 */
2961 /** @todo r=bird: Why do we skip this waking up for PDMAPICIRQ_HARDWARE? */
2962 /** @todo r=bird: We could just use RTThreadNativeSelf() here, couldn't we? */
2963#if defined(IN_RING0)
2964 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2965 VMCPUID idCpu = pVCpu->idCpu;
2966 if ( enmType != PDMAPICIRQ_HARDWARE
2967 && VMMGetCpuId(pVM) != idCpu)
2968 {
2969 switch (VMCPU_GET_STATE(pVCpu))
2970 {
2971 case VMCPUSTATE_STARTED_EXEC:
2972 Log7Func(("idCpu=%u VMCPUSTATE_STARTED_EXEC\n", idCpu));
2973 GVMMR0SchedPokeNoGVMNoLock(pVM, idCpu);
2974 break;
2975
2976 case VMCPUSTATE_STARTED_HALTED:
2977 Log7Func(("idCpu=%u VMCPUSTATE_STARTED_HALTED\n", idCpu));
2978 GVMMR0SchedWakeUpNoGVMNoLock(pVM, idCpu);
2979 break;
2980
2981 default:
2982 Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
2983 break; /* nothing to do in other states. */
2984 }
2985 }
2986#elif defined(IN_RING3)
2987 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2988 VMCPUID idCpu = pVCpu->idCpu;
2989 if ( enmType != PDMAPICIRQ_HARDWARE
2990 && VMMGetCpuId(pVM) != idCpu)
2991 {
2992 Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
2993 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM | VMNOTIFYFF_FLAGS_POKE);
2994 }
2995#endif
2996}
2997
2998
2999/**
3000 * Clears the interrupt pending force-flag.
3001 *
3002 * @param pVCpu The cross context virtual CPU structure.
3003 * @param enmType The IRQ type.
3004 */
3005void apicClearInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType)
3006{
3007#ifdef IN_RING3
3008 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
3009 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
3010#endif
3011
3012 /* NMI/SMI can't be cleared. */
3013 switch (enmType)
3014 {
3015 case PDMAPICIRQ_HARDWARE: VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC); break;
3016 case PDMAPICIRQ_EXTINT: VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC); break;
3017 default:
3018 AssertMsgFailed(("enmType=%d\n", enmType));
3019 break;
3020 }
3021}
3022
3023
3024/**
3025 * Posts an interrupt to a target APIC.
3026 *
3027 * This function handles interrupts received from the system bus or
3028 * interrupts generated locally from the LVT or via a self IPI.
3029 *
3030 * Don't use this function to try to deliver ExtINT-style interrupts.
3031 *
3032 * @returns true if the interrupt was accepted, false otherwise.
3033 * @param pVCpu The cross context virtual CPU structure.
3034 * @param uVector The vector of the interrupt to be posted.
3035 * @param enmTriggerMode The trigger mode of the interrupt.
3036 * @param fAutoEoi Whether this interrupt has automatic EOI
3037 * treatment.
3038 * @param uSrcTag The interrupt source tag (debugging).
3039 *
3040 * @thread Any.
3041 */
3042DECLCALLBACK(bool) apicPostInterrupt(PVMCPUCC pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode, bool fAutoEoi,
3043 uint32_t uSrcTag)
3044{
3045 Assert(pVCpu);
3046 Assert(uVector > XAPIC_ILLEGAL_VECTOR_END);
3047 RT_NOREF(fAutoEoi);
3048
3049 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3050 PCAPIC pApic = VM_TO_APIC(pVM);
3051 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3052 bool fAccepted = true;
3053
3054 STAM_PROFILE_START(&pApicCpu->StatPostIntr, a);
3055 STAM_REL_COUNTER_INC(&pApicCpu->StatPostIntrCnt);
3056 STAM_REL_COUNTER_INC(&pApicCpu->aStatVectors[uVector]);
3057
3058 /*
3059 * Only post valid interrupt vectors.
3060 * See Intel spec. 10.5.2 "Valid Interrupt Vectors".
3061 */
3062 if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
3063 {
3064 /*
3065 * If the interrupt is already pending in the IRR we can skip the
3066 * potentially expensive operation of poking the guest EMT out of execution.
3067 */
3068 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
3069 if (!apicTestVectorInReg(&pXApicPage->irr, uVector)) /* PAV */
3070 {
3071 /* Update the interrupt source tag (debugging). */
3072 if (!pApicCpu->auSrcTags[uVector])
3073 pApicCpu->auSrcTags[uVector] = uSrcTag;
3074 else
3075 pApicCpu->auSrcTags[uVector] |= RT_BIT_32(31);
3076
3077 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u uVector=%#x %s\n",
3078 VMMGetCpuId(pVM), pVCpu->idCpu, uVector, enmTriggerMode == XAPICTRIGGERMODE_EDGE ? "edge" : "lvl"));
3079 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
3080 {
3081 if (pApic->fPostedIntrsEnabled)
3082 { /** @todo posted-interrupt call to hardware */ }
3083 else
3084 {
3085 apicSetVectorInPib(pApicCpu->CTX_SUFF(pvApicPib), uVector);
3086 uint32_t const fAlreadySet = apicSetNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
3087 if (!fAlreadySet)
3088 {
3089 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for edge-triggered intr. uVector=%#x\n", uVector));
3090 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
3091 }
3092 }
3093 }
3094 else
3095 {
3096 /*
3097 * Level-triggered interrupts require updating the TMR and thus cannot be
3098 * delivered asynchronously.
3099 */
3100 apicSetVectorInPib(&pApicCpu->ApicPibLevel, uVector);
3101 uint32_t const fAlreadySet = apicSetNotificationBitInPib(&pApicCpu->ApicPibLevel);
3102 if (!fAlreadySet)
3103 {
3104 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for level-triggered intr. uVector=%#x\n", uVector));
3105 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
3106 }
3107 }
3108 }
3109 else
3110 {
3111 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u. Vector %#x Already in IRR, skipping\n", VMMGetCpuId(pVM),
3112 pVCpu->idCpu, uVector));
3113 STAM_COUNTER_INC(&pApicCpu->StatPostIntrAlreadyPending);
3114 }
3115 }
3116 else
3117 {
3118 fAccepted = false;
3119 apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);
3120 }
3121
3122 STAM_PROFILE_STOP(&pApicCpu->StatPostIntr, a);
3123 return fAccepted;
3124}
3125
3126
3127/**
3128 * Starts the APIC timer.
3129 *
3130 * @param pVCpu The cross context virtual CPU structure.
3131 * @param uInitialCount The timer's Initial-Count Register (ICR), must be
3132 * greater than 0.
3133 * @thread Any.
3134 */
3135void apicStartTimer(PVMCPUCC pVCpu, uint32_t uInitialCount)
3136{
3137 Assert(pVCpu);
3138 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3139 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
3140 Assert(PDMDevHlpTimerIsLockOwner(pDevIns, pApicCpu->hTimer));
3141 Assert(uInitialCount > 0);
3142
3143 PCXAPICPAGE pXApicPage = APICCPU_TO_CXAPICPAGE(pApicCpu);
3144 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
3145 uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;
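    /* Worked example (illustrative figures): with uInitialCount=0x10000 and a divide-by-16
       DCR setting (uTimerShift=4), the timer fires after 0x100000 source-clock ticks. */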
3146
3147 Log2(("APIC%u: apicStartTimer: uInitialCount=%#RX32 uTimerShift=%u cTicksToNext=%RU64\n", pVCpu->idCpu, uInitialCount,
3148 uTimerShift, cTicksToNext));
3149
3150 /*
3151 * The assumption here is that the timer doesn't tick during this call
3152 * and thus setting a relative time to fire next is accurate. The advantage
3153 * however is updating u64TimerInitial 'atomically' while setting the next
3154 * tick.
3155 */
3156 PDMDevHlpTimerSetRelative(pDevIns, pApicCpu->hTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
3157 apicHintTimerFreq(pDevIns, pApicCpu, uInitialCount, uTimerShift);
3158}
3159
3160
3161/**
3162 * Stops the APIC timer.
3163 *
3164 * @param pVCpu The cross context virtual CPU structure.
3165 * @thread Any.
3166 */
3167static void apicStopTimer(PVMCPUCC pVCpu)
3168{
3169 Assert(pVCpu);
3170 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3171 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
3172 Assert(PDMDevHlpTimerIsLockOwner(pDevIns, pApicCpu->hTimer));
3173
3174 Log2(("APIC%u: apicStopTimer\n", pVCpu->idCpu));
3175
3176 PDMDevHlpTimerStop(pDevIns, pApicCpu->hTimer); /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
3177 pApicCpu->uHintedTimerInitialCount = 0;
3178 pApicCpu->uHintedTimerShift = 0;
3179}
3180
3181
3182#if 0
3183/**
3184 * Queues a pending interrupt as in-service.
3185 *
3186 * This function should only be needed without virtualized APIC
3187 * registers. With virtualized APIC registers, it's sufficient to keep
3188 * the interrupts pending in the IRR as the hardware takes care of
3189 * virtual interrupt delivery.
3190 *
3191 * @returns true if the interrupt was queued to in-service interrupts,
3192 * false otherwise.
3193 * @param pVCpu The cross context virtual CPU structure.
3194 * @param u8PendingIntr The pending interrupt to queue as
3195 * in-service.
3196 *
3197 * @remarks This assumes the caller has done the necessary checks and
3198 * is ready to actually service the interrupt (TPR,
3199 * interrupt shadow etc.)
3200 */
3201VMM_INT_DECL(bool) APICQueueInterruptToService(PVMCPUCC pVCpu, uint8_t u8PendingIntr)
3202{
3203 VMCPU_ASSERT_EMT(pVCpu);
3204
3205 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3206 PAPIC pApic = VM_TO_APIC(pVM);
3207 Assert(!pApic->fVirtApicRegsEnabled);
3208 NOREF(pApic);
3209
3210 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3211 bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
3212 if (fIsPending)
3213 {
3214 apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
3215 apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
3216 apicUpdatePpr(pVCpu);
3217 return true;
3218 }
3219 return false;
3220}
3221
3222
3223/**
3224 * De-queues a pending interrupt from in-service.
3225 *
3226 * This undoes APICQueueInterruptToService() for premature VM-exits before event
3227 * injection.
3228 *
3229 * @param pVCpu The cross context virtual CPU structure.
3230 * @param u8PendingIntr The pending interrupt to de-queue from
3231 * in-service.
3232 */
3233VMM_INT_DECL(void) APICDequeueInterruptFromService(PVMCPUCC pVCpu, uint8_t u8PendingIntr)
3234{
3235 VMCPU_ASSERT_EMT(pVCpu);
3236
3237 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3238 PAPIC pApic = VM_TO_APIC(pVM);
3239 Assert(!pApic->fVirtApicRegsEnabled);
3240 NOREF(pApic);
3241
3242 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3243 bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
3244 if (fInService)
3245 {
3246 apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
3247 apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
3248 apicUpdatePpr(pVCpu);
3249 }
3250}
3251#endif
3252
3253
3254/**
3255 * @interface_method_impl{PDMAPICBACKEND,pfnUpdatePendingInterrupts}
3256 */
3257static DECLCALLBACK(void) apicUpdatePendingInterrupts(PVMCPUCC pVCpu)
3258{
3259 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3260
3261 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3262 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3263 bool fHasPendingIntrs = false;
3264
3265 Log3(("APIC%u: apicUpdatePendingInterrupts:\n", pVCpu->idCpu));
3266 STAM_PROFILE_START(&pApicCpu->StatUpdatePendingIntrs, a);
3267
3268 /* Update edge-triggered pending interrupts. */
3269 PAPICPIB pPib = (PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib);
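    /* The PIB packs the 256 vector bits into four 64-bit words, each spanning two 32-bit IRR
       registers; e.g. (illustrative) vector 37 sets bit 37 of au64VectorBitmap[0], which is
       merged into bit 5 of irr.u[1] below. */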
3270 for (;;)
3271 {
3272 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
3273 if (!fAlreadySet)
3274 break;
3275
3276 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
3277 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
3278 {
3279 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
3280 if (u64Fragment)
3281 {
3282 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
3283 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
3284 Log6Func(("edge[%u/%u]: %'016RX64: irr=%08RX32'%08RX32 |; tmr=%08RX32'%08RX32 &~\n", idxPib, idxReg, u64Fragment,
3285 pXApicPage->irr.u[idxReg].u32Reg, pXApicPage->irr.u[idxReg + 1].u32Reg,
3286 pXApicPage->tmr.u[idxReg].u32Reg, pXApicPage->tmr.u[idxReg + 1].u32Reg));
3287
3288 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
3289 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3290
3291 pXApicPage->tmr.u[idxReg].u32Reg &= ~u32FragmentLo;
3292 pXApicPage->tmr.u[idxReg + 1].u32Reg &= ~u32FragmentHi;
3293 fHasPendingIntrs = true;
3294 }
3295 }
3296 }
3297
3298 /* Update level-triggered pending interrupts. */
3299 pPib = (PAPICPIB)&pApicCpu->ApicPibLevel;
3300 for (;;)
3301 {
3302 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)&pApicCpu->ApicPibLevel);
3303 if (!fAlreadySet)
3304 break;
3305
3306 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
3307 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
3308 {
3309 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
3310 if (u64Fragment)
3311 {
3312 Log6Func(("level[%u/%u]: %'016RX64: irr=%08RX32'%08RX32 |; tmr=%08RX32'%08RX32 |\n", idxPib, idxReg, u64Fragment,
3313 pXApicPage->irr.u[idxReg].u32Reg, pXApicPage->irr.u[idxReg + 1].u32Reg,
3314 pXApicPage->tmr.u[idxReg].u32Reg, pXApicPage->tmr.u[idxReg + 1].u32Reg));
3315 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
3316 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
3317
3318 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
3319 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3320
3321 pXApicPage->tmr.u[idxReg].u32Reg |= u32FragmentLo;
3322 pXApicPage->tmr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3323 fHasPendingIntrs = true;
3324 }
3325 }
3326 }
3327
3328 STAM_PROFILE_STOP(&pApicCpu->StatUpdatePendingIntrs, a);
3329 Log3(("APIC%u: apicUpdatePendingInterrupts: fHasPendingIntrs=%RTbool\n", pVCpu->idCpu, fHasPendingIntrs));
3330
3331 if ( fHasPendingIntrs
3332 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
3333 apicSignalNextPendingIntr(pVCpu);
3334}
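
/*
 * Illustrative sketch (disabled): the lock-free drain pattern used above, in
 * isolation.  Producers set a vector bit and then a notification flag; the
 * consumer clears the flag first and keeps looping while the flag keeps being
 * re-set, so a vector posted between the fragment exchange and the loop exit
 * is never lost.  The EXAMPLEPIB layout and field names are hypothetical
 * stand-ins for the real APICPIB.
 */
#if 0
typedef struct EXAMPLEPIB
{
    volatile uint64_t au64Bitmap[4];    /* 256 vector bits. */
    volatile uint32_t fNotified;        /* Set by producers after publishing a bit. */
} EXAMPLEPIB;

static bool exampleDrainPib(EXAMPLEPIB *pPib, uint64_t *pau64Dst)
{
    bool fFound = false;
    while (ASMAtomicXchgU32(&pPib->fNotified, 0))   /* Anything (re)published since last pass? */
        for (size_t i = 0; i < RT_ELEMENTS(pPib->au64Bitmap); i++)
        {
            uint64_t const u64 = ASMAtomicXchgU64(&pPib->au64Bitmap[i], 0); /* Claim the fragment. */
            if (u64)
            {
                pau64Dst[i] |= u64;                 /* Merge into the IRR image. */
                fFound = true;
            }
        }
    return fFound;
}
#endif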


#ifdef IN_RING0
/**
 * @interface_method_impl{PDMAPICBACKENDR0,pfnGetApicPageForCpu}
 */
static DECLCALLBACK(int) apicR0VBoxGetApicPageForCpu(PCVMCPUCC pVCpu, PRTHCPHYS pHCPhys, PRTR0PTR pR0Ptr, PRTR3PTR pR3Ptr)
{
    AssertReturn(pVCpu,   VERR_INVALID_PARAMETER);
    AssertReturn(pHCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pR0Ptr,  VERR_INVALID_PARAMETER);

    Assert(PDMHasApic(pVCpu->CTX_SUFF(pVM)));

    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    *pHCPhys = pApicCpu->HCPhysApicPage;
    *pR0Ptr  = pApicCpu->pvApicPageR0;
    if (pR3Ptr)
        *pR3Ptr = pApicCpu->pvApicPageR3;
    return VINF_SUCCESS;
}
#endif /* IN_RING0 */
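
/*
 * Illustrative sketch (disabled): how a ring-0 caller could fetch the APIC
 * page addresses through the method above, e.g. to point hardware-assisted
 * virtualization structures at the page.  The call site is hypothetical.
 */
#if 0
    RTHCPHYS HCPhysApicPage = NIL_RTHCPHYS;
    RTR0PTR  pvApicPageR0   = NIL_RTR0PTR;
    int rc = apicR0VBoxGetApicPageForCpu(pVCpu, &HCPhysApicPage, &pvApicPageR0, NULL /* pR3Ptr */);
    if (RT_SUCCESS(rc))
    { /* e.g. load HCPhysApicPage into the VMCS virtual-APIC page pointer. */ }
#endif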

#ifndef IN_RING3

/**
 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
 */
static DECLCALLBACK(int) apicRZConstruct(PPDMDEVINS pDevIns)
{
    PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
    PAPICDEV pThis = PDMDEVINS_2_DATA(pDevIns, PAPICDEV);
    PVMCC    pVM   = PDMDevHlpGetVM(pDevIns);

    pVM->apicr0.s.pDevInsR0 = pDevIns;

    /* The APIC serializes on its own; no device-level locking is needed (NOP critsect). */
    int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
    AssertRCReturn(rc, rc);

    rc = PDMDevHlpIcSetUpContext(pDevIns);
    AssertRCReturn(rc, rc);

    /* Register the VirtualBox APIC backend (see g_ApicBackend below). */
    rc = PDMApicRegisterBackend(pVM, PDMAPICBACKENDTYPE_VBOX, &g_ApicBackend);
    AssertLogRelRCReturn(rc, rc);

    /* Set up the MMIO handlers for this context. */
    rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, apicWriteMmio, apicReadMmio, NULL /*pvUser*/);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
#endif /* !IN_RING3 */

/**
 * APIC device registration structure.
 */
const PDMDEVREG g_DeviceAPIC =
{
    /* .u32Version = */             PDM_DEVREG_VERSION,
    /* .uReserved0 = */             0,
    /* .szName = */                 "apic",
    /* .fFlags = */                 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE
                                    | PDM_DEVREG_FLAGS_REQUIRE_R0 | PDM_DEVREG_FLAGS_REQUIRE_RC,
    /* .fClass = */                 PDM_DEVREG_CLASS_PIC,
    /* .cMaxInstances = */          1,
    /* .uSharedVersion = */         42,
    /* .cbInstanceShared = */       sizeof(APICDEV),
    /* .cbInstanceCC = */           0,
    /* .cbInstanceRC = */           0,
    /* .cMaxPciDevices = */         0,
    /* .cMaxMsixVectors = */        0,
    /* .pszDescription = */         "Advanced Programmable Interrupt Controller",
#if defined(IN_RING3)
    /* .szRCMod = */                "VMMRC.rc",
    /* .szR0Mod = */                "VMMR0.r0",
    /* .pfnConstruct = */           apicR3Construct,
    /* .pfnDestruct = */            apicR3Destruct,
    /* .pfnRelocate = */            apicR3Relocate,
    /* .pfnMemSetup = */            NULL,
    /* .pfnPowerOn = */             NULL,
    /* .pfnReset = */               apicR3Reset,
    /* .pfnSuspend = */             NULL,
    /* .pfnResume = */              NULL,
    /* .pfnAttach = */              NULL,
    /* .pfnDetach = */              NULL,
    /* .pfnQueryInterface = */      NULL,
    /* .pfnInitComplete = */        apicR3InitComplete,
    /* .pfnPowerOff = */            NULL,
    /* .pfnSoftReset = */           NULL,
    /* .pfnReserved0 = */           NULL,
    /* .pfnReserved1 = */           NULL,
    /* .pfnReserved2 = */           NULL,
    /* .pfnReserved3 = */           NULL,
    /* .pfnReserved4 = */           NULL,
    /* .pfnReserved5 = */           NULL,
    /* .pfnReserved6 = */           NULL,
    /* .pfnReserved7 = */           NULL,
#elif defined(IN_RING0)
    /* .pfnEarlyConstruct = */      NULL,
    /* .pfnConstruct = */           apicRZConstruct,
    /* .pfnDestruct = */            NULL,
    /* .pfnFinalDestruct = */       NULL,
    /* .pfnRequest = */             NULL,
    /* .pfnReserved0 = */           NULL,
    /* .pfnReserved1 = */           NULL,
    /* .pfnReserved2 = */           NULL,
    /* .pfnReserved3 = */           NULL,
    /* .pfnReserved4 = */           NULL,
    /* .pfnReserved5 = */           NULL,
    /* .pfnReserved6 = */           NULL,
    /* .pfnReserved7 = */           NULL,
#elif defined(IN_RC)
    /* .pfnConstruct = */           apicRZConstruct,
    /* .pfnReserved0 = */           NULL,
    /* .pfnReserved1 = */           NULL,
    /* .pfnReserved2 = */           NULL,
    /* .pfnReserved3 = */           NULL,
    /* .pfnReserved4 = */           NULL,
    /* .pfnReserved5 = */           NULL,
    /* .pfnReserved6 = */           NULL,
    /* .pfnReserved7 = */           NULL,
#else
# error "Not in IN_RING3, IN_RING0 or IN_RC!"
#endif
    /* .u32VersionEnd = */          PDM_DEVREG_VERSION
};
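
/*
 * Note: PDM_DEVREG_FLAGS_REQUIRE_R0 and PDM_DEVREG_FLAGS_REQUIRE_RC above make
 * the ring-0 and raw-mode context modules mandatory, which is why the same
 * apicRZConstruct() constructor is supplied for both non-ring-3 contexts.
 */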

/**
 * The VirtualBox APIC backend.
 */
const PDMAPICBACKEND g_ApicBackend =
{
    /* .pfnIsEnabled = */               apicIsEnabled,
    /* .pfnInitIpi = */                 apicInitIpi,
    /* .pfnGetBaseMsrNoCheck = */       apicGetBaseMsrNoCheck,
    /* .pfnGetBaseMsr = */              apicGetBaseMsr,
    /* .pfnSetBaseMsr = */              apicSetBaseMsr,
    /* .pfnReadRaw32 = */               apicReadRaw32,
    /* .pfnReadMsr = */                 apicReadMsr,
    /* .pfnWriteMsr = */                apicWriteMsr,
    /* .pfnGetTpr = */                  apicGetTpr,
    /* .pfnSetTpr = */                  apicSetTpr,
    /* .pfnGetIcrNoCheck = */           apicGetIcrNoCheck,
    /* .pfnSetIcr = */                  apicSetIcr,
    /* .pfnGetTimerFreq = */            apicGetTimerFreq,
    /* .pfnSetLocalInterrupt = */       apicSetLocalInterrupt,
    /* .pfnGetInterrupt = */            apicGetInterrupt,
    /* .pfnPostInterrupt = */           apicPostInterrupt,
    /* .pfnUpdatePendingInterrupts = */ apicUpdatePendingInterrupts,
    /* .pfnBusDeliver = */              apicBusDeliver,
    /* .pfnSetEoi = */                  apicSetEoi,
#if defined(IN_RING3)
    /* .pfnHvSetCompatMode = */         apicR3HvSetCompatMode,
#elif defined(IN_RING0)
    /* .pfnGetApicPageForCpu = */       apicR0VBoxGetApicPageForCpu,
#endif
};
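
/*
 * Note: this is the table that apicRZConstruct() above hands to PDM via
 *
 *     PDMApicRegisterBackend(pVM, PDMAPICBACKENDTYPE_VBOX, &g_ApicBackend);
 *
 * making these the active APIC callbacks for the current context.  Only the
 * tail differs per context: ring-3 adds the Hyper-V compatibility hook,
 * ring-0 the APIC page accessor.
 */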