VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/APICAll.cpp@ 60619

Last change on this file since 60619 was 60619, checked in by vboxsync, 9 years ago

VMM/APIC: CTX_SUFF nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 91.7 KB
Line 
1/* $Id: APICAll.cpp 60619 2016-04-21 11:32:01Z vboxsync $ */
2/** @file
3 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_APIC
23#include "APICInternal.h"
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/vmcpuset.h>
27
28/*********************************************************************************************************************************
29* Global Variables *
30*********************************************************************************************************************************/
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
/** An ordered array of valid LVT masks.
 *  One writable-bits mask per LVT register: timer, thermal, performance
 *  counter, LINT0, LINT1 and error (in that order).
 *  NOTE(review): presumably indexed by the LVT register's position/offset —
 *  confirm against the code that consumes this table. */
static const uint32_t g_au32LvtValidMasks[] =
{
    XAPIC_LVT_TIMER,
    XAPIC_LVT_THERMAL,
    XAPIC_LVT_PERF,
    XAPIC_LVT_LINT0,
    XAPIC_LVT_LINT1,
    XAPIC_LVT_ERROR
};
#endif
43
#if 0
/** @todo CMCI */
/* Disabled: valid-bits mask for the extended (CMCI) LVT register, kept here
   for when CMCI support gets implemented. */
static const uint32_t g_au32LvtExtValidMask[] =
{
    XAPIC_LVT_CMCI
};
#endif
51
52
53/**
54 * Checks if a vector is set in an APIC 256-bit sparse register.
55 *
56 * @returns true if the specified vector is set, false otherwise.
57 * @param pApicReg The APIC 256-bit spare register.
58 * @param uVector The vector to check if set.
59 */
60DECLINLINE(bool) apicTestVectorInReg(const volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
61{
62 const volatile uint8_t *pbBitmap = (const volatile uint8_t *)&pApicReg->u[0];
63 return ASMBitTest(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
64}
65
66
67/**
68 * Sets the vector in an APIC 256-bit sparse register.
69 *
70 * @param pApicReg The APIC 256-bit spare register.
71 * @param uVector The vector to set.
72 */
73DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
74{
75 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
76 ASMAtomicBitSet(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
77}
78
79
80/**
81 * Clears the vector in an APIC 256-bit sparse register.
82 *
83 * @param pApicReg The APIC 256-bit spare register.
84 * @param uVector The vector to clear.
85 */
86DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
87{
88 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
89 ASMAtomicBitClear(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
90}
91
92
93/**
94 * Checks if a vector is set in an APIC Pending-Interrupt Bitmap (PIB).
95 *
96 * @returns true if the specified vector is set, false otherwise.
97 * @param pvPib Opaque pointer to the PIB.
98 * @param uVector The vector to check if set.
99 */
100DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
101{
102 return ASMBitTest(pvPib, uVector);
103}
104
105
106/**
107 * Atomically sets the PIB notification bit.
108 *
109 * @returns non-zero if the bit was already set, 0 otherwise.
110 * @param pApicPib Pointer to the PIB.
111 */
112DECLINLINE(uint32_t) apicSetNotificationBitInPib(PAPICPIB pApicPib)
113{
114 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, RT_BIT_32(31));
115}
116
117
118/**
119 * Atomically tests and clears the PIB notification bit.
120 *
121 * @returns non-zero if the bit was already set, 0 otherwise.
122 * @param pApicPib Pointer to the PIB.
123 */
124DECLINLINE(uint32_t) apicClearNotificationBitInPib(PAPICPIB pApicPib)
125{
126 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, UINT32_C(0));
127}
128
129
130/**
131 * Sets the vector in an APIC Pending-Interrupt Bitmap (PIB).
132 *
133 * @param pvPib Opaque pointer to the PIB.
134 * @param uVector The vector to set.
135 */
136DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
137{
138 ASMAtomicBitSet(pvPib, uVector);
139}
140
141
142/**
143 * Clears the vector in an APIC Pending-Interrupt Bitmap (PIB).
144 *
145 * @param pvPib Opaque pointer to the PIB.
146 * @param uVector The vector to clear.
147 */
148DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
149{
150 ASMAtomicBitClear(pvPib, uVector);
151}
152
153
154/**
155 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
156 * register.
157 *
158 * @param pApicReg The APIC 256-bit spare register.
159 * @param idxFragment The index of the 32-bit fragment in @a
160 * pApicReg.
161 * @param u32Fragment The 32-bit vector fragment to OR.
162 */
163DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
164{
165 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
166 ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
167}
168
169
170/**
171 * Atomically AND's a fragment (32 vectors) into an APIC
172 * 256-bit sparse register.
173 *
174 * @param pApicReg The APIC 256-bit spare register.
175 * @param idxFragment The index of the 32-bit fragment in @a
176 * pApicReg.
177 * @param u32Fragment The 32-bit vector fragment to AND.
178 */
179DECLINLINE(void) apicAndVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
180{
181 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
182 ASMAtomicAndU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
183}
184
185
186/**
187 * Reports and returns appropriate error code for invalid MSR accesses.
188 *
189 * @returns Strict VBox status code.
190 * @retval VINF_CPUM_R3_MSR_WRITE if the MSR write could not be serviced in the
191 * current context (raw-mode or ring-0).
192 * @retval VINF_CPUM_R3_MSR_READ if the MSR read could not be serviced in the
193 * current context (raw-mode or ring-0).
194 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
195 * appropriate actions.
196 *
197 * @param pVCpu The cross context virtual CPU structure.
198 * @param u32Reg The MSR being accessed.
199 * @param enmAccess The invalid-access type.
200 */
201static VBOXSTRICTRC apicMsrAccessError(PVMCPU pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
202{
203 static struct
204 {
205 const char *pszBefore; /* The error message before printing the MSR index */
206 const char *pszAfter; /* The error message after printing the MSR index */
207 int rcR0; /* The ring-0 error code */
208 } const s_aAccess[] =
209 {
210 { "read MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_READ },
211 { "write MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_WRITE },
212 { "read reserved/unknown MSR", "", VINF_CPUM_R3_MSR_READ },
213 { "write reserved/unknown MSR", "", VINF_CPUM_R3_MSR_WRITE },
214 { "read write-only MSR", "", VINF_CPUM_R3_MSR_READ },
215 { "write read-only MSR", "", VINF_CPUM_R3_MSR_WRITE },
216 { "read reserved bits of MSR", "", VINF_CPUM_R3_MSR_READ },
217 { "write reserved bits of MSR", "", VINF_CPUM_R3_MSR_WRITE },
218 { "write an invalid value to MSR", "", VINF_CPUM_R3_MSR_WRITE }
219 };
220 AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);
221
222 size_t const i = enmAccess;
223 Assert(i < RT_ELEMENTS(s_aAccess));
224#ifdef IN_RING3
225 LogRelMax(5, ("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg,
226 s_aAccess[i].pszAfter));
227 return VERR_CPUM_RAISE_GP_0;
228#else
229 return s_aAccess[i].rcR0;
230#endif
231}
232
233
234/**
235 * Gets the descriptive APIC mode.
236 *
237 * @returns The name.
238 * @param enmMode The xAPIC mode.
239 */
240const char *apicGetModeName(APICMODE enmMode)
241{
242 switch (enmMode)
243 {
244 case APICMODE_DISABLED: return "Disabled";
245 case APICMODE_XAPIC: return "xAPIC";
246 case APICMODE_X2APIC: return "x2APIC";
247 default: break;
248 }
249 return "Invalid";
250}
251
252
253/**
254 * Gets the descriptive destination format name.
255 *
256 * @returns The destination format name.
257 * @param enmDestFormat The destination format.
258 */
259const char *apicGetDestFormatName(XAPICDESTFORMAT enmDestFormat)
260{
261 switch (enmDestFormat)
262 {
263 case XAPICDESTFORMAT_FLAT: return "Flat";
264 case XAPICDESTFORMAT_CLUSTER: return "Cluster";
265 default: break;
266 }
267 return "Invalid";
268}
269
270
271/**
272 * Gets the descriptive delivery mode name.
273 *
274 * @returns The delivery mode name.
275 * @param enmDeliveryMode The delivery mode.
276 */
277const char *apicGetDeliveryModeName(XAPICDELIVERYMODE enmDeliveryMode)
278{
279 switch (enmDeliveryMode)
280 {
281 case XAPICDELIVERYMODE_FIXED: return "Fixed";
282 case XAPICDELIVERYMODE_LOWEST_PRIO: return "Lowest-priority";
283 case XAPICDELIVERYMODE_SMI: return "SMI";
284 case XAPICDELIVERYMODE_NMI: return "NMI";
285 case XAPICDELIVERYMODE_INIT: return "INIT";
286 case XAPICDELIVERYMODE_STARTUP: return "SIPI";
287 case XAPICDELIVERYMODE_EXTINT: return "ExtINT";
288 default: break;
289 }
290 return "Invalid";
291}
292
293
294/**
295 * Gets the descriptive destination mode name.
296 *
297 * @returns The destination mode name.
298 * @param enmDestMode The destination mode.
299 */
300const char *apicGetDestModeName(XAPICDESTMODE enmDestMode)
301{
302 switch (enmDestMode)
303 {
304 case XAPICDESTMODE_PHYSICAL: return "Physical";
305 case XAPICDESTMODE_LOGICAL: return "Logical";
306 default: break;
307 }
308 return "Invalid";
309}
310
311
312/**
313 * Gets the descriptive trigger mode name.
314 *
315 * @returns The trigger mode name.
316 * @param enmTriggerMode The trigger mode.
317 */
318const char *apicGetTriggerModeName(XAPICTRIGGERMODE enmTriggerMode)
319{
320 switch (enmTriggerMode)
321 {
322 case XAPICTRIGGERMODE_EDGE: return "Edge";
323 case XAPICTRIGGERMODE_LEVEL: return "Level";
324 default: break;
325 }
326 return "Invalid";
327}
328
329
330/**
331 * Gets the destination shorthand name.
332 *
333 * @returns The destination shorthand name.
334 * @param enmDestShorthand The destination shorthand.
335 */
336const char *apicGetDestShorthandName(XAPICDESTSHORTHAND enmDestShorthand)
337{
338 switch (enmDestShorthand)
339 {
340 case XAPICDESTSHORTHAND_NONE: return "None";
341 case XAPICDESTSHORTHAND_SELF: return "Self";
342 case XAPIDDESTSHORTHAND_ALL_INCL_SELF: return "All including self";
343 case XAPICDESTSHORTHAND_ALL_EXCL_SELF: return "All excluding self";
344 default: break;
345 }
346 return "Invalid";
347}
348
349
350/**
351 * Gets the timer mode name.
352 *
353 * @returns The timer mode name.
354 * @param enmTimerMode The timer mode.
355 */
356const char *apicGetTimerModeName(XAPICTIMERMODE enmTimerMode)
357{
358 switch (enmTimerMode)
359 {
360 case XAPICTIMERMODE_ONESHOT: return "One-shot";
361 case XAPICTIMERMODE_PERIODIC: return "Periodic";
362 case XAPICTIMERMODE_TSC_DEADLINE: return "TSC deadline";
363 default: break;
364 }
365 return "Invalid";
366}
367
368
369/**
370 * Gets the APIC mode given the base MSR value.
371 *
372 * @returns The APIC mode.
373 * @param uApicBaseMsr The APIC Base MSR value.
374 */
375static APICMODE apicGetMode(uint64_t uApicBaseMsr)
376{
377 uint32_t const uMode = MSR_APICBASE_GET_MODE(uApicBaseMsr);
378 APICMODE const enmMode = (APICMODE)uMode;
379#ifdef VBOX_STRICT
380 /* Paranoia. */
381 switch (uMode)
382 {
383 case APICMODE_DISABLED:
384 case APICMODE_INVALID:
385 case APICMODE_XAPIC:
386 case APICMODE_X2APIC:
387 break;
388 default:
389 AssertMsgFailed(("Invalid mode"));
390 }
391#endif
392 return enmMode;
393}
394
395
396/**
397 * Returns whether the APIC is hardware enabled or not.
398 *
399 * @returns true if enabled, false otherwise.
400 */
401DECLINLINE(bool) apicIsEnabled(PVMCPU pVCpu)
402{
403 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
404 return MSR_APICBASE_IS_ENABLED(pApicCpu->uApicBaseMsr);
405}
406
407
408/**
409 * Finds the most significant set bit in an APIC 256-bit sparse register.
410 *
411 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
412 * @param pReg The APIC 256-bit sparse register.
413 * @param rcNotFound What to return when no bit is set.
414 */
415static int apicGetLastSetBit(volatile const XAPIC256BITREG *pReg, int rcNotFound)
416{
417 unsigned const cBitsPerFragment = sizeof(pReg->u[0].u32Reg) * 8;
418 ssize_t const cFragments = RT_ELEMENTS(pReg->u);
419 for (ssize_t i = cFragments - 1; i >= 0; i--)
420 {
421 uint32_t const uFragment = pReg->u[i].u32Reg;
422 if (uFragment)
423 {
424 unsigned idxSetBit = ASMBitLastSetU32(uFragment);
425 --idxSetBit;
426 idxSetBit += (i * cBitsPerFragment);
427 return idxSetBit;
428 }
429 }
430 return rcNotFound;
431}
432
433
434/**
435 * Gets the highest priority pending interrupt.
436 *
437 * @returns true if any interrupt is pending, false otherwise.
438 * @param pVCpu The cross context virtual CPU structure.
439 * @param pu8PendingIntr Where to store the interrupt vector if the
440 * interrupt is pending, optional can be NULL.
441 */
442static bool apicGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
443{
444 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
445 int const irrv = apicGetLastSetBit(&pXApicPage->irr, -1);
446 if (irrv >= 0)
447 {
448 Assert(irrv <= (int)UINT8_MAX);
449 if (pu8PendingIntr)
450 *pu8PendingIntr = (uint8_t)irrv;
451 return true;
452 }
453 return false;
454}
455
456
457/**
458 * Reads a 32-bit register at a specified offset.
459 *
460 * @returns The value at the specified offset.
461 * @param pXApicPage The xAPIC page.
462 * @param offReg The offset of the register being read.
463 */
464DECLINLINE(uint32_t) apicReadRaw32(PCXAPICPAGE pXApicPage, uint16_t offReg)
465{
466 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
467 uint8_t const *pbXApic = (const uint8_t *)pXApicPage;
468 uint32_t const uValue = *(const uint32_t *)(pbXApic + offReg);
469 return uValue;
470}
471
472
473/**
474 * Writes a 32-bit register at a specified offset.
475 *
476 * @param pXApicPage The xAPIC page.
477 * @param offReg The offset of the register being written.
478 * @param uReg The value of the register.
479 */
480DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
481{
482 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
483 uint8_t *pbXApic = (uint8_t *)pXApicPage;
484 *(uint32_t *)(pbXApic + offReg) = uReg;
485}
486
487
488/**
489 * Sets an error in the internal ESR of the specified APIC.
490 *
491 * @param pVCpu The cross context virtual CPU structure.
492 * @param uError The error.
493 * @thread Any.
494 */
495DECLINLINE(void) apicSetError(PVMCPU pVCpu, uint32_t uError)
496{
497 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
498 ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
499}
500
501
502/**
503 * Clears all errors in the internal ESR.
504 *
505 * @returns The value of the internal ESR before clearing.
506 * @param pVCpu The cross context virtual CPU structure.
507 */
508DECLINLINE(uint32_t) apicClearAllErrors(PVMCPU pVCpu)
509{
510 VMCPU_ASSERT_EMT(pVCpu);
511 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
512 return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
513}
514
515
516/**
517 * Signals the guest if a pending interrupt is ready to be serviced.
518 *
519 * @param pVCpu The cross context virtual CPU structure.
520 */
521static void apicSignalNextPendingIntr(PVMCPU pVCpu)
522{
523 VMCPU_ASSERT_EMT(pVCpu);
524
525 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
526 if (pXApicPage->svr.u.fApicSoftwareEnable)
527 {
528 int const irrv = apicGetLastSetBit(&pXApicPage->irr, -1 /* rcNotFound */);
529 if (irrv >= 0)
530 {
531 Assert(irrv <= (int)UINT8_MAX);
532 uint8_t const uVector = irrv;
533 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
534 if ( !uPpr
535 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
536 {
537 APICSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
538 }
539 }
540 }
541 else
542 APICClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
543}
544
545
546/**
547 * Sets the Spurious-Interrupt Vector Register (SVR).
548 *
549 * @returns Strict VBox status code.
550 * @param pVCpu The cross context virtual CPU structure.
551 * @param uSvr The SVR value.
552 */
553static VBOXSTRICTRC apicSetSvr(PVMCPU pVCpu, uint32_t uSvr)
554{
555 VMCPU_ASSERT_EMT(pVCpu);
556
557 uint32_t uValidMask = XAPIC_SVR;
558 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
559 if (pXApicPage->version.u.fEoiBroadcastSupression)
560 uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;
561
562 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
563 && (uSvr & ~uValidMask))
564 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);
565
566 apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
567 if (!pXApicPage->svr.u.fApicSoftwareEnable)
568 {
569 /** @todo CMCI. */
570 pXApicPage->lvt_timer.u.u1Mask = 1;
571#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
572 pXApicPage->lvt_thermal.u.u1Mask = 1;
573#endif
574 pXApicPage->lvt_perf.u.u1Mask = 1;
575 pXApicPage->lvt_lint0.u.u1Mask = 1;
576 pXApicPage->lvt_lint1.u.u1Mask = 1;
577 pXApicPage->lvt_error.u.u1Mask = 1;
578 }
579 return VINF_SUCCESS;
580}
581
582
/**
 * Sends an interrupt to one or more APICs.
 *
 * Dispatches on the delivery mode: fixed/lowest-priority interrupts are
 * posted directly; SMI/NMI/ExtINT set the corresponding force-flag; INIT and
 * SIPI can only be delivered from ring-3 and cause a context switch (rcRZ)
 * otherwise.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure, can be
 *                          NULL if the source of the interrupt is not an
 *                          APIC (for e.g. a bus).
 * @param   uVector         The interrupt vector.
 * @param   enmTriggerMode  The trigger mode.
 * @param   enmDeliveryMode The delivery mode.
 * @param   pDestCpuSet     The destination CPU set.
 * @param   rcRZ            The return code if the operation cannot be
 *                          performed in the current context.
 */
static VBOXSTRICTRC apicSendIntr(PVM pVM, PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
                                 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, int rcRZ)
{
    VBOXSTRICTRC  rcStrict = VINF_SUCCESS;
    VMCPUID const cCpus    = pVM->cCpus;
    switch (enmDeliveryMode)
    {
        case XAPICDELIVERYMODE_FIXED:
        {
            /* Post to every present, hardware-enabled APIC in the destination set. */
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
            {
                if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
                    && apicIsEnabled(&pVM->aCpus[idCpu]))
                    APICPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
            }
            break;
        }

        case XAPICDELIVERYMODE_LOWEST_PRIO:
        {
            /* The arbitration already happened in apicGetDestCpuSet; at most
               one CPU should be present in the set here. */
            VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
            if (   idCpu < pVM->cCpus
                && apicIsEnabled(&pVM->aCpus[idCpu]))
                APICPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
            else
                Log4(("APIC: apicSendIntr: No CPU found for lowest-priority delivery mode!\n"));
            break;
        }

        case XAPICDELIVERYMODE_SMI:
        {
            /* SMIs are delivered regardless of whether the target APIC is
               hardware enabled. */
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
            {
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log4(("APIC: apicSendIntr: Raising SMI on VCPU%u\n", idCpu));
                    APICSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_SMI);
                }
            }
            break;
        }

        case XAPICDELIVERYMODE_NMI:
        {
            /* NMIs are only delivered to hardware-enabled APICs. */
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
            {
                if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
                    && apicIsEnabled(&pVM->aCpus[idCpu]))
                {
                    Log4(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
                    APICSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_NMI);
                }
            }
            break;
        }

        case XAPICDELIVERYMODE_INIT:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log4(("APIC: apicSendIntr: Issuing INIT to VCPU%u\n", idCpu));
                    VMMR3SendInitIpi(pVM, idCpu);
                }
#else
            /* We need to return to ring-3 to deliver the INIT. */
            rcStrict = rcRZ;
#endif
            break;
        }

        case XAPICDELIVERYMODE_STARTUP:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log4(("APIC: apicSendIntr: Issuing SIPI to VCPU%u\n", idCpu));
                    VMMR3SendStartupIpi(pVM, idCpu, uVector);
                }
#else
            /* We need to return to ring-3 to deliver the SIPI. */
            rcStrict = rcRZ;
#endif
            break;
        }

        case XAPICDELIVERYMODE_EXTINT:
        {
            /* ExtINT is flagged even on hardware-disabled APICs. */
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log4(("APIC: apicSendIntr: Raising EXTINT on VCPU%u\n", idCpu));
                    APICSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_EXTINT);
                }
            break;
        }

        default:
        {
            AssertMsgFailed(("APIC: apicSendIntr: Unsupported delivery mode %#x (%s)\n", enmDeliveryMode,
                             apicGetDeliveryModeName(enmDeliveryMode)));
            break;
        }
    }

    /*
     * If an illegal vector is programmed, set the 'send illegal vector' error here if the
     * interrupt is being sent by an APIC.
     *
     * The 'receive illegal vector' will be set on the target APIC when the interrupt
     * gets generated, see APICPostInterrupt().
     *
     * See Intel spec. 10.5.3 "Error Handling".
     */
    if (   rcStrict != rcRZ
        && pVCpu)
    {
        if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
            apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
    }
    return rcStrict;
}
722
723
/**
 * Checks if this APIC belongs to a logical destination.
 *
 * Interprets the destination mask against the Logical Destination Register
 * (LDR), using the x2APIC clustered format or, in xAPIC mode, the format
 * selected by the Destination Format Register (DFR).
 *
 * @returns true if the APIC belongs to the logical
 *          destination, false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fDest           The destination mask.
 *
 * @thread  Any.
 */
static bool apicIsLogicalDest(PVMCPU pVCpu, uint32_t fDest)
{
    if (XAPIC_IN_X2APIC_MODE(pVCpu))
    {
        /*
         * Flat logical mode is not supported in x2APIC mode.
         * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
         *    - High 16 bits is the cluster ID.
         *    - Low 16 bits: each bit represents a unique APIC within the cluster.
         */
        PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
        uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
        /* Cluster IDs must match exactly; within the cluster any overlapping
           APIC bit selects this CPU. */
        if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
            return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
        return false;
    }

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
     * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
    if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
        return true;

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
    if (enmDestFormat == XAPICDESTFORMAT_FLAT)
    {
        /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
        uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
    }

    /*
     * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
     *    - High 4 bits is the cluster ID.
     *    - Low 4 bits: each bit represents a unique APIC within the cluster.
     */
    Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
    uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
    if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
    return false;
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
}
783
784
/**
 * Figures out the set of destination CPUs for a given destination mode, format
 * and delivery mode setting.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fDestMask       The destination mask.
 * @param   fBroadcastMask  The broadcast mask (all-ones pattern for the
 *                          current APIC mode).
 * @param   enmDestMode     The destination mode.
 * @param   enmDeliveryMode The delivery mode.
 * @param   pDestCpuSet     The destination CPU set to update (emptied first).
 */
static void apicGetDestCpuSet(PVM pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
                              XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
{
    VMCPUSET_EMPTY(pDestCpuSet);

    /*
     * Physical destination mode only supports either a broadcast or a single target.
     *    - Broadcast with lowest-priority delivery mode is not supported[1], we deliver it
     *      as a regular broadcast like in fixed delivery mode.
     *    - For a single target, lowest-priority delivery mode makes no sense. We deliver
     *      to the target like in fixed delivery mode.
     *
     * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     */
    if (   enmDestMode == XAPICDESTMODE_PHYSICAL
        && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
        enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
    }

    uint32_t const cCpus = pVM->cCpus;
    if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        /* Arbitrate: pick the logical-destination CPU with the lowest TPR. */
        VMCPUID idCpuLowestTpr = NIL_VMCPUID;
        uint8_t u8LowestTpr    = UINT8_C(0xff);
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
            if (apicIsLogicalDest(pVCpuDest, fDestMask))
            {
                PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
                uint8_t const u8Tpr = pXApicPage->tpr.u8Tpr;    /* PAV */

                /*
                 * If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
                 * Hence the use of "<=" in the check below.
                 * See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration".
                 */
                if (u8Tpr <= u8LowestTpr)
                {
                    u8LowestTpr    = u8Tpr;
                    idCpuLowestTpr = idCpu;
                }
            }
        }
        if (idCpuLowestTpr != NIL_VMCPUID)
            VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
        return;
    }

    /*
     * x2APIC:
     *    - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
     * xAPIC:
     *    - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
     *    - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
     *
     * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
     * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     * [3] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    if ((fDestMask & fBroadcastMask) == fBroadcastMask)
    {
        VMCPUSET_FILL(pDestCpuSet);
        return;
    }

    if (enmDestMode == XAPICDESTMODE_PHYSICAL)
    {
        /* The destination mask is interpreted as the physical APIC ID of a single target. */
#if 1
        /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
        if (RT_LIKELY(fDestMask < cCpus))
            VMCPUSET_ADD(pDestCpuSet, fDestMask);
#else
        /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
            if (XAPIC_IN_X2APIC_MODE(pVCpuDest))
            {
                PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDest);
                if (pX2ApicPage->id.u32ApicId == fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
            }
            else
            {
                PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
                if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
            }
        }
#endif
    }
    else
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);

        /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
        if (RT_UNLIKELY(!fDestMask))
            return;

        /* The destination mask is interpreted as a bitmap of software-programmable logical APIC ID of the target APICs. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
            if (apicIsLogicalDest(pVCpuDest, fDestMask))
                VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
        }
    }
}
913
914
/**
 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
 * Command Register (ICR).
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   rcRZ            The return code if the operation cannot be
 *                          performed in the current context.
 */
static VBOXSTRICTRC apicSendIpi(PVMCPU pVCpu, int rcRZ)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* Decode the ICR low dword fields. */
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    XAPICDELIVERYMODE const  enmDeliveryMode  = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
    XAPICDESTMODE const      enmDestMode      = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
    XAPICINITLEVEL const     enmInitLevel     = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
    XAPICTRIGGERMODE const   enmTriggerMode   = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
    XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
    uint8_t const            uVector          = pXApicPage->icr_lo.u.u8Vector;

    /* The destination field is 32 bits wide in x2APIC mode, 8 bits in xAPIC mode. */
    PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
    uint32_t const fDest = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * INIT Level De-assert is not supported on Pentium 4 and Xeon processors.
     * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)" for a table of valid ICR combinations.
     */
    if (RT_UNLIKELY(   enmDeliveryMode == XAPICDELIVERYMODE_INIT_LEVEL_DEASSERT
                    && enmInitLevel    == XAPICINITLEVEL_DEASSERT
                    && enmTriggerMode  == XAPICTRIGGERMODE_LEVEL))
    {
        Log4(("APIC%u: INIT level de-assert unsupported, ignoring!\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif

    /*
     * The destination and delivery modes are ignored/by-passed when a destination shorthand is specified.
     * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
     *
     * Note: the switch covers all four possible values of the 2-bit shorthand
     * field, so DestCpuSet is always initialized before use below.
     */
    VMCPUSET DestCpuSet;
    switch (enmDestShorthand)
    {
        case XAPICDESTSHORTHAND_NONE:
        {
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
            apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_SELF:
        {
            VMCPUSET_EMPTY(&DestCpuSet);
            VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
            break;
        }

        case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
            break;
        }
    }

    return apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet, rcRZ);
}
993
994
995/**
996 * Sets the Interrupt Command Register (ICR) high dword.
997 *
998 * @returns Strict VBox status code.
999 * @param pVCpu The cross context virtual CPU structure.
1000 * @param uIcrHi The ICR high dword.
1001 */
1002static VBOXSTRICTRC apicSetIcrHi(PVMCPU pVCpu, uint32_t uIcrHi)
1003{
1004 VMCPU_ASSERT_EMT(pVCpu);
1005 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1006
1007 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1008 pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
1009 Log4(("APIC%u: apicSetIcrHi: uIcrHi=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_hi.all.u32IcrHi));
1010
1011 return VINF_SUCCESS;
1012}
1013
1014
1015/**
1016 * Sets the Interrupt Command Register (ICR) low dword.
1017 *
1018 * @returns Strict VBox status code.
1019 * @param pVCpu The cross context virtual CPU structure.
1020 * @param uIcrLo The ICR low dword.
1021 * @param rcRZ The return code if the operation cannot be performed
1022 * in the current context.
1023 */
1024static VBOXSTRICTRC apicSetIcrLo(PVMCPU pVCpu, uint32_t uIcrLo, int rcRZ)
1025{
1026 VMCPU_ASSERT_EMT(pVCpu);
1027
1028 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1029 pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR;
1030 Log4(("APIC%u: apicSetIcrLo: uIcrLo=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_lo.all.u32IcrLo));
1031
1032 return apicSendIpi(pVCpu, rcRZ);
1033}
1034
1035
1036/**
1037 * Sets the Interrupt Command Register (ICR).
1038 *
1039 * @returns Strict VBox status code.
1040 * @param pVCpu The cross context virtual CPU structure.
1041 * @param u64Icr The ICR (High and Low combined).
1042 * @param rcRZ The return code if the operation cannot be performed
1043 * in the current context.
1044 */
1045static VBOXSTRICTRC apicSetIcr(PVMCPU pVCpu, uint64_t u64Icr, int rcRZ)
1046{
1047 VMCPU_ASSERT_EMT(pVCpu);
1048 Assert(XAPIC_IN_X2APIC_MODE(pVCpu));
1049
1050 /* Validate. */
1051 uint32_t const uLo = RT_LO_U32(u64Icr);
1052 if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR)))
1053 {
1054 /* Update high dword first, then update the low dword which sends the IPI. */
1055 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
1056 pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(u64Icr);
1057 return apicSetIcrLo(pVCpu, uLo, rcRZ);
1058 }
1059 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
1060}
1061
1062
1063/**
1064 * Sets the Error Status Register (ESR).
1065 *
1066 * @returns Strict VBox status code.
1067 * @param pVCpu The cross context virtual CPU structure.
1068 * @param uEsr The ESR value.
1069 */
1070static VBOXSTRICTRC apicSetEsr(PVMCPU pVCpu, uint32_t uEsr)
1071{
1072 VMCPU_ASSERT_EMT(pVCpu);
1073
1074 Log4(("APIC%u: apicSetEr: uEsr=%#RX32\n", pVCpu->idCpu, uEsr));
1075
1076 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1077 && (uEsr & ~XAPIC_ESR_WO))
1078 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);
1079
1080 /*
1081 * Writes to the ESR causes the internal state to be updated in the register,
1082 * clearing the original state. See AMD spec. 16.4.6 "APIC Error Interrupts".
1083 */
1084 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1085 pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
1086 return VINF_SUCCESS;
1087}
1088
1089
1090/**
1091 * Updates the Processor Priority Register (PPR).
1092 *
1093 * @param pVCpu The cross context virtual CPU structure.
1094 */
1095static void apicUpdatePpr(PVMCPU pVCpu)
1096{
1097 VMCPU_ASSERT_EMT(pVCpu);
1098
1099 /* See Intel spec 10.8.3.1 "Task and Processor Priorities". */
1100 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1101 uint8_t const uIsrv = apicGetLastSetBit(&pXApicPage->isr, 0 /* rcNotFound */);
1102 uint8_t uPpr;
1103 if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
1104 uPpr = pXApicPage->tpr.u8Tpr;
1105 else
1106 uPpr = XAPIC_PPR_GET_PP(uIsrv);
1107 pXApicPage->ppr.u8Ppr = uPpr;
1108}
1109
1110
1111/**
1112 * Gets the Processor Priority Register (PPR).
1113 *
1114 * @returns The PPR value.
1115 * @param pVCpu The cross context virtual CPU structure.
1116 */
1117static uint8_t apicGetPpr(PVMCPU pVCpu)
1118{
1119 VMCPU_ASSERT_EMT(pVCpu);
1120
1121 /*
1122 * With virtualized APIC registers or with TPR virtualization, the hardware may
1123 * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
1124 * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
1125 */
1126 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1127 if (pApic->fVirtApicRegsEnabled) /** @todo re-think this */
1128 apicUpdatePpr(pVCpu);
1129 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1130 return pXApicPage->ppr.u8Ppr;
1131}
1132
1133
1134/**
1135 * Sets the Task Priority Register (TPR).
1136 *
1137 * @returns Strict VBox status code.
1138 * @param pVCpu The cross context virtual CPU structure.
1139 * @param uTpr The TPR value.
1140 */
1141static VBOXSTRICTRC apicSetTpr(PVMCPU pVCpu, uint32_t uTpr)
1142{
1143 VMCPU_ASSERT_EMT(pVCpu);
1144
1145 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1146 && (uTpr & ~XAPIC_TPR))
1147 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);
1148
1149 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1150 pXApicPage->tpr.u8Tpr = uTpr;
1151 apicUpdatePpr(pVCpu);
1152 apicSignalNextPendingIntr(pVCpu);
1153 return VINF_SUCCESS;
1154}
1155
1156
1157/**
1158 * Sets the End-Of-Interrupt (EOI) register.
1159 *
1160 * @returns Strict VBox status code.
1161 * @param pVCpu The cross context virtual CPU structure.
1162 * @param uEoi The EOI value.
1163 */
1164static VBOXSTRICTRC apicSetEoi(PVMCPU pVCpu, uint32_t uEoi)
1165{
1166 VMCPU_ASSERT_EMT(pVCpu);
1167
1168 Log4(("APIC%u: apicSetEoi: uEoi=%#RX32\n", pVCpu->idCpu, uEoi));
1169
1170 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1171 && (uEoi & ~XAPIC_EOI_WO))
1172 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);
1173
1174 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1175 int isrv = apicGetLastSetBit(&pXApicPage->isr, -1 /* rcNotFound */);
1176 if (isrv >= 0)
1177 {
1178 /*
1179 * Dispensing the spurious-interrupt vector does not affect the ISR.
1180 * See Intel spec. 10.9 "Spurious Interrupt".
1181 */
1182 uint8_t const uVector = isrv;
1183 if (uVector != pXApicPage->svr.u.u8SpuriousVector)
1184 {
1185 apicClearVectorInReg(&pXApicPage->isr, uVector);
1186 apicUpdatePpr(pVCpu);
1187 bool fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
1188 if (fLevelTriggered)
1189 {
1190 /** @todo We need to broadcast EOI to IO APICs here. */
1191 apicClearVectorInReg(&pXApicPage->tmr, uVector);
1192 }
1193
1194 Log4(("APIC%u: apicSetEoi: Acknowledged %s triggered interrupt. uVector=%#x\n", pVCpu->idCpu,
1195 fLevelTriggered ? "level" : "edge", uVector));
1196
1197 apicSignalNextPendingIntr(pVCpu);
1198 }
1199 }
1200
1201 return VINF_SUCCESS;
1202}
1203
1204
1205/**
1206 * Sets the Logical Destination Register (LDR).
1207 *
1208 * @returns Strict VBox status code.
1209 * @param pVCpu The cross context virtual CPU structure.
1210 * @param uLdr The LDR value.
1211 *
1212 * @remarks LDR is read-only in x2APIC mode.
1213 */
1214static VBOXSTRICTRC apicSetLdr(PVMCPU pVCpu, uint32_t uLdr)
1215{
1216 VMCPU_ASSERT_EMT(pVCpu);
1217 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1218
1219 Log4(("APIC%u: apicSetLdr: uLdr=%#RX32\n", pVCpu->idCpu, uLdr));
1220
1221 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1222 apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR);
1223 return VINF_SUCCESS;
1224}
1225
1226
1227/**
1228 * Sets the Destination Format Register (DFR).
1229 *
1230 * @returns Strict VBox status code.
1231 * @param pVCpu The cross context virtual CPU structure.
1232 * @param uDfr The DFR value.
1233 *
1234 * @remarks DFR is not available in x2APIC mode.
1235 */
1236static VBOXSTRICTRC apicSetDfr(PVMCPU pVCpu, uint32_t uDfr)
1237{
1238 VMCPU_ASSERT_EMT(pVCpu);
1239 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1240
1241 uDfr &= XAPIC_DFR;
1242 uDfr |= XAPIC_DFR_RSVD_MB1;
1243
1244 Log4(("APIC%u: apicSetDfr: uDfr=%#RX32\n", pVCpu->idCpu, uDfr));
1245
1246 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1247 apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr);
1248 return VINF_SUCCESS;
1249}
1250
1251
1252/**
1253 * Sets the Timer Divide Configuration Register (DCR).
1254 *
1255 * @returns Strict VBox status code.
1256 * @param pVCpu The cross context virtual CPU structure.
1257 * @param uTimerDcr The timer DCR value.
1258 */
1259static VBOXSTRICTRC apicSetTimerDcr(PVMCPU pVCpu, uint32_t uTimerDcr)
1260{
1261 VMCPU_ASSERT_EMT(pVCpu);
1262 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1263 && (uTimerDcr & ~XAPIC_TIMER_DCR))
1264 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);
1265
1266 Log4(("APIC%u: apicSetTimerDcr: uTimerDcr=%#RX32\n", pVCpu->idCpu, uTimerDcr));
1267
1268 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1269 apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
1270 return VINF_SUCCESS;
1271}
1272
1273
1274/**
1275 * Gets the timer's Current Count Register (CCR).
1276 *
1277 * @returns VBox status code.
1278 * @param pVCpu The cross context virtual CPU structure.
1279 * @param rcBusy The busy return code for the timer critical section.
1280 * @param puValue Where to store the LVT timer CCR.
1281 */
1282static VBOXSTRICTRC apicGetTimerCcr(PVMCPU pVCpu, int rcBusy, uint32_t *puValue)
1283{
1284 VMCPU_ASSERT_EMT(pVCpu);
1285 Assert(puValue);
1286
1287 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1288 *puValue = 0;
1289
1290 /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1291 if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1292 return VINF_SUCCESS;
1293
1294 /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
1295 uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
1296 if (!uInitialCount)
1297 return VINF_SUCCESS;
1298
1299 /*
1300 * Reading the virtual-sync clock requires locking its timer because it's not
1301 * a simple atomic operation, see tmVirtualSyncGetEx().
1302 *
1303 * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
1304 */
1305 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1306 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
1307
1308 int rc = TMTimerLock(pTimer, rcBusy);
1309 if (rc == VINF_SUCCESS)
1310 {
1311 /* If the current-count register is 0, it implies the timer expired. */
1312 uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
1313 if (uCurrentCount)
1314 {
1315 uint64_t const cTicksElapsed = TMTimerGet(pApicCpu->CTX_SUFF(pTimer)) - pApicCpu->u64TimerInitial;
1316 TMTimerUnlock(pTimer);
1317 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
1318 uint64_t const uDelta = cTicksElapsed >> uTimerShift;
1319 if (uInitialCount > uDelta)
1320 *puValue = uInitialCount - uDelta;
1321 }
1322 else
1323 TMTimerUnlock(pTimer);
1324 }
1325 return rc;
1326}
1327
1328
1329/**
1330 * Sets the timer's Initial-Count Register (ICR).
1331 *
1332 * @returns Strict VBox status code.
1333 * @param pVCpu The cross context virtual CPU structure.
1334 * @param rcBusy The busy return code for the timer critical section.
1335 * @param uInitialCount The timer ICR.
1336 */
1337static VBOXSTRICTRC apicSetTimerIcr(PVMCPU pVCpu, int rcBusy, uint32_t uInitialCount)
1338{
1339 VMCPU_ASSERT_EMT(pVCpu);
1340
1341 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1342 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1343 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1344 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
1345
1346 /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1347 if ( pApic->fSupportsTscDeadline
1348 && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1349 return VINF_SUCCESS;
1350
1351 Log4(("APIC%u: apicSetTimerIcr: uInitialCount=%#RX32\n", pVCpu->idCpu, uInitialCount));
1352
1353 /*
1354 * The timer CCR may be modified by apicR3TimerCallback() in parallel,
1355 * so obtain the lock -before- updating it here to be consistent with the
1356 * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
1357 */
1358 int rc = TMTimerLock(pTimer, rcBusy);
1359 if (rc == VINF_SUCCESS)
1360 {
1361 pXApicPage->timer_icr.u32InitialCount = uInitialCount;
1362 pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
1363 if (uInitialCount)
1364 APICStartTimer(pApicCpu, uInitialCount);
1365 else
1366 APICStopTimer(pApicCpu);
1367 TMTimerUnlock(pTimer);
1368 }
1369 return rc;
1370}
1371
1372
1373/**
1374 * Sets an LVT entry.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pVCpu The cross context virtual CPU structure.
1378 * @param offLvt The LVT entry offset in the xAPIC page.
1379 * @param uLvt The LVT value to set.
1380 */
1381static VBOXSTRICTRC apicSetLvtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1382{
1383 VMCPU_ASSERT_EMT(pVCpu);
1384
1385#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1386 AssertMsg( offLvt == XAPIC_OFF_LVT_TIMER
1387 || offLvt == XAPIC_OFF_LVT_THERMAL
1388 || offLvt == XAPIC_OFF_LVT_PERF
1389 || offLvt == XAPIC_OFF_LVT_LINT0
1390 || offLvt == XAPIC_OFF_LVT_LINT1
1391 || offLvt == XAPIC_OFF_LVT_ERROR,
1392 ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#RX16, uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1393
1394 /*
1395 * If TSC-deadline mode isn't support, ignore the bit in xAPIC mode
1396 * and raise #GP(0) in x2APIC mode.
1397 */
1398 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1399 if (offLvt == XAPIC_OFF_LVT_TIMER)
1400 {
1401 if ( !pApic->fSupportsTscDeadline
1402 && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
1403 {
1404 if (XAPIC_IN_X2APIC_MODE(pVCpu))
1405 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1406 uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
1407 /** @todo TSC-deadline timer mode transition */
1408 }
1409 }
1410
1411 /*
1412 * Validate rest of the LVT bits.
1413 */
1414 uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
1415 AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);
1416
1417 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1418 && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
1419 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1420
1421 uLvt &= g_au32LvtValidMasks[idxLvt];
1422
1423 /*
1424 * In the software-disabled state, LVT mask-bit must remain set and attempts to clear the mask
1425 * bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
1426 */
1427 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1428 AssertCompile(RT_OFFSETOF(XAPICPAGE, svr) == RT_OFFSETOF(X2APICPAGE, svr));
1429 if (!pXApicPage->svr.u.fApicSoftwareEnable)
1430 uLvt |= XAPIC_LVT_MASK;
1431
1432 /*
1433 * It is unclear whether we should signal a 'send illegal vector' error here and ignore updating
1434 * the LVT entry when the delivery mode is 'fixed'[1] or update it in addition to signaling the
1435 * error or not signal the error at all. For now, we'll allow setting illegal vectors into the LVT
1436 * but set the 'send illegal vector' error here. The 'receive illegal vector' error will be set if
1437 * the interrupt for the vector happens to be generated, see APICPostInterrupt().
1438 *
1439 * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
1440 */
1441 if (RT_UNLIKELY( XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
1442 && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
1443 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
1444
1445 Log4(("APIC%u: apicSetLvtEntry: offLvt=%#RX16 uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1446
1447 apicWriteRaw32(pXApicPage, offLvt, uLvt);
1448 return VINF_SUCCESS;
1449#else
1450# error "Implement Pentium and P6 family APIC architectures"
1451#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
1452}
1453
1454
#if 0
/**
 * Sets an LVT entry in the extended LVT range.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   offLvt          The LVT entry offset in the xAPIC page.
 * @param   uLvt            The LVT value to set.
 *
 * @note    Currently compiled out; only the CMCI offset would be accepted and
 *          CMCI itself is not implemented yet.
 */
static int apicSetLvtExtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
{
    VMCPU_ASSERT_EMT(pVCpu);
    AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvt1Entry: invalid offset %#RX16\n", pVCpu->idCpu, offLvt));

    /** @todo support CMCI. */
    return VERR_NOT_IMPLEMENTED;
}
#endif
1473
1474
1475/**
1476 * Hints TM about the APIC timer frequency.
1477 *
1478 * @param pApicCpu The APIC CPU state.
1479 * @param uInitialCount The new initial count.
1480 * @param uTimerShift The new timer shift.
1481 * @thread Any.
1482 */
1483void apicHintTimerFreq(PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
1484{
1485 Assert(pApicCpu);
1486
1487 if ( pApicCpu->uHintedTimerInitialCount != uInitialCount
1488 || pApicCpu->uHintedTimerShift != uTimerShift)
1489 {
1490 uint32_t uHz;
1491 if (uInitialCount)
1492 {
1493 uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
1494 uHz = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer)) / cTicksPerPeriod;
1495 }
1496 else
1497 uHz = 0;
1498
1499 TMTimerSetFrequencyHint(pApicCpu->CTX_SUFF(pTimer), uHz);
1500 pApicCpu->uHintedTimerInitialCount = uInitialCount;
1501 pApicCpu->uHintedTimerShift = uTimerShift;
1502 }
1503}
1504
1505
1506/**
1507 * Reads an APIC register.
1508 *
1509 * @returns VBox status code.
1510 * @param pApicDev The APIC device instance.
1511 * @param pVCpu The cross context virtual CPU structure.
1512 * @param offReg The offset of the register being read.
1513 * @param puValue Where to store the register value.
1514 */
1515static int apicReadRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t *puValue)
1516{
1517 VMCPU_ASSERT_EMT(pVCpu);
1518 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1519
1520 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1521 uint32_t uValue = 0;
1522 int rc = VINF_SUCCESS;
1523 switch (offReg)
1524 {
1525 case XAPIC_OFF_ID:
1526 case XAPIC_OFF_VERSION:
1527 case XAPIC_OFF_TPR:
1528 case XAPIC_OFF_EOI:
1529 case XAPIC_OFF_RRD:
1530 case XAPIC_OFF_LDR:
1531 case XAPIC_OFF_DFR:
1532 case XAPIC_OFF_SVR:
1533 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1534 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1535 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1536 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1537 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1538 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1539 case XAPIC_OFF_ESR:
1540 case XAPIC_OFF_ICR_LO:
1541 case XAPIC_OFF_ICR_HI:
1542 case XAPIC_OFF_LVT_TIMER:
1543#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1544 case XAPIC_OFF_LVT_THERMAL:
1545#endif
1546 case XAPIC_OFF_LVT_PERF:
1547 case XAPIC_OFF_LVT_LINT0:
1548 case XAPIC_OFF_LVT_LINT1:
1549 case XAPIC_OFF_LVT_ERROR:
1550 case XAPIC_OFF_TIMER_ICR:
1551 case XAPIC_OFF_TIMER_DCR:
1552 {
1553 Assert( !XAPIC_IN_X2APIC_MODE(pVCpu)
1554 || ( offReg != XAPIC_OFF_DFR
1555 && offReg != XAPIC_OFF_ICR_HI
1556 && offReg != XAPIC_OFF_EOI));
1557 uValue = apicReadRaw32(pXApicPage, offReg);
1558 break;
1559 }
1560
1561 case XAPIC_OFF_PPR:
1562 {
1563 uValue = apicGetPpr(pVCpu);
1564 break;
1565 }
1566
1567 case XAPIC_OFF_TIMER_CCR:
1568 {
1569 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1570 rc = VBOXSTRICTRC_VAL(apicGetTimerCcr(pVCpu, VINF_IOM_R3_MMIO_READ, &uValue));
1571 break;
1572 }
1573
1574 case XAPIC_OFF_APR:
1575 {
1576#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1577 /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
1578 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1579#else
1580# error "Implement Pentium and P6 family APIC architectures"
1581#endif
1582 break;
1583 }
1584
1585 default:
1586 {
1587 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1588 rc = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "VCPU[%u]: offReg=%#RX16\n", pVCpu->idCpu, offReg);
1589 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1590 break;
1591 }
1592 }
1593
1594 *puValue = uValue;
1595 return rc;
1596}
1597
1598
1599/**
1600 * Writes an APIC register.
1601 *
1602 * @returns Strict VBox status code.
1603 * @param pApicDev The APIC device instance.
1604 * @param pVCpu The cross context virtual CPU structure.
1605 * @param offReg The offset of the register being written.
1606 * @param uValue The register value.
1607 */
1608static VBOXSTRICTRC apicWriteRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t uValue)
1609{
1610 VMCPU_ASSERT_EMT(pVCpu);
1611 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1612 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1613
1614 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1615 switch (offReg)
1616 {
1617 case XAPIC_OFF_TPR:
1618 {
1619 rcStrict = apicSetTpr(pVCpu, uValue);
1620 break;
1621 }
1622
1623 case XAPIC_OFF_LVT_TIMER:
1624#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1625 case XAPIC_OFF_LVT_THERMAL:
1626#endif
1627 case XAPIC_OFF_LVT_PERF:
1628 case XAPIC_OFF_LVT_LINT0:
1629 case XAPIC_OFF_LVT_LINT1:
1630 case XAPIC_OFF_LVT_ERROR:
1631 {
1632 rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
1633 break;
1634 }
1635
1636 case XAPIC_OFF_TIMER_ICR:
1637 {
1638 rcStrict = apicSetTimerIcr(pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
1639 break;
1640 }
1641
1642 case XAPIC_OFF_EOI:
1643 {
1644 rcStrict = apicSetEoi(pVCpu, uValue);
1645 break;
1646 }
1647
1648 case XAPIC_OFF_LDR:
1649 {
1650 rcStrict = apicSetLdr(pVCpu, uValue);
1651 break;
1652 }
1653
1654 case XAPIC_OFF_DFR:
1655 {
1656 rcStrict = apicSetDfr(pVCpu, uValue);
1657 break;
1658 }
1659
1660 case XAPIC_OFF_SVR:
1661 {
1662 rcStrict = apicSetSvr(pVCpu, uValue);
1663 break;
1664 }
1665
1666 case XAPIC_OFF_ICR_LO:
1667 {
1668 rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE);
1669 break;
1670 }
1671
1672 case XAPIC_OFF_ICR_HI:
1673 {
1674 rcStrict = apicSetIcrHi(pVCpu, uValue);
1675 break;
1676 }
1677
1678 case XAPIC_OFF_TIMER_DCR:
1679 {
1680 rcStrict = apicSetTimerDcr(pVCpu, uValue);
1681 break;
1682 }
1683
1684 case XAPIC_OFF_ESR:
1685 {
1686 rcStrict = apicSetEsr(pVCpu, uValue);
1687 break;
1688 }
1689
1690 case XAPIC_OFF_APR:
1691 case XAPIC_OFF_RRD:
1692 {
1693#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1694 /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
1695#else
1696# error "Implement Pentium and P6 family APIC architectures"
1697#endif
1698 break;
1699 }
1700
1701 /* Read-only, write ignored: */
1702 case XAPIC_OFF_VERSION:
1703 case XAPIC_OFF_ID:
1704 break;
1705
1706 /* Unavailable/reserved in xAPIC mode: */
1707 case X2APIC_OFF_SELF_IPI:
1708 /* Read-only registers: */
1709 case XAPIC_OFF_PPR:
1710 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1711 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1712 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1713 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1714 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1715 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1716 case XAPIC_OFF_TIMER_CCR:
1717 default:
1718 {
1719 rcStrict = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "APIC%u: offReg=%#RX16\n", pVCpu->idCpu,
1720 offReg);
1721 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1722 break;
1723 }
1724 }
1725
1726 return rcStrict;
1727}
1728
1729
1730/**
1731 * @interface_method_impl{PDMAPICREG,pfnReadMsrR3}
1732 */
1733VMMDECL(VBOXSTRICTRC) APICReadMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1734{
1735 /*
1736 * Validate.
1737 */
1738 VMCPU_ASSERT_EMT(pVCpu);
1739 Assert(u32Reg >= MSR_IA32_X2APIC_START && u32Reg <= MSR_IA32_X2APIC_END);
1740 Assert(pu64Value);
1741
1742 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1743 if (pApic->fRZEnabled)
1744 { /* likely */}
1745 else
1746 return VINF_CPUM_R3_MSR_READ;
1747
1748 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF(StatMsrRead));
1749
1750 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1751 if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
1752 {
1753 switch (u32Reg)
1754 {
1755 /* Special handling for x2APIC: */
1756 case MSR_IA32_X2APIC_ICR:
1757 {
1758 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
1759 uint64_t const uHi = pX2ApicPage->icr_hi.u32IcrHi;
1760 uint64_t const uLo = pX2ApicPage->icr_lo.all.u32IcrLo;
1761 *pu64Value = RT_MAKE_U64(uLo, uHi);
1762 break;
1763 }
1764
1765 /* Special handling, compatible with xAPIC: */
1766 case MSR_IA32_X2APIC_TIMER_CCR:
1767 {
1768 uint32_t uValue;
1769 rcStrict = apicGetTimerCcr(pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1770 *pu64Value = uValue;
1771 break;
1772 }
1773
1774 /* Special handling, compatible with xAPIC: */
1775 case MSR_IA32_X2APIC_PPR:
1776 {
1777 *pu64Value = apicGetPpr(pVCpu);
1778 break;
1779 }
1780
1781 /* Raw read, compatible with xAPIC: */
1782 case MSR_IA32_X2APIC_ID:
1783 case MSR_IA32_X2APIC_VERSION:
1784 case MSR_IA32_X2APIC_TPR:
1785 case MSR_IA32_X2APIC_LDR:
1786 case MSR_IA32_X2APIC_SVR:
1787 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1788 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1789 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1790 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1791 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1792 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1793 case MSR_IA32_X2APIC_ESR:
1794 case MSR_IA32_X2APIC_LVT_TIMER:
1795 case MSR_IA32_X2APIC_LVT_THERMAL:
1796 case MSR_IA32_X2APIC_LVT_PERF:
1797 case MSR_IA32_X2APIC_LVT_LINT0:
1798 case MSR_IA32_X2APIC_LVT_LINT1:
1799 case MSR_IA32_X2APIC_LVT_ERROR:
1800 case MSR_IA32_X2APIC_TIMER_ICR:
1801 case MSR_IA32_X2APIC_TIMER_DCR:
1802 {
1803 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1804 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1805 *pu64Value = apicReadRaw32(pXApicPage, offReg);
1806 break;
1807 }
1808
1809 /* Write-only MSRs: */
1810 case MSR_IA32_X2APIC_SELF_IPI:
1811 case MSR_IA32_X2APIC_EOI:
1812 {
1813 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1814 break;
1815 }
1816
1817 /* Reserved MSRs: */
1818 case MSR_IA32_X2APIC_LVT_CMCI:
1819 default:
1820 {
1821 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1822 break;
1823 }
1824 }
1825 }
1826 else
1827 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
1828
1829 return rcStrict;
1830}
1831
1832
1833/**
1834 * @interface_method_impl{PDMAPICREG,pfnWriteMsrR3}
1835 */
1836VMMDECL(VBOXSTRICTRC) APICWriteMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t u64Value)
1837{
1838 /*
1839 * Validate.
1840 */
1841 VMCPU_ASSERT_EMT(pVCpu);
1842 Assert(u32Reg >= MSR_IA32_X2APIC_START && u32Reg <= MSR_IA32_X2APIC_END);
1843
1844 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1845 if (pApic->fRZEnabled)
1846 { /* likely */ }
1847 else
1848 return VINF_CPUM_R3_MSR_WRITE;
1849
1850 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF(StatMsrWrite));
1851
1852 /*
1853 * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
1854 * accesses where they are ignored. Hence, we need to validate each register before
1855 * invoking the generic/xAPIC write functions.
1856 *
1857 * Bits 63:32 of all registers except the ICR are reserved, we'll handle this common
1858 * case first and handle validating the remaining bits on a per-register basis.
1859 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
1860 */
1861 if ( u32Reg != MSR_IA32_X2APIC_ICR
1862 && RT_HI_U32(u64Value))
1863 return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);
1864
1865 uint32_t u32Value = RT_LO_U32(u64Value);
1866 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1867 if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
1868 {
1869 switch (u32Reg)
1870 {
1871 case MSR_IA32_X2APIC_TPR:
1872 {
1873 rcStrict = apicSetTpr(pVCpu, u32Value);
1874 break;
1875 }
1876
1877 case MSR_IA32_X2APIC_ICR:
1878 {
1879 rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
1880 break;
1881 }
1882
1883 case MSR_IA32_X2APIC_SVR:
1884 {
1885 rcStrict = apicSetSvr(pVCpu, u32Value);
1886 break;
1887 }
1888
1889 case MSR_IA32_X2APIC_ESR:
1890 {
1891 rcStrict = apicSetEsr(pVCpu, u32Value);
1892 break;
1893 }
1894
1895 case MSR_IA32_X2APIC_TIMER_DCR:
1896 {
1897 rcStrict = apicSetTimerDcr(pVCpu, u32Value);
1898 break;
1899 }
1900
1901 case MSR_IA32_X2APIC_LVT_TIMER:
1902 case MSR_IA32_X2APIC_LVT_THERMAL:
1903 case MSR_IA32_X2APIC_LVT_PERF:
1904 case MSR_IA32_X2APIC_LVT_LINT0:
1905 case MSR_IA32_X2APIC_LVT_LINT1:
1906 case MSR_IA32_X2APIC_LVT_ERROR:
1907 {
1908 rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
1909 break;
1910 }
1911
1912 case MSR_IA32_X2APIC_TIMER_ICR:
1913 {
1914 rcStrict = apicSetTimerIcr(pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
1915 break;
1916 }
1917
1918 /* Write-only MSRs: */
1919 case MSR_IA32_X2APIC_SELF_IPI:
1920 {
1921 uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
1922 APICPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE);
1923 rcStrict = VINF_SUCCESS;
1924 break;
1925 }
1926
1927 case MSR_IA32_X2APIC_EOI:
1928 {
1929 rcStrict = apicSetEoi(pVCpu, u32Value);
1930 break;
1931 }
1932
1933 /* Read-only MSRs: */
1934 case MSR_IA32_X2APIC_ID:
1935 case MSR_IA32_X2APIC_VERSION:
1936 case MSR_IA32_X2APIC_PPR:
1937 case MSR_IA32_X2APIC_LDR:
1938 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1939 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1940 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1941 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1942 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1943 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1944 case MSR_IA32_X2APIC_TIMER_CCR:
1945 {
1946 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
1947 break;
1948 }
1949
1950 /* Reserved MSRs: */
1951 case MSR_IA32_X2APIC_LVT_CMCI:
1952 default:
1953 {
1954 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
1955 break;
1956 }
1957 }
1958 }
1959 else
1960 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);
1961
1962 return rcStrict;
1963}
1964
1965
1966/**
1967 * @interface_method_impl{PDMAPICREG,pfnSetBaseMsrR3}
1968 */
1969VMMDECL(VBOXSTRICTRC) APICSetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint64_t u64BaseMsr)
1970{
1971 Assert(pVCpu);
1972 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1973 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1974 APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
1975 APICMODE enmNewMode = apicGetMode(u64BaseMsr);
1976 uint64_t uBaseMsr = pApicCpu->uApicBaseMsr;
1977
1978 /** @todo probably go back to ring-3 for all cases regardless of
1979 * fRZEnabled. Writing this MSR is not something guests
1980 * typically do often, and therefore is not performance
1981 * critical. We'll have better diagnostics in ring-3. */
1982 if (!pApic->fRZEnabled)
1983 return VINF_CPUM_R3_MSR_WRITE;
1984
1985 /*
1986 * We do not support re-mapping the APIC base address because:
1987 * - We'll have to manage all the mappings ourselves in the APIC (reference counting based unmapping etc.)
1988 * i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
1989 * - It's unclear how/if IOM can fallback to handling regions as regular memory (if the MMIO
1990 * region remains mapped but doesn't belong to the called VCPU's APIC).
1991 */
1992 /** @todo Handle per-VCPU APIC base relocation. */
1993 if (MSR_APICBASE_GET_PHYSADDR(uBaseMsr) != XAPIC_APICBASE_PHYSADDR)
1994 {
1995#ifdef IN_RING3
1996 LogRelMax(5, ("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
1997 MSR_APICBASE_GET_PHYSADDR(uBaseMsr)));
1998 return VERR_CPUM_RAISE_GP_0;
1999#else
2000 return VINF_CPUM_R3_MSR_WRITE;
2001#endif
2002 }
2003
2004 /*
2005 * Act on state transition.
2006 */
2007 /** @todo We need to update the CPUID according to the state, which we
2008 * currently don't do as CPUMSetGuestCpuIdFeature() is setting
2009 * per-VM CPUID bits while we need per-VCPU specific bits. */
2010 if (enmNewMode != enmOldMode)
2011 {
2012 switch (enmNewMode)
2013 {
2014 case APICMODE_DISABLED:
2015 {
2016#ifdef IN_RING3
2017 /*
2018 * The APIC state needs to be reset (especially the APIC ID as x2APIC APIC ID bit layout
2019 * is different). We can start with a clean slate identical to the state after a power-up/reset.
2020 *
2021 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
2022 */
2023 APICR3Reset(pVCpu);
2024 uBaseMsr &= ~(MSR_APICBASE_XAPIC_ENABLE_BIT | MSR_APICBASE_X2APIC_ENABLE_BIT);
2025 Log4(("APIC%u: Switched mode to disabled\n", pVCpu->idCpu));
2026#else
2027 return VINF_CPUM_R3_MSR_WRITE;
2028#endif
2029 break;
2030 }
2031
2032 case APICMODE_XAPIC:
2033 {
2034 if (enmOldMode != APICMODE_DISABLED)
2035 {
2036 Log(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
2037 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2038 }
2039 uBaseMsr |= MSR_APICBASE_XAPIC_ENABLE_BIT;
2040 Log4(("APIC%u: Switched mode to xApic\n", pVCpu->idCpu));
2041 break;
2042 }
2043
2044 case APICMODE_X2APIC:
2045 {
2046 uBaseMsr |= MSR_APICBASE_X2APIC_ENABLE_BIT;
2047
2048 /*
2049 * The APIC ID needs updating when entering x2APIC mode.
2050 * Software written APIC ID in xAPIC mode isn't preseved.
2051 * The APIC ID becomes read-only to software in x2APIC mode.
2052 *
2053 * See Intel spec. 10.12.5.1 "x2APIC States".
2054 */
2055 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2056 ASMMemZero32(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
2057 pX2ApicPage->id.u32ApicId = pVCpu->idCpu;
2058
2059 /*
2060 * LDR initialization occurs when entering x2APIC mode.
2061 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
2062 */
2063 pX2ApicPage->ldr.u32LogicalApicId = ((pX2ApicPage->id.u32ApicId & UINT32_C(0xffff0)) << 16)
2064 | (UINT32_C(1) << pX2ApicPage->id.u32ApicId & UINT32_C(0xf));
2065 Log4(("APIC%u: Switched mode to x2Apic\n", pVCpu->idCpu));
2066 break;
2067 }
2068
2069 case APICMODE_INVALID:
2070 default:
2071 {
2072 Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
2073 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2074 }
2075 }
2076 }
2077
2078 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
2079 return VINF_SUCCESS;
2080}
2081
2082
2083/**
2084 * @interface_method_impl{PDMAPICREG,pfnGetBaseMsrR3}
2085 */
2086VMMDECL(uint64_t) APICGetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu)
2087{
2088 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2089
2090 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2091 return pApicCpu->uApicBaseMsr;
2092}
2093
2094
/**
 * @interface_method_impl{PDMAPICREG,pfnSetTprR3}
 *
 * Writes the Task Priority Register (TPR) of the given VCPU's APIC by
 * delegating to the internal worker. The device instance is unused; the
 * target APIC is identified by pVCpu alone.
 */
VMMDECL(void) APICSetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Tpr)
{
    apicSetTpr(pVCpu, u8Tpr);
}
2102
2103
2104/**
2105 * @interface_method_impl{PDMAPICREG,pfnGetTprR3}
2106 */
2107VMMDECL(uint8_t) APICGetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu)
2108{
2109 VMCPU_ASSERT_EMT(pVCpu);
2110 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2111 return pXApicPage->tpr.u8Tpr;
2112}
2113
2114
2115/**
2116 * @interface_method_impl{PDMAPICREG,pfnGetTimerFreqR3}
2117 */
2118VMMDECL(uint64_t) APICGetTimerFreq(PPDMDEVINS pDevIns)
2119{
2120 PVM pVM = PDMDevHlpGetVM(pDevIns);
2121 PVMCPU pVCpu = &pVM->aCpus[0];
2122 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2123 uint64_t uTimer = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer));
2124 return uTimer;
2125}
2126
2127
2128/**
2129 * @interface_method_impl{PDMAPICREG,pfnBusDeliverR3}
2130 * @remarks This is a private interface between the IOAPIC and the APIC.
2131 */
2132VMMDECL(int) APICBusDeliver(PPDMDEVINS pDevIns, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
2133 uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uTagSrc)
2134{
2135 NOREF(uPolarity);
2136 NOREF(uTagSrc);
2137 PVM pVM = PDMDevHlpGetVM(pDevIns);
2138
2139 /*
2140 * The destination field (mask) in the IO APIC redirectable table entry is 8-bits.
2141 * Hence, the broadcast mask is 0xff.
2142 * See IO APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirectable Table Registers".
2143 */
2144 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)uTriggerMode;
2145 XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
2146 XAPICDESTMODE enmDestMode = (XAPICDESTMODE)uDestMode;
2147 uint32_t fDestMask = uDest;
2148 uint32_t fBroadcastMask = UINT32_C(0xff);
2149
2150 Log4(("APIC: apicBusDeliver: fDestMask=%#x enmDestMode=%s enmTriggerMode=%s enmDeliveryMode=%s\n", fDestMask,
2151 apicGetDestModeName(enmDestMode), apicGetTriggerModeName(enmTriggerMode), apicGetDeliveryModeName(enmDeliveryMode)));
2152
2153 VMCPUSET DestCpuSet;
2154 apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
2155 VBOXSTRICTRC rcStrict = apicSendIntr(pVM, NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2156 VINF_SUCCESS /* rcRZ */);
2157 return VBOXSTRICTRC_VAL(rcStrict);
2158}
2159
2160
2161/**
2162 * @interface_method_impl{PDMAPICREG,pfnLocalInterruptR3}
2163 * @remarks This is a private interface between the PIC and the APIC.
2164 */
2165VMMDECL(VBOXSTRICTRC) APICLocalInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
2166{
2167 NOREF(pDevIns);
2168 AssertReturn(u8Pin <= 1, VERR_INVALID_PARAMETER);
2169 AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);
2170 LogFlow(("APIC%u: APICLocalInterrupt\n", pVCpu->idCpu));
2171
2172 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2173
2174 /* If the APIC is enabled, the interrupt is subject to LVT programming. */
2175 if (apicIsEnabled(pVCpu))
2176 {
2177 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2178
2179 /* Pick the LVT entry corresponding to the interrupt pin. */
2180 static const uint16_t s_au16LvtOffsets[] =
2181 {
2182 XAPIC_OFF_LVT_LINT0,
2183 XAPIC_OFF_LVT_LINT1
2184 };
2185 Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
2186 uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
2187 uint32_t const uLvt = apicReadRaw32(pXApicPage, offLvt);
2188
2189 /* If software hasn't masked the interrupt in the LVT entry, proceed interrupt processing. */
2190 if (!XAPIC_LVT_IS_MASKED(uLvt))
2191 {
2192 XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
2193 XAPICTRIGGERMODE enmTriggerMode = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);
2194
2195 switch (enmDeliveryMode)
2196 {
2197 case XAPICDELIVERYMODE_FIXED:
2198 {
2199 /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
2200 if (offLvt == XAPIC_OFF_LVT_LINT1)
2201 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
2202 /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
2203 delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
2204 use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
2205 means. */
2206 /* fallthru */
2207 }
2208 case XAPICDELIVERYMODE_SMI:
2209 case XAPICDELIVERYMODE_NMI:
2210 case XAPICDELIVERYMODE_INIT: /** @todo won't work in R0/RC because callers don't care about rcRZ. */
2211 {
2212 VMCPUSET DestCpuSet;
2213 VMCPUSET_EMPTY(&DestCpuSet);
2214 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2215 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2216 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2217 rcRZ);
2218 break;
2219 }
2220
2221 case XAPICDELIVERYMODE_EXTINT:
2222 {
2223 Log4(("APIC%u: APICLocalInterrupt: External interrupt. u8Pin=%u u8Level=%u\n", pVCpu->idCpu, u8Pin, u8Level));
2224 if (u8Level)
2225 APICSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2226 else
2227 APICClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2228 break;
2229 }
2230
2231 /* Reserved/unknown delivery modes: */
2232 case XAPICDELIVERYMODE_LOWEST_PRIO:
2233 case XAPICDELIVERYMODE_STARTUP:
2234 default:
2235 {
2236 rcStrict = VERR_INTERNAL_ERROR_3;
2237 AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x (%s) on LINT%d\n", pVCpu->idCpu,
2238 enmDeliveryMode, apicGetDeliveryModeName(enmDeliveryMode), u8Pin));
2239 break;
2240 }
2241 }
2242 }
2243 }
2244 else
2245 {
2246 /* The APIC is disabled, pass it through the CPU. */
2247 LogFlow(("APIC%u: APICLocalInterrupt: APIC hardware-disabled, passing interrupt to CPU. u8Pin=%u u8Level=%u\n", u8Pin,
2248 u8Level));
2249 if (u8Level)
2250 APICSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2251 else
2252 APICClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2253 }
2254
2255 return rcStrict;
2256}
2257
2258
/**
 * @interface_method_impl{PDMAPICREG,pfnHasPendingIrqR3}
 *
 * Queries whether any interrupt is pending on the VCPU's APIC without
 * delivering it; delegates to the internal worker which also returns the
 * vector via pu8PendingIrq. The device instance is unused.
 */
VMMDECL(bool) APICHasPendingIrq(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t *pu8PendingIrq)
{
    return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIrq);
}
2266
2267
/**
 * @interface_method_impl{PDMAPICREG,pfnGetInterruptR3}
 *
 * Delivers the highest-priority pending interrupt to the caller: the chosen
 * vector is moved from the IRR to the ISR and the PPR is recomputed.
 *
 * @returns The interrupt vector, the spurious-interrupt vector when the
 *          pending vector is masked by the TPR, or -1 when nothing can be
 *          delivered (no pending bits, or APIC hardware/software disabled).
 */
VMMDECL(int) APICGetInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t *puTagSrc)
{
    VMCPU_ASSERT_EMT(pVCpu);

    LogFlow(("APIC%u: APICGetInterrupt\n", pVCpu->idCpu));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    bool const fApicHwEnabled = apicIsEnabled(pVCpu);
    if (   fApicHwEnabled
        && pXApicPage->svr.u.fApicSoftwareEnable)
    {
        /* Sync pending interrupts from the PIBs into the IRR before scanning. */
        APICUpdatePendingInterrupts(pVCpu);

        /* Highest-set bit in the IRR is the highest-priority pending vector; -1 if none. */
        int const irrv = apicGetLastSetBit(&pXApicPage->irr, -1);
        if (irrv >= 0)
        {
            Assert(irrv <= (int)UINT8_MAX);
            uint8_t const uVector = irrv;

            /** @todo this cannot possibly happen for anything other than ExtINT
             *        interrupts right? */
            /* Vector masked by the TPR: report the spurious-interrupt vector instead;
               the pending vector stays in the IRR (not moved to the ISR). */
            uint8_t const uTpr = pXApicPage->tpr.u8Tpr;
            if (uTpr > 0 && uVector <= uTpr)
            {
                Log4(("APIC%u: APICGetInterrupt: Spurious interrupt. uVector=%#x\n", pVCpu->idCpu,
                      pXApicPage->svr.u.u8SpuriousVector));
                return pXApicPage->svr.u.u8SpuriousVector;
            }

            /* Deliver only if the vector's priority class exceeds the processor
               priority (PPR), i.e. it isn't blocked by an in-service interrupt. */
            uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
            if (   !uPpr
                || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
            {
                /* Move the vector from pending (IRR) to in-service (ISR),
                   recompute the PPR, and re-signal if more interrupts remain. */
                apicClearVectorInReg(&pXApicPage->irr, uVector);
                apicSetVectorInReg(&pXApicPage->isr, uVector);
                apicUpdatePpr(pVCpu);
                apicSignalNextPendingIntr(pVCpu);

                Log4(("APIC%u: APICGetInterrupt: Valid Interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
                return uVector;
            }
            else
                Log4(("APIC%u: APICGetInterrupt: Interrupt's priority is not higher than the PPR uVector=%#x PPR=%#x\n",
                      pVCpu->idCpu, uVector, uPpr));
        }
        else
            Log4(("APIC%u: APICGetInterrupt: No pending bits in IRR\n", pVCpu->idCpu));
    }
    else
        Log4(("APIC%u: APICGetInterrupt: APIC %s disabled\n", pVCpu->idCpu, !fApicHwEnabled ? "hardware" : "software"));

    return -1;
}
2323
2324
2325/**
2326 * @callback_method_impl{FNIOMMMIOREAD}
2327 */
2328VMMDECL(int) APICReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
2329{
2330 NOREF(pvUser);
2331 Assert(!(GCPhysAddr & 0xf));
2332 Assert(cb == 4);
2333
2334 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2335 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2336 uint16_t offReg = (GCPhysAddr & 0xff0);
2337 uint32_t uValue = 0;
2338
2339 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF(StatMmioRead));
2340
2341 int rc = apicReadRegister(pApicDev, pVCpu, offReg, &uValue);
2342 *(uint32_t *)pv = uValue;
2343
2344 Log4(("APIC%u: ApicReadMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2345 return rc;
2346}
2347
2348
2349/**
2350 * @callback_method_impl{FNIOMMMIOWRITE}
2351 */
2352VMMDECL(int) APICWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
2353{
2354 NOREF(pvUser);
2355 Assert(!(GCPhysAddr & 0xf));
2356 Assert(cb == 4);
2357
2358 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2359 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2360 uint16_t offReg = (GCPhysAddr & 0xff0);
2361 uint32_t uValue = *(uint32_t *)pv;
2362
2363 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF(StatMmioWrite));
2364
2365 Log4(("APIC%u: APICWriteMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2366
2367 int rc = VBOXSTRICTRC_VAL(apicWriteRegister(pApicDev, pVCpu, offReg, uValue));
2368 return rc;
2369}
2370
2371
2372/**
2373 * Sets the interrupt pending force-flag and pokes the EMT if required.
2374 *
2375 * @param pVCpu The cross context virtual CPU structure.
2376 * @param enmType The IRQ type.
2377 */
2378VMMDECL(void) APICSetInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
2379{
2380 PVM pVM = pVCpu->CTX_SUFF(pVM);
2381 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
2382 CTX_SUFF(pApicDev->pApicHlp)->pfnSetInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
2383}
2384
2385
/**
 * Clears the interrupt pending force-flag.
 *
 * Delegates to the context-appropriate PDM APIC helper for this device
 * instance.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   enmType         The IRQ type.
 */
VMMDECL(void) APICClearInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
{
    PVM      pVM      = pVCpu->CTX_SUFF(pVM);
    PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
    pApicDev->CTX_SUFF(pApicHlp)->pfnClearInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
}
2398
2399
/**
 * Posts an interrupt to a target APIC.
 *
 * This function handles interrupts received from the system bus or
 * interrupts generated locally from the LVT or via a self IPI.
 *
 * Don't use this function to try and deliver ExtINT style interrupts.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uVector         The vector of the interrupt to be posted.
 * @param   enmTriggerMode  The trigger mode of the interrupt.
 *
 * @thread  Any.
 */
VMM_INT_DECL(void) APICPostInterrupt(PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode)
{
    Assert(pVCpu);
    /* Debug-build check of the same condition handled gracefully below. */
    Assert(uVector > XAPIC_ILLEGAL_VECTOR_END);

    PCAPIC   pApic    = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);

    STAM_PROFILE_START(&pApicCpu->StatPostIntr, a);

    /*
     * Only post valid interrupt vectors.
     * See Intel spec. 10.5.2 "Valid Interrupt Vectors".
     */
    if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
    {
        /*
         * If the interrupt is already pending in the vIRR we can skip the
         * potential expensive operation of poking the guest EMT out of execution.
         */
        PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
        if (!apicTestVectorInReg(&pXApicPage->irr, uVector))    /* PAV */
        {
            Log4(("APIC%u: APICPostInterrupt: uVector=%#x\n", pVCpu->idCpu, uVector));
            if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
            {
                if (pApic->fPostedIntrsEnabled)
                { /** @todo posted-interrupt call to hardware */ }
                else
                {
                    /* Record the vector in the edge-triggered PIB; the notification
                       bit ensures the target EMT is only poked on a 0 -> 1
                       transition (i.e. when no poke is already outstanding). */
                    apicSetVectorInPib(pApicCpu->CTX_SUFF(pvApicPib), uVector);
                    uint32_t const fAlreadySet = apicSetNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
                    if (!fAlreadySet)
                        APICSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
                }
            }
            else
            {
                /*
                 * Level-triggered interrupts requires updating of the TMR and thus cannot be
                 * delivered asynchronously.
                 */
                apicSetVectorInPib(&pApicCpu->ApicPibLevel, uVector);
                uint32_t const fAlreadySet = apicSetNotificationBitInPib((PAPICPIB)&pApicCpu->ApicPibLevel);
                if (!fAlreadySet)
                    APICSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
            }
        }
        else
            STAM_COUNTER_INC(&pApicCpu->StatPostIntrAlreadyPending);
    }
    else
        apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);

    STAM_PROFILE_STOP(&pApicCpu->StatPostIntr, a);
}
2470
2471
2472/**
2473 * Starts the APIC timer.
2474 *
2475 * @param pApicCpu The APIC CPU state.
2476 * @param uInitialCount The timer's Initial-Count Register (ICR), must be >
2477 * 0.
2478 * @thread Any.
2479 */
2480VMM_INT_DECL(void) APICStartTimer(PAPICCPU pApicCpu, uint32_t uInitialCount)
2481{
2482 Assert(pApicCpu);
2483 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
2484 Assert(uInitialCount > 0);
2485
2486 PCXAPICPAGE pXApicPage = APICCPU_TO_CXAPICPAGE(pApicCpu);
2487 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
2488 uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;
2489
2490 /*
2491 * The assumption here is that the timer doesn't tick during this call
2492 * and thus setting a relative time to fire next is accurate. The advantage
2493 * however is updating u64TimerInitial 'atomically' while setting the next
2494 * tick.
2495 */
2496 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
2497 TMTimerSetRelative(pTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
2498 apicHintTimerFreq(pApicCpu, uInitialCount, uTimerShift);
2499}
2500
2501
2502/**
2503 * Stops the APIC timer.
2504 *
2505 * @param pApicCpu The APIC CPU state.
2506 * @thread Any.
2507 */
2508VMM_INT_DECL(void) APICStopTimer(PAPICCPU pApicCpu)
2509{
2510 Assert(pApicCpu);
2511 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
2512
2513 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
2514 TMTimerStop(pTimer); /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
2515 pApicCpu->uHintedTimerInitialCount = 0;
2516 pApicCpu->uHintedTimerShift = 0;
2517}
2518
2519
2520/**
2521 * Updates the CPUID bits necessary for the given APIC mode.
2522 *
2523 * @param pVM The cross context VM structure.
2524 * @param enmMode The APIC mode.
2525 */
2526VMM_INT_DECL(void) APICUpdateCpuIdForMode(PVM pVM, APICMODE enmMode)
2527{
2528 /* The CPUID bits being updated to reflect the current state is a bit vague. See @bugref{8245#c32}. */
2529 /** @todo This needs to be done on a per-VCPU basis! */
2530 switch (enmMode)
2531 {
2532 case APICMODE_DISABLED:
2533 CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
2534 break;
2535
2536 case APICMODE_XAPIC:
2537 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
2538 break;
2539
2540 case APICMODE_X2APIC:
2541 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
2542 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_X2APIC);
2543 break;
2544
2545 default:
2546 AssertMsgFailed(("Invalid APIC mode: %d\n", (int)enmMode));
2547 break;
2548 }
2549}
2550
2551
2552/**
2553 * Queues a pending interrupt as in-service.
2554 *
2555 * This function should only be needed without virtualized APIC
2556 * registers. With virtualized APIC registers, it's sufficient to keep
2557 * the interrupts pending in the IRR as the hardware takes care of
2558 * virtual interrupt delivery.
2559 *
2560 * @returns true if the interrupt was queued to in-service interrupts,
2561 * false otherwise.
2562 * @param pVCpu The cross context virtual CPU structure.
2563 * @param u8PendingIntr The pending interrupt to queue as
2564 * in-service.
2565 *
2566 * @remarks This assumes the caller has done the necessary checks and
2567 * is ready to take actually service the interrupt (TPR,
2568 * interrupt shadow etc.)
2569 */
2570VMMDECL(bool) APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr)
2571{
2572 VMCPU_ASSERT_EMT(pVCpu);
2573
2574 PVM pVM = pVCpu->CTX_SUFF(pVM);
2575 PAPIC pApic = VM_TO_APIC(pVM);
2576 Assert(!pApic->fVirtApicRegsEnabled);
2577 NOREF(pApic);
2578
2579 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2580 bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
2581 if (fIsPending)
2582 {
2583 apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
2584 apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
2585 apicUpdatePpr(pVCpu);
2586 return true;
2587 }
2588 return false;
2589}
2590
2591
2592/**
2593 * Dequeues a pending interrupt from in-service.
2594 *
2595 * This undoes APICQueueInterruptToService() for premature VM-exits before event
2596 * injection.
2597 *
2598 * @param pVCpu The cross context virtual CPU structure.
2599 * @param u8PendingIntr The pending interrupt to dequeue from
2600 * in-service.
2601 */
2602VMMDECL(void) APICDequeueInterruptFromService(PVMCPU pVCpu, uint8_t u8PendingIntr)
2603{
2604 VMCPU_ASSERT_EMT(pVCpu);
2605
2606 PVM pVM = pVCpu->CTX_SUFF(pVM);
2607 PAPIC pApic = VM_TO_APIC(pVM);
2608 Assert(!pApic->fVirtApicRegsEnabled);
2609 NOREF(pApic);
2610
2611 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2612 bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
2613 if (fInService)
2614 {
2615 apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
2616 apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
2617 apicUpdatePpr(pVCpu);
2618 }
2619}
2620
2621
2622/**
2623 * Updates pending interrupts from the pending-interrupt bitmaps to the IRR.
2624 *
2625 * @param pVCpu The cross context virtual CPU structure.
2626 */
2627VMMDECL(void) APICUpdatePendingInterrupts(PVMCPU pVCpu)
2628{
2629 VMCPU_ASSERT_EMT(pVCpu);
2630
2631 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2632 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2633
2634 STAM_PROFILE_START(&pApicCpu->StatUpdatePendingIntrs, a);
2635
2636 /* Update edge-triggered pending interrupts. */
2637 for (;;)
2638 {
2639 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
2640 if (!fAlreadySet)
2641 break;
2642
2643 PAPICPIB pPib = (PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib);
2644 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->aVectorBitmap));
2645
2646 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->aVectorBitmap); idxPib++, idxReg += 2)
2647 {
2648 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->aVectorBitmap[idxPib], 0);
2649 if (u64Fragment)
2650 {
2651 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
2652 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
2653
2654 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
2655 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2656
2657 pXApicPage->tmr.u[idxReg].u32Reg &= ~u32FragmentLo;
2658 pXApicPage->tmr.u[idxReg + 1].u32Reg &= ~u32FragmentHi;
2659 }
2660 }
2661 }
2662
2663 /* Update level-triggered pending interrupts. */
2664 for (;;)
2665 {
2666 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)&pApicCpu->ApicPibLevel);
2667 if (!fAlreadySet)
2668 break;
2669
2670 PAPICPIB pPib = (PAPICPIB)&pApicCpu->ApicPibLevel;
2671 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->aVectorBitmap));
2672
2673 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->aVectorBitmap); idxPib++, idxReg += 2)
2674 {
2675 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->aVectorBitmap[idxPib], 0);
2676 if (u64Fragment)
2677 {
2678 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
2679 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
2680
2681 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
2682 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2683
2684 pXApicPage->tmr.u[idxReg].u32Reg |= u32FragmentLo;
2685 pXApicPage->tmr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2686 }
2687 }
2688 }
2689
2690 STAM_PROFILE_STOP(&pApicCpu->StatUpdatePendingIntrs, a);
2691}
2692
2693
/**
 * Gets the highest priority pending interrupt.
 *
 * Thin EMT-asserting wrapper around the internal worker; does not modify
 * any APIC state.
 *
 * @returns true if any interrupt is pending, false otherwise.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pu8PendingIntr      Where to store the interrupt vector if the
 *                              interrupt is pending.
 */
VMMDECL(bool) APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
}
2707
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette