VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/APICAll.cpp@ 83349

Last change on this file since 83349 was 82968, checked in by vboxsync, 5 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 127.6 KB
Line 
1/* $Id: APICAll.cpp 82968 2020-02-04 10:35:17Z vboxsync $ */
2/** @file
3 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2016-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DEV_APIC
#include <string.h>

#include "APICInternal.h"
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcpuset.h>
#ifdef IN_RING0
# include <VBox/vmm/gvmm.h>
#endif
32
33
34/*********************************************************************************************************************************
35* Internal Functions *
36*********************************************************************************************************************************/
37static void apicSetInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType);
38static void apicStopTimer(PVMCPUCC pVCpu);
39
40
41/*********************************************************************************************************************************
42* Global Variables *
43*********************************************************************************************************************************/
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
/** An ordered array of valid LVT masks.
 *  NOTE(review): the entry order appears to follow the LVT register layout
 *  (timer, thermal, perf, LINT0, LINT1, error) — confirm against the callers
 *  that index into this table. */
static const uint32_t g_au32LvtValidMasks[] =
{
    XAPIC_LVT_TIMER_VALID,
    XAPIC_LVT_THERMAL_VALID,
    XAPIC_LVT_PERF_VALID,
    XAPIC_LVT_LINT_VALID,   /* LINT0 */
    XAPIC_LVT_LINT_VALID,   /* LINT1 */
    XAPIC_LVT_ERROR_VALID
};
#endif
56
57#if 0
58/** @todo CMCI */
59static const uint32_t g_au32LvtExtValidMask[] =
60{
61 XAPIC_LVT_CMCI_VALID
62};
63#endif
64
65
66/**
67 * Checks if a vector is set in an APIC 256-bit sparse register.
68 *
69 * @returns true if the specified vector is set, false otherwise.
70 * @param pApicReg The APIC 256-bit spare register.
71 * @param uVector The vector to check if set.
72 */
73DECLINLINE(bool) apicTestVectorInReg(const volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
74{
75 const volatile uint8_t *pbBitmap = (const volatile uint8_t *)&pApicReg->u[0];
76 return ASMBitTest(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
77}
78
79
80/**
81 * Sets the vector in an APIC 256-bit sparse register.
82 *
83 * @param pApicReg The APIC 256-bit spare register.
84 * @param uVector The vector to set.
85 */
86DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
87{
88 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
89 ASMAtomicBitSet(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
90}
91
92
93/**
94 * Clears the vector in an APIC 256-bit sparse register.
95 *
96 * @param pApicReg The APIC 256-bit spare register.
97 * @param uVector The vector to clear.
98 */
99DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
100{
101 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
102 ASMAtomicBitClear(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
103}
104
105
#if 0 /* unused */
/**
 * Checks if a vector is set in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * Currently compiled out; kept for reference/future use.
 *
 * @returns true if the specified vector is set, false otherwise.
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to check if set.
 */
DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    /* The PIB is a flat bitmap indexed directly by vector number. */
    return ASMBitTest(pvPib, uVector);
}
#endif /* unused */
119
120
121/**
122 * Atomically sets the PIB notification bit.
123 *
124 * @returns non-zero if the bit was already set, 0 otherwise.
125 * @param pApicPib Pointer to the PIB.
126 */
127DECLINLINE(uint32_t) apicSetNotificationBitInPib(PAPICPIB pApicPib)
128{
129 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, RT_BIT_32(31));
130}
131
132
133/**
134 * Atomically tests and clears the PIB notification bit.
135 *
136 * @returns non-zero if the bit was already set, 0 otherwise.
137 * @param pApicPib Pointer to the PIB.
138 */
139DECLINLINE(uint32_t) apicClearNotificationBitInPib(PAPICPIB pApicPib)
140{
141 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, UINT32_C(0));
142}
143
144
145/**
146 * Sets the vector in an APIC Pending-Interrupt Bitmap (PIB).
147 *
148 * @param pvPib Opaque pointer to the PIB.
149 * @param uVector The vector to set.
150 */
/**
 * Sets the vector in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to set.
 */
DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    /* The PIB is a flat bitmap indexed directly by vector number. */
    ASMAtomicBitSet(pvPib, uVector);
}
155
#if 0 /* unused */
/**
 * Clears the vector in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * Currently compiled out; kept for reference/future use.
 *
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to clear.
 */
DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    /* The PIB is a flat bitmap indexed directly by vector number. */
    ASMAtomicBitClear(pvPib, uVector);
}
#endif /* unused */
168
#if 0 /* unused */
/**
 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
 * register.
 *
 * Currently compiled out; kept for reference/future use.
 *
 * @param   pApicReg        The APIC 256-bit sparse register.
 * @param   idxFragment     The index of the 32-bit fragment in @a
 *                          pApicReg.
 * @param   u32Fragment     The 32-bit vector fragment to OR.
 */
DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
{
    Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
    ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
}
#endif /* unused */
185
186
#if 0 /* unused */
/**
 * Atomically AND's a fragment (32 vectors) into an APIC
 * 256-bit sparse register.
 *
 * Currently compiled out; kept for reference/future use.
 *
 * @param   pApicReg        The APIC 256-bit sparse register.
 * @param   idxFragment     The index of the 32-bit fragment in @a
 *                          pApicReg.
 * @param   u32Fragment     The 32-bit vector fragment to AND.
 */
DECLINLINE(void) apicAndVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
{
    Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
    ASMAtomicAndU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
}
#endif /* unused */
203
204
205/**
206 * Reports and returns appropriate error code for invalid MSR accesses.
207 *
208 * @returns VERR_CPUM_RAISE_GP_0
209 *
210 * @param pVCpu The cross context virtual CPU structure.
211 * @param u32Reg The MSR being accessed.
212 * @param enmAccess The invalid-access type.
213 */
214static int apicMsrAccessError(PVMCPUCC pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
215{
216 static struct
217 {
218 const char *pszBefore; /* The error message before printing the MSR index */
219 const char *pszAfter; /* The error message after printing the MSR index */
220 } const s_aAccess[] =
221 {
222 /* enmAccess pszBefore pszAfter */
223 /* 0 */ { "read MSR", " while not in x2APIC mode" },
224 /* 1 */ { "write MSR", " while not in x2APIC mode" },
225 /* 2 */ { "read reserved/unknown MSR", "" },
226 /* 3 */ { "write reserved/unknown MSR", "" },
227 /* 4 */ { "read write-only MSR", "" },
228 /* 5 */ { "write read-only MSR", "" },
229 /* 6 */ { "read reserved bits of MSR", "" },
230 /* 7 */ { "write reserved bits of MSR", "" },
231 /* 8 */ { "write an invalid value to MSR", "" },
232 /* 9 */ { "write MSR", " disallowed by configuration" },
233 /* 10 */ { "read MSR", " disallowed by configuration" },
234 };
235 AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);
236
237 size_t const i = enmAccess;
238 Assert(i < RT_ELEMENTS(s_aAccess));
239 if (pVCpu->apic.s.cLogMaxAccessError++ < 5)
240 LogRel(("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg, s_aAccess[i].pszAfter));
241 return VERR_CPUM_RAISE_GP_0;
242}
243
244
245/**
246 * Gets the descriptive APIC mode.
247 *
248 * @returns The name.
249 * @param enmMode The xAPIC mode.
250 */
251const char *apicGetModeName(APICMODE enmMode)
252{
253 switch (enmMode)
254 {
255 case APICMODE_DISABLED: return "Disabled";
256 case APICMODE_XAPIC: return "xAPIC";
257 case APICMODE_X2APIC: return "x2APIC";
258 default: break;
259 }
260 return "Invalid";
261}
262
263
264/**
265 * Gets the descriptive destination format name.
266 *
267 * @returns The destination format name.
268 * @param enmDestFormat The destination format.
269 */
270const char *apicGetDestFormatName(XAPICDESTFORMAT enmDestFormat)
271{
272 switch (enmDestFormat)
273 {
274 case XAPICDESTFORMAT_FLAT: return "Flat";
275 case XAPICDESTFORMAT_CLUSTER: return "Cluster";
276 default: break;
277 }
278 return "Invalid";
279}
280
281
282/**
283 * Gets the descriptive delivery mode name.
284 *
285 * @returns The delivery mode name.
286 * @param enmDeliveryMode The delivery mode.
287 */
288const char *apicGetDeliveryModeName(XAPICDELIVERYMODE enmDeliveryMode)
289{
290 switch (enmDeliveryMode)
291 {
292 case XAPICDELIVERYMODE_FIXED: return "Fixed";
293 case XAPICDELIVERYMODE_LOWEST_PRIO: return "Lowest-priority";
294 case XAPICDELIVERYMODE_SMI: return "SMI";
295 case XAPICDELIVERYMODE_NMI: return "NMI";
296 case XAPICDELIVERYMODE_INIT: return "INIT";
297 case XAPICDELIVERYMODE_STARTUP: return "SIPI";
298 case XAPICDELIVERYMODE_EXTINT: return "ExtINT";
299 default: break;
300 }
301 return "Invalid";
302}
303
304
305/**
306 * Gets the descriptive destination mode name.
307 *
308 * @returns The destination mode name.
309 * @param enmDestMode The destination mode.
310 */
311const char *apicGetDestModeName(XAPICDESTMODE enmDestMode)
312{
313 switch (enmDestMode)
314 {
315 case XAPICDESTMODE_PHYSICAL: return "Physical";
316 case XAPICDESTMODE_LOGICAL: return "Logical";
317 default: break;
318 }
319 return "Invalid";
320}
321
322
323/**
324 * Gets the descriptive trigger mode name.
325 *
326 * @returns The trigger mode name.
327 * @param enmTriggerMode The trigger mode.
328 */
329const char *apicGetTriggerModeName(XAPICTRIGGERMODE enmTriggerMode)
330{
331 switch (enmTriggerMode)
332 {
333 case XAPICTRIGGERMODE_EDGE: return "Edge";
334 case XAPICTRIGGERMODE_LEVEL: return "Level";
335 default: break;
336 }
337 return "Invalid";
338}
339
340
341/**
342 * Gets the destination shorthand name.
343 *
344 * @returns The destination shorthand name.
345 * @param enmDestShorthand The destination shorthand.
346 */
347const char *apicGetDestShorthandName(XAPICDESTSHORTHAND enmDestShorthand)
348{
349 switch (enmDestShorthand)
350 {
351 case XAPICDESTSHORTHAND_NONE: return "None";
352 case XAPICDESTSHORTHAND_SELF: return "Self";
353 case XAPIDDESTSHORTHAND_ALL_INCL_SELF: return "All including self";
354 case XAPICDESTSHORTHAND_ALL_EXCL_SELF: return "All excluding self";
355 default: break;
356 }
357 return "Invalid";
358}
359
360
361/**
362 * Gets the timer mode name.
363 *
364 * @returns The timer mode name.
365 * @param enmTimerMode The timer mode.
366 */
367const char *apicGetTimerModeName(XAPICTIMERMODE enmTimerMode)
368{
369 switch (enmTimerMode)
370 {
371 case XAPICTIMERMODE_ONESHOT: return "One-shot";
372 case XAPICTIMERMODE_PERIODIC: return "Periodic";
373 case XAPICTIMERMODE_TSC_DEADLINE: return "TSC deadline";
374 default: break;
375 }
376 return "Invalid";
377}
378
379
380/**
381 * Gets the APIC mode given the base MSR value.
382 *
383 * @returns The APIC mode.
384 * @param uApicBaseMsr The APIC Base MSR value.
385 */
386APICMODE apicGetMode(uint64_t uApicBaseMsr)
387{
388 uint32_t const uMode = (uApicBaseMsr >> 10) & UINT64_C(3);
389 APICMODE const enmMode = (APICMODE)uMode;
390#ifdef VBOX_STRICT
391 /* Paranoia. */
392 switch (uMode)
393 {
394 case APICMODE_DISABLED:
395 case APICMODE_INVALID:
396 case APICMODE_XAPIC:
397 case APICMODE_X2APIC:
398 break;
399 default:
400 AssertMsgFailed(("Invalid mode"));
401 }
402#endif
403 return enmMode;
404}
405
406
407/**
408 * Returns whether the APIC is hardware enabled or not.
409 *
410 * @returns true if enabled, false otherwise.
411 * @param pVCpu The cross context virtual CPU structure.
412 */
413VMM_INT_DECL(bool) APICIsEnabled(PCVMCPUCC pVCpu)
414{
415 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
416 return RT_BOOL(pApicCpu->uApicBaseMsr & MSR_IA32_APICBASE_EN);
417}
418
419
420/**
421 * Finds the most significant set bit in an APIC 256-bit sparse register.
422 *
423 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
424 * @param pReg The APIC 256-bit sparse register.
425 * @param rcNotFound What to return when no bit is set.
426 */
427static int apicGetHighestSetBitInReg(volatile const XAPIC256BITREG *pReg, int rcNotFound)
428{
429 ssize_t const cFragments = RT_ELEMENTS(pReg->u);
430 unsigned const uFragmentShift = 5;
431 AssertCompile(1 << uFragmentShift == sizeof(pReg->u[0].u32Reg) * 8);
432 for (ssize_t i = cFragments - 1; i >= 0; i--)
433 {
434 uint32_t const uFragment = pReg->u[i].u32Reg;
435 if (uFragment)
436 {
437 unsigned idxSetBit = ASMBitLastSetU32(uFragment);
438 --idxSetBit;
439 idxSetBit |= i << uFragmentShift;
440 return idxSetBit;
441 }
442 }
443 return rcNotFound;
444}
445
446
447/**
448 * Reads a 32-bit register at a specified offset.
449 *
450 * @returns The value at the specified offset.
451 * @param pXApicPage The xAPIC page.
452 * @param offReg The offset of the register being read.
453 */
454DECLINLINE(uint32_t) apicReadRaw32(PCXAPICPAGE pXApicPage, uint16_t offReg)
455{
456 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
457 uint8_t const *pbXApic = (const uint8_t *)pXApicPage;
458 uint32_t const uValue = *(const uint32_t *)(pbXApic + offReg);
459 return uValue;
460}
461
462
463/**
464 * Writes a 32-bit register at a specified offset.
465 *
466 * @param pXApicPage The xAPIC page.
467 * @param offReg The offset of the register being written.
468 * @param uReg The value of the register.
469 */
470DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
471{
472 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
473 uint8_t *pbXApic = (uint8_t *)pXApicPage;
474 *(uint32_t *)(pbXApic + offReg) = uReg;
475}
476
477
478/**
479 * Sets an error in the internal ESR of the specified APIC.
480 *
481 * @param pVCpu The cross context virtual CPU structure.
482 * @param uError The error.
483 * @thread Any.
484 */
485DECLINLINE(void) apicSetError(PVMCPUCC pVCpu, uint32_t uError)
486{
487 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
488 ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
489}
490
491
492/**
493 * Clears all errors in the internal ESR.
494 *
495 * @returns The value of the internal ESR before clearing.
496 * @param pVCpu The cross context virtual CPU structure.
497 */
498DECLINLINE(uint32_t) apicClearAllErrors(PVMCPUCC pVCpu)
499{
500 VMCPU_ASSERT_EMT(pVCpu);
501 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
502 return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
503}
504
505
506/**
507 * Signals the guest if a pending interrupt is ready to be serviced.
508 *
509 * @param pVCpu The cross context virtual CPU structure.
510 */
/**
 * Signals the guest if a pending interrupt is ready to be serviced.
 *
 * Scans the IRR for the highest pending vector and raises the hardware
 * interrupt force-flag when that vector's priority class is above the
 * current processor priority (PPR).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void apicSignalNextPendingIntr(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    if (pXApicPage->svr.u.fApicSoftwareEnable)
    {
        /* -1 signals an empty IRR (no vector pending). */
        int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1 /* rcNotFound */);
        if (irrv >= 0)
        {
            Assert(irrv <= (int)UINT8_MAX);
            uint8_t const uVector = irrv;
            uint8_t const uPpr    = pXApicPage->ppr.u8Ppr;
            /* Signal only when the vector's priority class beats the PPR;
               a zero PPR masks nothing. */
            if (   !uPpr
                ||  XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
            {
                Log2(("APIC%u: apicSignalNextPendingIntr: Signalling pending interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
                apicSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
            }
            else
            {
                Log2(("APIC%u: apicSignalNextPendingIntr: Nothing to signal. uVector=%#x uPpr=%#x uTpr=%#x\n", pVCpu->idCpu,
                      uVector, uPpr, pXApicPage->tpr.u8Tpr));
            }
        }
    }
    else
    {
        /* Software-disabled APIC: withdraw any previously raised hardware
           interrupt force-flag. */
        Log2(("APIC%u: apicSignalNextPendingIntr: APIC software-disabled, clearing pending interrupt\n", pVCpu->idCpu));
        apicClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
    }
}
543
544
545/**
546 * Sets the Spurious-Interrupt Vector Register (SVR).
547 *
548 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
549 * @param pVCpu The cross context virtual CPU structure.
550 * @param uSvr The SVR value.
551 */
/**
 * Sets the Spurious-Interrupt Vector Register (SVR).
 *
 * In x2APIC mode, writes to reserved bits raise a \#GP.  When the write
 * software-disables the APIC, all LVT entries get masked as a side effect.
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uSvr        The SVR value.
 */
static int apicSetSvr(PVMCPUCC pVCpu, uint32_t uSvr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* The EOI-broadcast suppression bit is only writable when the feature
       is advertised in the version register. */
    uint32_t   uValidMask = XAPIC_SVR_VALID;
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    if (pXApicPage->version.u.fEoiBroadcastSupression)
        uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;

    /* Reserved-bit writes only fault in x2APIC mode. */
    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uSvr & ~uValidMask))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);

    Log2(("APIC%u: apicSetSvr: uSvr=%#RX32\n", pVCpu->idCpu, uSvr));
    apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
    if (!pXApicPage->svr.u.fApicSoftwareEnable)
    {
        /* Software-disabling the APIC masks every LVT entry. */
        /** @todo CMCI. */
        pXApicPage->lvt_timer.u.u1Mask   = 1;
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        pXApicPage->lvt_thermal.u.u1Mask = 1;
#endif
        pXApicPage->lvt_perf.u.u1Mask    = 1;
        pXApicPage->lvt_lint0.u.u1Mask   = 1;
        pXApicPage->lvt_lint1.u.u1Mask   = 1;
        pXApicPage->lvt_error.u.u1Mask   = 1;
    }

    /* Re-evaluate pending interrupts since the enable state may have changed. */
    apicSignalNextPendingIntr(pVCpu);
    return VINF_SUCCESS;
}
583
584
585/**
586 * Sends an interrupt to one or more APICs.
587 *
588 * @returns Strict VBox status code.
589 * @param pVM The cross context VM structure.
590 * @param pVCpu The cross context virtual CPU structure, can be
591 * NULL if the source of the interrupt is not an
592 * APIC (for e.g. a bus).
593 * @param uVector The interrupt vector.
594 * @param enmTriggerMode The trigger mode.
595 * @param enmDeliveryMode The delivery mode.
596 * @param pDestCpuSet The destination CPU set.
597 * @param pfIntrAccepted Where to store whether this interrupt was
598 * accepted by the target APIC(s) or not.
599 * Optional, can be NULL.
600 * @param uSrcTag The interrupt source tag (debugging).
601 * @param rcRZ The return code if the operation cannot be
602 * performed in the current context.
603 */
/**
 * Sends an interrupt to one or more APICs.
 *
 * Dispatches on the delivery mode: fixed/lowest-priority post the interrupt
 * to the target APIC(s); SMI/NMI/ExtINT raise the corresponding force-flag;
 * INIT and SIPI require ring-3 and return @a rcRZ in other contexts.
 *
 * @returns Strict VBox status code.
 * @param   pVM                 The cross context VM structure.
 * @param   pVCpu               The cross context virtual CPU structure, can be
 *                              NULL if the source of the interrupt is not an
 *                              APIC (for e.g. a bus).
 * @param   uVector             The interrupt vector.
 * @param   enmTriggerMode      The trigger mode.
 * @param   enmDeliveryMode     The delivery mode.
 * @param   pDestCpuSet         The destination CPU set.
 * @param   pfIntrAccepted      Where to store whether this interrupt was
 *                              accepted by the target APIC(s) or not.
 *                              Optional, can be NULL.
 * @param   uSrcTag             The interrupt source tag (debugging).
 * @param   rcRZ                The return code if the operation cannot be
 *                              performed in the current context.
 */
static VBOXSTRICTRC apicSendIntr(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
                                 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, bool *pfIntrAccepted,
                                 uint32_t uSrcTag, int rcRZ)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    VMCPUID const cCpus = pVM->cCpus;
    bool fAccepted = false;
    switch (enmDeliveryMode)
    {
        case XAPICDELIVERYMODE_FIXED:
        {
            /* Post to every present destination whose APIC is hardware-enabled. */
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    PVMCPUCC pItVCpu = pVM->CTX_SUFF(apCpus)[idCpu];
                    if (APICIsEnabled(pItVCpu))
                        fAccepted = apicPostInterrupt(pItVCpu, uVector, enmTriggerMode, uSrcTag);
                }
            break;
        }

        case XAPICDELIVERYMODE_LOWEST_PRIO:
        {
            /* Arbitration already happened in apicGetDestCpuSet; only one CPU is present. */
            VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
            AssertMsgBreak(idCpu < pVM->cCpus, ("APIC: apicSendIntr: No CPU found for lowest-priority delivery mode! idCpu=%u\n", idCpu));
            PVMCPUCC pVCpuDst = pVM->CTX_SUFF(apCpus)[idCpu];
            if (APICIsEnabled(pVCpuDst))
                fAccepted = apicPostInterrupt(pVCpuDst, uVector, enmTriggerMode, uSrcTag);
            else
                AssertMsgFailed(("APIC: apicSendIntr: Target APIC not enabled in lowest-priority delivery mode! idCpu=%u\n", idCpu));
            break;
        }

        case XAPICDELIVERYMODE_SMI:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Raising SMI on VCPU%u\n", idCpu));
                    apicSetInterruptFF(pVM->CTX_SUFF(apCpus)[idCpu], PDMAPICIRQ_SMI);
                    fAccepted = true;
                }
            break;
        }

        case XAPICDELIVERYMODE_NMI:
        {
            /* Unlike SMI, NMI delivery checks that the target APIC is enabled. */
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    PVMCPUCC pItVCpu = pVM->CTX_SUFF(apCpus)[idCpu];
                    if (APICIsEnabled(pItVCpu))
                    {
                        Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
                        apicSetInterruptFF(pItVCpu, PDMAPICIRQ_NMI);
                        fAccepted = true;
                    }
                }
            break;
        }

        case XAPICDELIVERYMODE_INIT:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Issuing INIT to VCPU%u\n", idCpu));
                    VMMR3SendInitIpi(pVM, idCpu);
                    fAccepted = true;
                }
#else
            /* We need to return to ring-3 to deliver the INIT. */
            rcStrict = rcRZ;
            fAccepted = true;
#endif
            break;
        }

        case XAPICDELIVERYMODE_STARTUP:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Issuing SIPI to VCPU%u\n", idCpu));
                    VMMR3SendStartupIpi(pVM, idCpu, uVector);
                    fAccepted = true;
                }
#else
            /* We need to return to ring-3 to deliver the SIPI. */
            rcStrict = rcRZ;
            fAccepted = true;
            Log2(("APIC: apicSendIntr: SIPI issued, returning to RZ. rc=%Rrc\n", rcRZ));
#endif
            break;
        }

        case XAPICDELIVERYMODE_EXTINT:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Raising EXTINT on VCPU%u\n", idCpu));
                    apicSetInterruptFF(pVM->CTX_SUFF(apCpus)[idCpu], PDMAPICIRQ_EXTINT);
                    fAccepted = true;
                }
            break;
        }

        default:
        {
            AssertMsgFailed(("APIC: apicSendIntr: Unsupported delivery mode %#x (%s)\n", enmDeliveryMode,
                             apicGetDeliveryModeName(enmDeliveryMode)));
            break;
        }
    }

    /*
     * If an illegal vector is programmed, set the 'send illegal vector' error here if the
     * interrupt is being sent by an APIC.
     *
     * The 'receive illegal vector' will be set on the target APIC when the interrupt
     * gets generated, see apicPostInterrupt().
     *
     * See Intel spec. 10.5.3 "Error Handling".
     */
    if (   rcStrict != rcRZ
        && pVCpu)
    {
        /*
         * Flag only errors when the delivery mode is fixed and not others.
         *
         * Ubuntu 10.04-3 amd64 live CD with 2 VCPUs gets upset as it sends an SIPI to the
         * 2nd VCPU with vector 6 and checks the ESR for no errors, see @bugref{8245#c86}.
         */
        /** @todo The spec says this for LVT, but not explcitly for ICR-lo
         *        but it probably is true. */
        if (enmDeliveryMode == XAPICDELIVERYMODE_FIXED)
        {
            if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
                apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
        }
    }

    if (pfIntrAccepted)
        *pfIntrAccepted = fAccepted;

    return rcStrict;
}
754
755
756/**
757 * Checks if this APIC belongs to a logical destination.
758 *
759 * @returns true if the APIC belongs to the logical
760 * destination, false otherwise.
761 * @param pVCpu The cross context virtual CPU structure.
762 * @param fDest The destination mask.
763 *
764 * @thread Any.
765 */
/**
 * Checks if this APIC belongs to a logical destination.
 *
 * Interprets the LDR against @a fDest according to the current mode:
 * x2APIC uses the 32-bit clustered format, xAPIC uses the 8-bit flat or
 * clustered format selected by the DFR.
 *
 * @returns true if the APIC belongs to the logical
 *          destination, false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   fDest       The destination mask.
 *
 * @thread  Any.
 */
static bool apicIsLogicalDest(PVMCPUCC pVCpu, uint32_t fDest)
{
    if (XAPIC_IN_X2APIC_MODE(pVCpu))
    {
        /*
         * Flat logical mode is not supported in x2APIC mode.
         * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
         *    - High 16 bits is the cluster ID.
         *    - Low 16 bits: each bit represents a unique APIC within the cluster.
         */
        PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
        uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
        /* Cluster IDs must match exactly; membership is then a bit test. */
        if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
            return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
        return false;
    }

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
     * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
    if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
        return true;

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
    if (enmDestFormat == XAPICDESTFORMAT_FLAT)
    {
        /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
        uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
    }

    /*
     * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
     *    - High 4 bits is the cluster ID.
     *    - Low 4 bits: each bit represents a unique APIC within the cluster.
     */
    Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
    uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
    /* Cluster IDs must match exactly; membership is then a bit test. */
    if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
    return false;
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
}
815
816
817/**
818 * Figures out the set of destination CPUs for a given destination mode, format
819 * and delivery mode setting.
820 *
821 * @param pVM The cross context VM structure.
822 * @param fDestMask The destination mask.
823 * @param fBroadcastMask The broadcast mask.
824 * @param enmDestMode The destination mode.
825 * @param enmDeliveryMode The delivery mode.
826 * @param pDestCpuSet The destination CPU set to update.
827 */
/**
 * Figures out the set of destination CPUs for a given destination mode, format
 * and delivery mode setting.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fDestMask       The destination mask.
 * @param   fBroadcastMask  The broadcast mask.
 * @param   enmDestMode     The destination mode.
 * @param   enmDeliveryMode The delivery mode.
 * @param   pDestCpuSet     The destination CPU set to update.
 */
static void apicGetDestCpuSet(PVMCC pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
                              XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
{
    VMCPUSET_EMPTY(pDestCpuSet);

    /*
     * Physical destination mode only supports either a broadcast or a single target.
     *    - Broadcast with lowest-priority delivery mode is not supported[1], we deliver it
     *      as a regular broadcast like in fixed delivery mode.
     *    - For a single target, lowest-priority delivery mode makes no sense. We deliver
     *      to the target like in fixed delivery mode.
     *
     * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     */
    if (   enmDestMode == XAPICDESTMODE_PHYSICAL
        && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
        enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
    }

    uint32_t const cCpus = pVM->cCpus;
    if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        /* Arbitrate among the logical destinations: pick the one with the
           lowest TPR; only that single CPU ends up in the set. */
        VMCPUID idCpuLowestTpr = NIL_VMCPUID;
        uint8_t u8LowestTpr = UINT8_C(0xff);
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPUCC pVCpuDst = pVM->CTX_SUFF(apCpus)[idCpu];
            if (apicIsLogicalDest(pVCpuDst, fDestMask))
            {
                PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDst);
                uint8_t const u8Tpr = pXApicPage->tpr.u8Tpr;         /* PAV */

                /*
                 * If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
                 * Hence the use of "<=" in the check below.
                 * See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration".
                 */
                if (u8Tpr <= u8LowestTpr)
                {
                    u8LowestTpr = u8Tpr;
                    idCpuLowestTpr = idCpu;
                }
            }
        }
        if (idCpuLowestTpr != NIL_VMCPUID)
            VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
        return;
    }

    /*
     * x2APIC:
     *    - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
     * xAPIC:
     *    - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
     *    - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
     *
     * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
     * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     * [2] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    if ((fDestMask & fBroadcastMask) == fBroadcastMask)
    {
        VMCPUSET_FILL(pDestCpuSet);
        return;
    }

    if (enmDestMode == XAPICDESTMODE_PHYSICAL)
    {
        /* The destination mask is interpreted as the physical APIC ID of a single target. */
#if 1
        /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
        if (RT_LIKELY(fDestMask < cCpus))
            VMCPUSET_ADD(pDestCpuSet, fDestMask);
#else
        /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPUCC pVCpuDst = &pVM->aCpus[idCpu];
            if (XAPIC_IN_X2APIC_MODE(pVCpuDst))
            {
                PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDst);
                if (pX2ApicPage->id.u32ApicId == fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDst->idCpu);
            }
            else
            {
                PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDst);
                if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDst->idCpu);
            }
        }
#endif
    }
    else
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);

        /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
        if (RT_UNLIKELY(!fDestMask))
            return;

        /* The destination mask is interpreted as a bitmap of software-programmable logical APIC ID of the target APICs. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPUCC pVCpuDst = pVM->CTX_SUFF(apCpus)[idCpu];
            if (apicIsLogicalDest(pVCpuDst, fDestMask))
                VMCPUSET_ADD(pDestCpuSet, pVCpuDst->idCpu);
        }
    }
}
945
946
947/**
948 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
949 * Command Register (ICR).
950 *
951 * @returns VBox status code.
952 * @param pVCpu The cross context virtual CPU structure.
953 * @param rcRZ The return code if the operation cannot be
954 * performed in the current context.
955 */
/**
 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
 * Command Register (ICR).
 *
 * Decodes the cached ICR low/high dwords, resolves the destination CPU set
 * (honouring shorthands) and hands delivery to apicSendIntr().
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rcRZ        The return code if the operation cannot be
 *                      performed in the current context.
 */
DECLINLINE(VBOXSTRICTRC) apicSendIpi(PVMCPUCC pVCpu, int rcRZ)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* Decode the individual ICR-low fields. */
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    XAPICDELIVERYMODE const  enmDeliveryMode  = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
    XAPICDESTMODE const      enmDestMode      = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
    XAPICINITLEVEL const     enmInitLevel     = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
    XAPICTRIGGERMODE const   enmTriggerMode   = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
    XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
    uint8_t const            uVector          = pXApicPage->icr_lo.u.u8Vector;

    /* The destination field is 32 bits in x2APIC mode, 8 bits otherwise. */
    PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
    uint32_t const fDest    = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * INIT Level De-assert is not support on Pentium 4 and Xeon processors.
     * Apparently, this also applies to NMI, SMI, lowest-priority and fixed delivery modes,
     * see @bugref{8245#c116}.
     *
     * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)" for a table of valid ICR combinations.
     */
    if (   enmTriggerMode == XAPICTRIGGERMODE_LEVEL
        && enmInitLevel == XAPICINITLEVEL_DEASSERT
        && (   enmDeliveryMode == XAPICDELIVERYMODE_FIXED
            || enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO
            || enmDeliveryMode == XAPICDELIVERYMODE_SMI
            || enmDeliveryMode == XAPICDELIVERYMODE_NMI
            || enmDeliveryMode == XAPICDELIVERYMODE_INIT))
    {
        Log2(("APIC%u: %s level de-assert unsupported, ignoring!\n", pVCpu->idCpu, apicGetDeliveryModeName(enmDeliveryMode)));
        return VINF_SUCCESS;
    }
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif

    /*
     * The destination and delivery modes are ignored/by-passed when a destination shorthand is specified.
     * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
     * Note: the shorthand field is 2 bits wide, so the four cases below are exhaustive.
     */
    VMCPUSET DestCpuSet;
    switch (enmDestShorthand)
    {
        case XAPICDESTSHORTHAND_NONE:
        {
            PVMCC pVM = pVCpu->CTX_SUFF(pVM);
            uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
            apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_SELF:
        {
            VMCPUSET_EMPTY(&DestCpuSet);
            VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
            break;
        }

        case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
            break;
        }
    }

    return apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
                        NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
}
1033
1034
1035/**
1036 * Sets the Interrupt Command Register (ICR) high dword.
1037 *
1038 * @returns Strict VBox status code.
1039 * @param pVCpu The cross context virtual CPU structure.
1040 * @param uIcrHi The ICR high dword.
1041 */
1042static VBOXSTRICTRC apicSetIcrHi(PVMCPUCC pVCpu, uint32_t uIcrHi)
1043{
1044 VMCPU_ASSERT_EMT(pVCpu);
1045 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1046
1047 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1048 pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
1049 STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrHiWrite);
1050 Log2(("APIC%u: apicSetIcrHi: uIcrHi=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_hi.all.u32IcrHi));
1051
1052 return VINF_SUCCESS;
1053}
1054
1055
1056/**
1057 * Sets the Interrupt Command Register (ICR) low dword.
1058 *
1059 * @returns Strict VBox status code.
1060 * @param pVCpu The cross context virtual CPU structure.
1061 * @param uIcrLo The ICR low dword.
1062 * @param rcRZ The return code if the operation cannot be performed
1063 * in the current context.
1064 * @param fUpdateStat Whether to update the ICR low write statistics
1065 * counter.
1066 */
1067static VBOXSTRICTRC apicSetIcrLo(PVMCPUCC pVCpu, uint32_t uIcrLo, int rcRZ, bool fUpdateStat)
1068{
1069 VMCPU_ASSERT_EMT(pVCpu);
1070
1071 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1072 pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR_VALID;
1073 Log2(("APIC%u: apicSetIcrLo: uIcrLo=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_lo.all.u32IcrLo));
1074
1075 if (fUpdateStat)
1076 STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrLoWrite);
1077 RT_NOREF(fUpdateStat);
1078
1079 return apicSendIpi(pVCpu, rcRZ);
1080}
1081
1082
1083/**
1084 * Sets the Interrupt Command Register (ICR).
1085 *
1086 * @returns Strict VBox status code.
1087 * @param pVCpu The cross context virtual CPU structure.
1088 * @param u64Icr The ICR (High and Low combined).
1089 * @param rcRZ The return code if the operation cannot be performed
1090 * in the current context.
1091 *
1092 * @remarks This function is used by both x2APIC interface and the Hyper-V
1093 * interface, see APICHvSetIcr. The Hyper-V spec isn't clear what
1094 * happens when invalid bits are set. For the time being, it will
1095 * \#GP like a regular x2APIC access.
1096 */
1097static VBOXSTRICTRC apicSetIcr(PVMCPUCC pVCpu, uint64_t u64Icr, int rcRZ)
1098{
1099 VMCPU_ASSERT_EMT(pVCpu);
1100
1101 /* Validate. */
1102 uint32_t const uLo = RT_LO_U32(u64Icr);
1103 if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR_VALID)))
1104 {
1105 /* Update high dword first, then update the low dword which sends the IPI. */
1106 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
1107 pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(u64Icr);
1108 STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrFullWrite);
1109 return apicSetIcrLo(pVCpu, uLo, rcRZ, false /* fUpdateStat */);
1110 }
1111 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
1112}
1113
1114
1115/**
1116 * Sets the Error Status Register (ESR).
1117 *
1118 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
1119 * @param pVCpu The cross context virtual CPU structure.
1120 * @param uEsr The ESR value.
1121 */
1122static int apicSetEsr(PVMCPUCC pVCpu, uint32_t uEsr)
1123{
1124 VMCPU_ASSERT_EMT(pVCpu);
1125
1126 Log2(("APIC%u: apicSetEsr: uEsr=%#RX32\n", pVCpu->idCpu, uEsr));
1127
1128 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1129 && (uEsr & ~XAPIC_ESR_WO_VALID))
1130 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);
1131
1132 /*
1133 * Writes to the ESR causes the internal state to be updated in the register,
1134 * clearing the original state. See AMD spec. 16.4.6 "APIC Error Interrupts".
1135 */
1136 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1137 pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
1138 return VINF_SUCCESS;
1139}
1140
1141
1142/**
1143 * Updates the Processor Priority Register (PPR).
1144 *
1145 * @param pVCpu The cross context virtual CPU structure.
1146 */
1147static void apicUpdatePpr(PVMCPUCC pVCpu)
1148{
1149 VMCPU_ASSERT_EMT(pVCpu);
1150
1151 /* See Intel spec 10.8.3.1 "Task and Processor Priorities". */
1152 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1153 uint8_t const uIsrv = apicGetHighestSetBitInReg(&pXApicPage->isr, 0 /* rcNotFound */);
1154 uint8_t uPpr;
1155 if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
1156 uPpr = pXApicPage->tpr.u8Tpr;
1157 else
1158 uPpr = XAPIC_PPR_GET_PP(uIsrv);
1159 pXApicPage->ppr.u8Ppr = uPpr;
1160}
1161
1162
1163/**
1164 * Gets the Processor Priority Register (PPR).
1165 *
1166 * @returns The PPR value.
1167 * @param pVCpu The cross context virtual CPU structure.
1168 */
1169static uint8_t apicGetPpr(PVMCPUCC pVCpu)
1170{
1171 VMCPU_ASSERT_EMT(pVCpu);
1172 STAM_COUNTER_INC(&pVCpu->apic.s.StatTprRead);
1173
1174 /*
1175 * With virtualized APIC registers or with TPR virtualization, the hardware may
1176 * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
1177 * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
1178 *
1179 * In all other instances, whenever the TPR or ISR changes, we need to update the PPR
1180 * as well (e.g. like we do manually in apicR3InitIpi and by calling apicUpdatePpr).
1181 */
1182 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1183 if (pApic->fVirtApicRegsEnabled) /** @todo re-think this */
1184 apicUpdatePpr(pVCpu);
1185 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1186 return pXApicPage->ppr.u8Ppr;
1187}
1188
1189
1190/**
1191 * Sets the Task Priority Register (TPR).
1192 *
1193 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
1194 * @param pVCpu The cross context virtual CPU structure.
1195 * @param uTpr The TPR value.
1196 * @param fForceX2ApicBehaviour Pretend the APIC is in x2APIC mode during
1197 * this write.
1198 */
1199static int apicSetTprEx(PVMCPUCC pVCpu, uint32_t uTpr, bool fForceX2ApicBehaviour)
1200{
1201 VMCPU_ASSERT_EMT(pVCpu);
1202
1203 Log2(("APIC%u: apicSetTprEx: uTpr=%#RX32\n", pVCpu->idCpu, uTpr));
1204 STAM_COUNTER_INC(&pVCpu->apic.s.StatTprWrite);
1205
1206 bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
1207 if ( fX2ApicMode
1208 && (uTpr & ~XAPIC_TPR_VALID))
1209 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);
1210
1211 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1212 pXApicPage->tpr.u8Tpr = uTpr;
1213 apicUpdatePpr(pVCpu);
1214 apicSignalNextPendingIntr(pVCpu);
1215 return VINF_SUCCESS;
1216}
1217
1218
1219/**
1220 * Sets the End-Of-Interrupt (EOI) register.
1221 *
1222 * @returns Strict VBox status code.
1223 * @param pVCpu The cross context virtual CPU structure.
1224 * @param uEoi The EOI value.
1225 * @param rcBusy The busy return code when the write cannot
1226 * be completed successfully in this context.
1227 * @param fForceX2ApicBehaviour Pretend the APIC is in x2APIC mode during
1228 * this write.
1229 */
1230static VBOXSTRICTRC apicSetEoi(PVMCPUCC pVCpu, uint32_t uEoi, int rcBusy, bool fForceX2ApicBehaviour)
1231{
1232 VMCPU_ASSERT_EMT(pVCpu);
1233
1234 Log2(("APIC%u: apicSetEoi: uEoi=%#RX32\n", pVCpu->idCpu, uEoi));
1235 STAM_COUNTER_INC(&pVCpu->apic.s.StatEoiWrite);
1236
1237 bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
1238 if ( fX2ApicMode
1239 && (uEoi & ~XAPIC_EOI_WO_VALID))
1240 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);
1241
1242 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1243 int isrv = apicGetHighestSetBitInReg(&pXApicPage->isr, -1 /* rcNotFound */);
1244 if (isrv >= 0)
1245 {
1246 /*
1247 * Broadcast the EOI to the I/O APIC(s).
1248 *
1249 * We'll handle the EOI broadcast first as there is tiny chance we get rescheduled to
1250 * ring-3 due to contention on the I/O APIC lock. This way we don't mess with the rest
1251 * of the APIC state and simply restart the EOI write operation from ring-3.
1252 */
1253 Assert(isrv <= (int)UINT8_MAX);
1254 uint8_t const uVector = isrv;
1255 bool const fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
1256 if (fLevelTriggered)
1257 {
1258 VBOXSTRICTRC rc = PDMIoApicBroadcastEoi(pVCpu->CTX_SUFF(pVM), uVector);
1259 if (rc == VINF_SUCCESS)
1260 { /* likely */ }
1261 else
1262 return rcBusy;
1263
1264 /*
1265 * Clear the vector from the TMR.
1266 *
1267 * The broadcast to I/O APIC can re-trigger new interrupts to arrive via the bus. However,
1268 * APICUpdatePendingInterrupts() which updates TMR can only be done from EMT which we
1269 * currently are on, so no possibility of concurrent updates.
1270 */
1271 apicClearVectorInReg(&pXApicPage->tmr, uVector);
1272
1273 /*
1274 * Clear the remote IRR bit for level-triggered, fixed mode LINT0 interrupt.
1275 * The LINT1 pin does not support level-triggered interrupts.
1276 * See Intel spec. 10.5.1 "Local Vector Table".
1277 */
1278 uint32_t const uLvtLint0 = pXApicPage->lvt_lint0.all.u32LvtLint0;
1279 if ( XAPIC_LVT_GET_REMOTE_IRR(uLvtLint0)
1280 && XAPIC_LVT_GET_VECTOR(uLvtLint0) == uVector
1281 && XAPIC_LVT_GET_DELIVERY_MODE(uLvtLint0) == XAPICDELIVERYMODE_FIXED)
1282 {
1283 ASMAtomicAndU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, ~XAPIC_LVT_REMOTE_IRR);
1284 Log2(("APIC%u: apicSetEoi: Cleared remote-IRR for LINT0. uVector=%#x\n", pVCpu->idCpu, uVector));
1285 }
1286
1287 Log2(("APIC%u: apicSetEoi: Cleared level triggered interrupt from TMR. uVector=%#x\n", pVCpu->idCpu, uVector));
1288 }
1289
1290 /*
1291 * Mark interrupt as serviced, update the PPR and signal pending interrupts.
1292 */
1293 Log2(("APIC%u: apicSetEoi: Clearing interrupt from ISR. uVector=%#x\n", pVCpu->idCpu, uVector));
1294 apicClearVectorInReg(&pXApicPage->isr, uVector);
1295 apicUpdatePpr(pVCpu);
1296 apicSignalNextPendingIntr(pVCpu);
1297 }
1298 else
1299 {
1300#ifdef DEBUG_ramshankar
1301 /** @todo Figure out if this is done intentionally by guests or is a bug
1302 * in our emulation. Happened with Win10 SMP VM during reboot after
1303 * installation of guest additions with 3D support. */
1304 AssertMsgFailed(("APIC%u: apicSetEoi: Failed to find any ISR bit\n", pVCpu->idCpu));
1305#endif
1306 }
1307
1308 return VINF_SUCCESS;
1309}
1310
1311
1312/**
1313 * Sets the Logical Destination Register (LDR).
1314 *
1315 * @returns Strict VBox status code.
1316 * @param pVCpu The cross context virtual CPU structure.
1317 * @param uLdr The LDR value.
1318 *
1319 * @remarks LDR is read-only in x2APIC mode.
1320 */
1321static VBOXSTRICTRC apicSetLdr(PVMCPUCC pVCpu, uint32_t uLdr)
1322{
1323 VMCPU_ASSERT_EMT(pVCpu);
1324 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1325 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu) || pApic->fHyperVCompatMode); RT_NOREF_PV(pApic);
1326
1327 Log2(("APIC%u: apicSetLdr: uLdr=%#RX32\n", pVCpu->idCpu, uLdr));
1328
1329 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1330 apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR_VALID);
1331 return VINF_SUCCESS;
1332}
1333
1334
1335/**
1336 * Sets the Destination Format Register (DFR).
1337 *
1338 * @returns Strict VBox status code.
1339 * @param pVCpu The cross context virtual CPU structure.
1340 * @param uDfr The DFR value.
1341 *
1342 * @remarks DFR is not available in x2APIC mode.
1343 */
1344static VBOXSTRICTRC apicSetDfr(PVMCPUCC pVCpu, uint32_t uDfr)
1345{
1346 VMCPU_ASSERT_EMT(pVCpu);
1347 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1348
1349 uDfr &= XAPIC_DFR_VALID;
1350 uDfr |= XAPIC_DFR_RSVD_MB1;
1351
1352 Log2(("APIC%u: apicSetDfr: uDfr=%#RX32\n", pVCpu->idCpu, uDfr));
1353
1354 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1355 apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr);
1356 return VINF_SUCCESS;
1357}
1358
1359
1360/**
1361 * Sets the Timer Divide Configuration Register (DCR).
1362 *
1363 * @returns Strict VBox status code.
1364 * @param pVCpu The cross context virtual CPU structure.
1365 * @param uTimerDcr The timer DCR value.
1366 */
1367static VBOXSTRICTRC apicSetTimerDcr(PVMCPUCC pVCpu, uint32_t uTimerDcr)
1368{
1369 VMCPU_ASSERT_EMT(pVCpu);
1370 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1371 && (uTimerDcr & ~XAPIC_TIMER_DCR_VALID))
1372 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);
1373
1374 Log2(("APIC%u: apicSetTimerDcr: uTimerDcr=%#RX32\n", pVCpu->idCpu, uTimerDcr));
1375
1376 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1377 apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
1378 return VINF_SUCCESS;
1379}
1380
1381
1382/**
1383 * Gets the timer's Current Count Register (CCR).
1384 *
1385 * @returns VBox status code.
1386 * @param pDevIns The device instance.
1387 * @param pVCpu The cross context virtual CPU structure.
1388 * @param rcBusy The busy return code for the timer critical section.
1389 * @param puValue Where to store the LVT timer CCR.
1390 */
1391static VBOXSTRICTRC apicGetTimerCcr(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, int rcBusy, uint32_t *puValue)
1392{
1393 VMCPU_ASSERT_EMT(pVCpu);
1394 Assert(puValue);
1395
1396 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1397 *puValue = 0;
1398
1399 /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1400 if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1401 return VINF_SUCCESS;
1402
1403 /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
1404 uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
1405 if (!uInitialCount)
1406 return VINF_SUCCESS;
1407
1408 /*
1409 * Reading the virtual-sync clock requires locking its timer because it's not
1410 * a simple atomic operation, see tmVirtualSyncGetEx().
1411 *
1412 * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
1413 */
1414 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1415 TMTIMERHANDLE hTimer = pApicCpu->hTimer;
1416
1417 VBOXSTRICTRC rc = PDMDevHlpTimerLockClock(pDevIns, hTimer, rcBusy);
1418 if (rc == VINF_SUCCESS)
1419 {
1420 /* If the current-count register is 0, it implies the timer expired. */
1421 uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
1422 if (uCurrentCount)
1423 {
1424 uint64_t const cTicksElapsed = PDMDevHlpTimerGet(pDevIns, hTimer) - pApicCpu->u64TimerInitial;
1425 PDMDevHlpTimerUnlockClock(pDevIns, hTimer);
1426 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
1427 uint64_t const uDelta = cTicksElapsed >> uTimerShift;
1428 if (uInitialCount > uDelta)
1429 *puValue = uInitialCount - uDelta;
1430 }
1431 else
1432 PDMDevHlpTimerUnlockClock(pDevIns, hTimer);
1433 }
1434 return rc;
1435}
1436
1437
1438/**
1439 * Sets the timer's Initial-Count Register (ICR).
1440 *
1441 * @returns Strict VBox status code.
1442 * @param pDevIns The device instance.
1443 * @param pVCpu The cross context virtual CPU structure.
1444 * @param rcBusy The busy return code for the timer critical section.
1445 * @param uInitialCount The timer ICR.
1446 */
1447static VBOXSTRICTRC apicSetTimerIcr(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, int rcBusy, uint32_t uInitialCount)
1448{
1449 VMCPU_ASSERT_EMT(pVCpu);
1450
1451 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1452 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1453 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1454
1455 Log2(("APIC%u: apicSetTimerIcr: uInitialCount=%#RX32\n", pVCpu->idCpu, uInitialCount));
1456 STAM_COUNTER_INC(&pApicCpu->StatTimerIcrWrite);
1457
1458 /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1459 if ( pApic->fSupportsTscDeadline
1460 && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1461 return VINF_SUCCESS;
1462
1463 /*
1464 * The timer CCR may be modified by apicR3TimerCallback() in parallel,
1465 * so obtain the lock -before- updating it here to be consistent with the
1466 * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
1467 */
1468 TMTIMERHANDLE hTimer = pApicCpu->hTimer;
1469 VBOXSTRICTRC rc = PDMDevHlpTimerLockClock(pDevIns, hTimer, rcBusy);
1470 if (rc == VINF_SUCCESS)
1471 {
1472 pXApicPage->timer_icr.u32InitialCount = uInitialCount;
1473 pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
1474 if (uInitialCount)
1475 apicStartTimer(pVCpu, uInitialCount);
1476 else
1477 apicStopTimer(pVCpu);
1478 PDMDevHlpTimerUnlockClock(pDevIns, hTimer);
1479 }
1480 return rc;
1481}
1482
1483
1484/**
1485 * Sets an LVT entry.
1486 *
1487 * @returns Strict VBox status code.
1488 * @param pVCpu The cross context virtual CPU structure.
1489 * @param offLvt The LVT entry offset in the xAPIC page.
1490 * @param uLvt The LVT value to set.
1491 */
1492static VBOXSTRICTRC apicSetLvtEntry(PVMCPUCC pVCpu, uint16_t offLvt, uint32_t uLvt)
1493{
1494 VMCPU_ASSERT_EMT(pVCpu);
1495
1496#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1497 AssertMsg( offLvt == XAPIC_OFF_LVT_TIMER
1498 || offLvt == XAPIC_OFF_LVT_THERMAL
1499 || offLvt == XAPIC_OFF_LVT_PERF
1500 || offLvt == XAPIC_OFF_LVT_LINT0
1501 || offLvt == XAPIC_OFF_LVT_LINT1
1502 || offLvt == XAPIC_OFF_LVT_ERROR,
1503 ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#RX16, uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1504
1505 /*
1506 * If TSC-deadline mode isn't support, ignore the bit in xAPIC mode
1507 * and raise #GP(0) in x2APIC mode.
1508 */
1509 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1510 if (offLvt == XAPIC_OFF_LVT_TIMER)
1511 {
1512 if ( !pApic->fSupportsTscDeadline
1513 && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
1514 {
1515 if (XAPIC_IN_X2APIC_MODE(pVCpu))
1516 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1517 uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
1518 /** @todo TSC-deadline timer mode transition */
1519 }
1520 }
1521
1522 /*
1523 * Validate rest of the LVT bits.
1524 */
1525 uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
1526 AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);
1527
1528 /*
1529 * For x2APIC, disallow setting of invalid/reserved bits.
1530 * For xAPIC, mask out invalid/reserved bits (i.e. ignore them).
1531 */
1532 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1533 && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
1534 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1535
1536 uLvt &= g_au32LvtValidMasks[idxLvt];
1537
1538 /*
1539 * In the software-disabled state, LVT mask-bit must remain set and attempts to clear the mask
1540 * bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
1541 */
1542 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1543 if (!pXApicPage->svr.u.fApicSoftwareEnable)
1544 uLvt |= XAPIC_LVT_MASK;
1545
1546 /*
1547 * It is unclear whether we should signal a 'send illegal vector' error here and ignore updating
1548 * the LVT entry when the delivery mode is 'fixed'[1] or update it in addition to signalling the
1549 * error or not signal the error at all. For now, we'll allow setting illegal vectors into the LVT
1550 * but set the 'send illegal vector' error here. The 'receive illegal vector' error will be set if
1551 * the interrupt for the vector happens to be generated, see apicPostInterrupt().
1552 *
1553 * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
1554 */
1555 if (RT_UNLIKELY( XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
1556 && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
1557 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
1558
1559 Log2(("APIC%u: apicSetLvtEntry: offLvt=%#RX16 uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1560
1561 apicWriteRaw32(pXApicPage, offLvt, uLvt);
1562 return VINF_SUCCESS;
1563#else
1564# error "Implement Pentium and P6 family APIC architectures"
1565#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
1566}
1567
1568
#if 0
/**
 * Sets an LVT entry in the extended LVT range.
 *
 * Currently disabled: CMCI is the only extended LVT entry and it is not
 * implemented yet.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   offLvt  The LVT entry offset in the xAPIC page.
 * @param   uLvt    The LVT value to set.
 */
static int apicSetLvtExtEntry(PVMCPUCC pVCpu, uint16_t offLvt, uint32_t uLvt)
{
    VMCPU_ASSERT_EMT(pVCpu);
    AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvt1Entry: invalid offset %#RX16\n", pVCpu->idCpu, offLvt));

    /** @todo support CMCI. */
    return VERR_NOT_IMPLEMENTED;
}
#endif
1587
1588
1589/**
1590 * Hints TM about the APIC timer frequency.
1591 *
1592 * @param pDevIns The device instance.
1593 * @param pApicCpu The APIC CPU state.
1594 * @param uInitialCount The new initial count.
1595 * @param uTimerShift The new timer shift.
1596 * @thread Any.
1597 */
1598void apicHintTimerFreq(PPDMDEVINS pDevIns, PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
1599{
1600 Assert(pApicCpu);
1601
1602 if ( pApicCpu->uHintedTimerInitialCount != uInitialCount
1603 || pApicCpu->uHintedTimerShift != uTimerShift)
1604 {
1605 uint32_t uHz;
1606 if (uInitialCount)
1607 {
1608 uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
1609 uHz = PDMDevHlpTimerGetFreq(pDevIns, pApicCpu->hTimer) / cTicksPerPeriod;
1610 }
1611 else
1612 uHz = 0;
1613
1614 PDMDevHlpTimerSetFrequencyHint(pDevIns, pApicCpu->hTimer, uHz);
1615 pApicCpu->uHintedTimerInitialCount = uInitialCount;
1616 pApicCpu->uHintedTimerShift = uTimerShift;
1617 }
1618}
1619
1620
1621/**
1622 * Gets the Interrupt Command Register (ICR), without performing any interface
1623 * checks.
1624 *
1625 * @returns The ICR value.
1626 * @param pVCpu The cross context virtual CPU structure.
1627 */
1628DECLINLINE(uint64_t) apicGetIcrNoCheck(PVMCPUCC pVCpu)
1629{
1630 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
1631 uint64_t const uHi = pX2ApicPage->icr_hi.u32IcrHi;
1632 uint64_t const uLo = pX2ApicPage->icr_lo.all.u32IcrLo;
1633 uint64_t const uIcr = RT_MAKE_U64(uLo, uHi);
1634 return uIcr;
1635}
1636
1637
1638/**
1639 * Reads an APIC register.
1640 *
1641 * @returns VBox status code.
1642 * @param pDevIns The device instance.
1643 * @param pVCpu The cross context virtual CPU structure.
1644 * @param offReg The offset of the register being read.
1645 * @param puValue Where to store the register value.
1646 */
1647DECLINLINE(VBOXSTRICTRC) apicReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
1648{
1649 VMCPU_ASSERT_EMT(pVCpu);
1650 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1651
1652 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1653 uint32_t uValue = 0;
1654 VBOXSTRICTRC rc = VINF_SUCCESS;
1655 switch (offReg)
1656 {
1657 case XAPIC_OFF_ID:
1658 case XAPIC_OFF_VERSION:
1659 case XAPIC_OFF_TPR:
1660 case XAPIC_OFF_EOI:
1661 case XAPIC_OFF_RRD:
1662 case XAPIC_OFF_LDR:
1663 case XAPIC_OFF_DFR:
1664 case XAPIC_OFF_SVR:
1665 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1666 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1667 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1668 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1669 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1670 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1671 case XAPIC_OFF_ESR:
1672 case XAPIC_OFF_ICR_LO:
1673 case XAPIC_OFF_ICR_HI:
1674 case XAPIC_OFF_LVT_TIMER:
1675#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1676 case XAPIC_OFF_LVT_THERMAL:
1677#endif
1678 case XAPIC_OFF_LVT_PERF:
1679 case XAPIC_OFF_LVT_LINT0:
1680 case XAPIC_OFF_LVT_LINT1:
1681 case XAPIC_OFF_LVT_ERROR:
1682 case XAPIC_OFF_TIMER_ICR:
1683 case XAPIC_OFF_TIMER_DCR:
1684 {
1685 Assert( !XAPIC_IN_X2APIC_MODE(pVCpu)
1686 || ( offReg != XAPIC_OFF_DFR
1687 && offReg != XAPIC_OFF_ICR_HI
1688 && offReg != XAPIC_OFF_EOI));
1689 uValue = apicReadRaw32(pXApicPage, offReg);
1690 Log2(("APIC%u: apicReadRegister: offReg=%#x uValue=%#x\n", pVCpu->idCpu, offReg, uValue));
1691 break;
1692 }
1693
1694 case XAPIC_OFF_PPR:
1695 {
1696 uValue = apicGetPpr(pVCpu);
1697 break;
1698 }
1699
1700 case XAPIC_OFF_TIMER_CCR:
1701 {
1702 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1703 rc = apicGetTimerCcr(pDevIns, pVCpu, VINF_IOM_R3_MMIO_READ, &uValue);
1704 break;
1705 }
1706
1707 case XAPIC_OFF_APR:
1708 {
1709#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1710 /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
1711 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1712#else
1713# error "Implement Pentium and P6 family APIC architectures"
1714#endif
1715 break;
1716 }
1717
1718 default:
1719 {
1720 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1721 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "VCPU[%u]: offReg=%#RX16\n", pVCpu->idCpu, offReg);
1722 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1723 break;
1724 }
1725 }
1726
1727 *puValue = uValue;
1728 return rc;
1729}
1730
1731
1732/**
1733 * Writes an APIC register.
1734 *
1735 * @returns Strict VBox status code.
1736 * @param pDevIns The device instance.
1737 * @param pVCpu The cross context virtual CPU structure.
1738 * @param offReg The offset of the register being written.
1739 * @param uValue The register value.
1740 */
1741DECLINLINE(VBOXSTRICTRC) apicWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
1742{
1743 VMCPU_ASSERT_EMT(pVCpu);
1744 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1745 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1746
1747 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1748 switch (offReg)
1749 {
1750 case XAPIC_OFF_TPR:
1751 {
1752 rcStrict = apicSetTprEx(pVCpu, uValue, false /* fForceX2ApicBehaviour */);
1753 break;
1754 }
1755
1756 case XAPIC_OFF_LVT_TIMER:
1757#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1758 case XAPIC_OFF_LVT_THERMAL:
1759#endif
1760 case XAPIC_OFF_LVT_PERF:
1761 case XAPIC_OFF_LVT_LINT0:
1762 case XAPIC_OFF_LVT_LINT1:
1763 case XAPIC_OFF_LVT_ERROR:
1764 {
1765 rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
1766 break;
1767 }
1768
1769 case XAPIC_OFF_TIMER_ICR:
1770 {
1771 rcStrict = apicSetTimerIcr(pDevIns, pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
1772 break;
1773 }
1774
1775 case XAPIC_OFF_EOI:
1776 {
1777 rcStrict = apicSetEoi(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE, false /* fForceX2ApicBehaviour */);
1778 break;
1779 }
1780
1781 case XAPIC_OFF_LDR:
1782 {
1783 rcStrict = apicSetLdr(pVCpu, uValue);
1784 break;
1785 }
1786
1787 case XAPIC_OFF_DFR:
1788 {
1789 rcStrict = apicSetDfr(pVCpu, uValue);
1790 break;
1791 }
1792
1793 case XAPIC_OFF_SVR:
1794 {
1795 rcStrict = apicSetSvr(pVCpu, uValue);
1796 break;
1797 }
1798
1799 case XAPIC_OFF_ICR_LO:
1800 {
1801 rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE, true /* fUpdateStat */);
1802 break;
1803 }
1804
1805 case XAPIC_OFF_ICR_HI:
1806 {
1807 rcStrict = apicSetIcrHi(pVCpu, uValue);
1808 break;
1809 }
1810
1811 case XAPIC_OFF_TIMER_DCR:
1812 {
1813 rcStrict = apicSetTimerDcr(pVCpu, uValue);
1814 break;
1815 }
1816
1817 case XAPIC_OFF_ESR:
1818 {
1819 rcStrict = apicSetEsr(pVCpu, uValue);
1820 break;
1821 }
1822
1823 case XAPIC_OFF_APR:
1824 case XAPIC_OFF_RRD:
1825 {
1826#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1827 /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
1828#else
1829# error "Implement Pentium and P6 family APIC architectures"
1830#endif
1831 break;
1832 }
1833
1834 /* Read-only, write ignored: */
1835 case XAPIC_OFF_VERSION:
1836 case XAPIC_OFF_ID:
1837 break;
1838
1839 /* Unavailable/reserved in xAPIC mode: */
1840 case X2APIC_OFF_SELF_IPI:
1841 /* Read-only registers: */
1842 case XAPIC_OFF_PPR:
1843 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1844 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1845 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1846 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1847 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1848 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1849 case XAPIC_OFF_TIMER_CCR:
1850 default:
1851 {
1852 rcStrict = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "APIC%u: offReg=%#RX16\n", pVCpu->idCpu, offReg);
1853 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1854 break;
1855 }
1856 }
1857
1858 return rcStrict;
1859}
1860
1861
1862/**
1863 * Reads an APIC MSR.
1864 *
1865 * @returns Strict VBox status code.
1866 * @param pVCpu The cross context virtual CPU structure.
1867 * @param u32Reg The MSR being read.
1868 * @param pu64Value Where to store the read value.
1869 */
1870VMM_INT_DECL(VBOXSTRICTRC) APICReadMsr(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1871{
1872 /*
1873 * Validate.
1874 */
1875 VMCPU_ASSERT_EMT(pVCpu);
1876 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1877 Assert(pu64Value);
1878
1879 /*
1880 * Is the APIC enabled?
1881 */
1882 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1883 if (APICIsEnabled(pVCpu))
1884 { /* likely */ }
1885 else
1886 return apicMsrAccessError(pVCpu, u32Reg, pApic->enmMaxMode == PDMAPICMODE_NONE ?
1887 APICMSRACCESS_READ_DISALLOWED_CONFIG : APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1888
1889#ifndef IN_RING3
1890 if (pApic->CTXALLMID(f,Enabled))
1891 { /* likely */}
1892 else
1893 return VINF_CPUM_R3_MSR_READ;
1894#endif
1895
1896 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrRead));
1897
1898 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1899 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
1900 || pApic->fHyperVCompatMode))
1901 {
1902 switch (u32Reg)
1903 {
1904 /* Special handling for x2APIC: */
1905 case MSR_IA32_X2APIC_ICR:
1906 {
1907 *pu64Value = apicGetIcrNoCheck(pVCpu);
1908 break;
1909 }
1910
1911 /* Special handling, compatible with xAPIC: */
1912 case MSR_IA32_X2APIC_TIMER_CCR:
1913 {
1914 uint32_t uValue;
1915 rcStrict = apicGetTimerCcr(VMCPU_TO_DEVINS(pVCpu), pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1916 *pu64Value = uValue;
1917 break;
1918 }
1919
1920 /* Special handling, compatible with xAPIC: */
1921 case MSR_IA32_X2APIC_PPR:
1922 {
1923 *pu64Value = apicGetPpr(pVCpu);
1924 break;
1925 }
1926
1927 /* Raw read, compatible with xAPIC: */
1928 case MSR_IA32_X2APIC_ID:
1929 {
1930 STAM_COUNTER_INC(&pVCpu->apic.s.StatIdMsrRead);
1931 RT_FALL_THRU();
1932 }
1933 case MSR_IA32_X2APIC_VERSION:
1934 case MSR_IA32_X2APIC_TPR:
1935 case MSR_IA32_X2APIC_LDR:
1936 case MSR_IA32_X2APIC_SVR:
1937 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1938 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1939 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1940 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1941 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1942 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1943 case MSR_IA32_X2APIC_ESR:
1944 case MSR_IA32_X2APIC_LVT_TIMER:
1945 case MSR_IA32_X2APIC_LVT_THERMAL:
1946 case MSR_IA32_X2APIC_LVT_PERF:
1947 case MSR_IA32_X2APIC_LVT_LINT0:
1948 case MSR_IA32_X2APIC_LVT_LINT1:
1949 case MSR_IA32_X2APIC_LVT_ERROR:
1950 case MSR_IA32_X2APIC_TIMER_ICR:
1951 case MSR_IA32_X2APIC_TIMER_DCR:
1952 {
1953 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1954 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1955 *pu64Value = apicReadRaw32(pXApicPage, offReg);
1956 break;
1957 }
1958
1959 /* Write-only MSRs: */
1960 case MSR_IA32_X2APIC_SELF_IPI:
1961 case MSR_IA32_X2APIC_EOI:
1962 {
1963 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1964 break;
1965 }
1966
1967 /*
1968 * Windows guest using Hyper-V x2APIC MSR compatibility mode tries to read the "high"
1969 * LDR bits, which is quite absurd (as it's a 32-bit register) using this invalid MSR
1970 * index (0x80E), see @bugref{8382#c175}.
1971 */
1972 case MSR_IA32_X2APIC_LDR + 1:
1973 {
1974 if (pApic->fHyperVCompatMode)
1975 *pu64Value = 0;
1976 else
1977 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1978 break;
1979 }
1980
1981 /* Reserved MSRs: */
1982 case MSR_IA32_X2APIC_LVT_CMCI:
1983 default:
1984 {
1985 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1986 break;
1987 }
1988 }
1989 }
1990 else
1991 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
1992
1993 return rcStrict;
1994}
1995
1996
1997/**
1998 * Writes an APIC MSR.
1999 *
2000 * @returns Strict VBox status code.
2001 * @param pVCpu The cross context virtual CPU structure.
2002 * @param u32Reg The MSR being written.
2003 * @param u64Value The value to write.
2004 */
2005VMM_INT_DECL(VBOXSTRICTRC) APICWriteMsr(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t u64Value)
2006{
2007 /*
2008 * Validate.
2009 */
2010 VMCPU_ASSERT_EMT(pVCpu);
2011 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
2012
2013 /*
2014 * Is the APIC enabled?
2015 */
2016 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2017 if (APICIsEnabled(pVCpu))
2018 { /* likely */ }
2019 else
2020 return apicMsrAccessError(pVCpu, u32Reg, pApic->enmMaxMode == PDMAPICMODE_NONE ?
2021 APICMSRACCESS_WRITE_DISALLOWED_CONFIG : APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2022
2023#ifndef IN_RING3
2024 if (pApic->CTXALLMID(f,Enabled))
2025 { /* likely */ }
2026 else
2027 return VINF_CPUM_R3_MSR_WRITE;
2028#endif
2029
2030 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrWrite));
2031
2032 /*
2033 * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
2034 * accesses where they are ignored. Hence, we need to validate each register before
2035 * invoking the generic/xAPIC write functions.
2036 *
2037 * Bits 63:32 of all registers except the ICR are reserved, we'll handle this common
2038 * case first and handle validating the remaining bits on a per-register basis.
2039 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
2040 */
2041 if ( u32Reg != MSR_IA32_X2APIC_ICR
2042 && RT_HI_U32(u64Value))
2043 return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);
2044
2045 uint32_t u32Value = RT_LO_U32(u64Value);
2046 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2047 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
2048 || pApic->fHyperVCompatMode))
2049 {
2050 switch (u32Reg)
2051 {
2052 case MSR_IA32_X2APIC_TPR:
2053 {
2054 rcStrict = apicSetTprEx(pVCpu, u32Value, false /* fForceX2ApicBehaviour */);
2055 break;
2056 }
2057
2058 case MSR_IA32_X2APIC_ICR:
2059 {
2060 rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
2061 break;
2062 }
2063
2064 case MSR_IA32_X2APIC_SVR:
2065 {
2066 rcStrict = apicSetSvr(pVCpu, u32Value);
2067 break;
2068 }
2069
2070 case MSR_IA32_X2APIC_ESR:
2071 {
2072 rcStrict = apicSetEsr(pVCpu, u32Value);
2073 break;
2074 }
2075
2076 case MSR_IA32_X2APIC_TIMER_DCR:
2077 {
2078 rcStrict = apicSetTimerDcr(pVCpu, u32Value);
2079 break;
2080 }
2081
2082 case MSR_IA32_X2APIC_LVT_TIMER:
2083 case MSR_IA32_X2APIC_LVT_THERMAL:
2084 case MSR_IA32_X2APIC_LVT_PERF:
2085 case MSR_IA32_X2APIC_LVT_LINT0:
2086 case MSR_IA32_X2APIC_LVT_LINT1:
2087 case MSR_IA32_X2APIC_LVT_ERROR:
2088 {
2089 rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
2090 break;
2091 }
2092
2093 case MSR_IA32_X2APIC_TIMER_ICR:
2094 {
2095 rcStrict = apicSetTimerIcr(VMCPU_TO_DEVINS(pVCpu), pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
2096 break;
2097 }
2098
2099 /* Write-only MSRs: */
2100 case MSR_IA32_X2APIC_SELF_IPI:
2101 {
2102 uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
2103 apicPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE, 0 /* uSrcTag */);
2104 rcStrict = VINF_SUCCESS;
2105 break;
2106 }
2107
2108 case MSR_IA32_X2APIC_EOI:
2109 {
2110 rcStrict = apicSetEoi(pVCpu, u32Value, VINF_CPUM_R3_MSR_WRITE, false /* fForceX2ApicBehaviour */);
2111 break;
2112 }
2113
2114 /*
2115 * Windows guest using Hyper-V x2APIC MSR compatibility mode tries to write the "high"
2116 * LDR bits, which is quite absurd (as it's a 32-bit register) using this invalid MSR
2117 * index (0x80E). The write value was 0xffffffff on a Windows 8.1 64-bit guest. We can
2118 * safely ignore this nonsense, See @bugref{8382#c7}.
2119 */
2120 case MSR_IA32_X2APIC_LDR + 1:
2121 {
2122 if (pApic->fHyperVCompatMode)
2123 rcStrict = VINF_SUCCESS;
2124 else
2125 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2126 break;
2127 }
2128
2129 /* Special-treament (read-only normally, but not with Hyper-V) */
2130 case MSR_IA32_X2APIC_LDR:
2131 {
2132 if (pApic->fHyperVCompatMode)
2133 {
2134 rcStrict = apicSetLdr(pVCpu, u32Value);
2135 break;
2136 }
2137 }
2138 RT_FALL_THRU();
2139 /* Read-only MSRs: */
2140 case MSR_IA32_X2APIC_ID:
2141 case MSR_IA32_X2APIC_VERSION:
2142 case MSR_IA32_X2APIC_PPR:
2143 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
2144 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
2145 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
2146 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
2147 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
2148 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
2149 case MSR_IA32_X2APIC_TIMER_CCR:
2150 {
2151 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
2152 break;
2153 }
2154
2155 /* Reserved MSRs: */
2156 case MSR_IA32_X2APIC_LVT_CMCI:
2157 default:
2158 {
2159 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2160 break;
2161 }
2162 }
2163 }
2164 else
2165 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);
2166
2167 return rcStrict;
2168}
2169
2170
2171/**
2172 * Resets the APIC base MSR.
2173 *
2174 * @param pVCpu The cross context virtual CPU structure.
2175 */
2176static void apicResetBaseMsr(PVMCPUCC pVCpu)
2177{
2178 /*
2179 * Initialize the APIC base MSR. The APIC enable-bit is set upon power-up or reset[1].
2180 *
2181 * A Reset (in xAPIC and x2APIC mode) brings up the local APIC in xAPIC mode.
2182 * An INIT IPI does -not- cause a transition between xAPIC and x2APIC mode[2].
2183 *
2184 * [1] See AMD spec. 14.1.3 "Processor Initialization State"
2185 * [2] See Intel spec. 10.12.5.1 "x2APIC States".
2186 */
2187 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2188
2189 /* Construct. */
2190 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2191 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2192 uint64_t uApicBaseMsr = MSR_IA32_APICBASE_ADDR;
2193 if (pVCpu->idCpu == 0)
2194 uApicBaseMsr |= MSR_IA32_APICBASE_BSP;
2195
2196 /* If the VM was configured with no APIC, don't enable xAPIC mode, obviously. */
2197 if (pApic->enmMaxMode != PDMAPICMODE_NONE)
2198 {
2199 uApicBaseMsr |= MSR_IA32_APICBASE_EN;
2200
2201 /*
2202 * While coming out of a reset the APIC is enabled and in xAPIC mode. If software had previously
2203 * disabled the APIC (which results in the CPUID bit being cleared as well) we re-enable it here.
2204 * See Intel spec. 10.12.5.1 "x2APIC States".
2205 */
2206 if (CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/) == false)
2207 LogRel(("APIC%u: Resetting mode to xAPIC\n", pVCpu->idCpu));
2208 }
2209
2210 /* Commit. */
2211 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uApicBaseMsr);
2212}
2213
2214
2215/**
2216 * Initializes per-VCPU APIC to the state following an INIT reset
2217 * ("Wait-for-SIPI" state).
2218 *
2219 * @param pVCpu The cross context virtual CPU structure.
2220 */
2221void apicInitIpi(PVMCPUCC pVCpu)
2222{
2223 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2224 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2225
2226 /*
2227 * See Intel spec. 10.4.7.3 "Local APIC State After an INIT Reset (Wait-for-SIPI State)"
2228 * and AMD spec 16.3.2 "APIC Registers".
2229 *
2230 * The reason we don't simply zero out the entire APIC page and only set the non-zero members
2231 * is because there are some registers that are not touched by the INIT IPI (e.g. version)
2232 * operation and this function is only a subset of the reset operation.
2233 */
2234 RT_ZERO(pXApicPage->irr);
2235 RT_ZERO(pXApicPage->irr);
2236 RT_ZERO(pXApicPage->isr);
2237 RT_ZERO(pXApicPage->tmr);
2238 RT_ZERO(pXApicPage->icr_hi);
2239 RT_ZERO(pXApicPage->icr_lo);
2240 RT_ZERO(pXApicPage->ldr);
2241 RT_ZERO(pXApicPage->tpr);
2242 RT_ZERO(pXApicPage->ppr);
2243 RT_ZERO(pXApicPage->timer_icr);
2244 RT_ZERO(pXApicPage->timer_ccr);
2245 RT_ZERO(pXApicPage->timer_dcr);
2246
2247 pXApicPage->dfr.u.u4Model = XAPICDESTFORMAT_FLAT;
2248 pXApicPage->dfr.u.u28ReservedMb1 = UINT32_C(0xfffffff);
2249
2250 /** @todo CMCI. */
2251
2252 RT_ZERO(pXApicPage->lvt_timer);
2253 pXApicPage->lvt_timer.u.u1Mask = 1;
2254
2255#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
2256 RT_ZERO(pXApicPage->lvt_thermal);
2257 pXApicPage->lvt_thermal.u.u1Mask = 1;
2258#endif
2259
2260 RT_ZERO(pXApicPage->lvt_perf);
2261 pXApicPage->lvt_perf.u.u1Mask = 1;
2262
2263 RT_ZERO(pXApicPage->lvt_lint0);
2264 pXApicPage->lvt_lint0.u.u1Mask = 1;
2265
2266 RT_ZERO(pXApicPage->lvt_lint1);
2267 pXApicPage->lvt_lint1.u.u1Mask = 1;
2268
2269 RT_ZERO(pXApicPage->lvt_error);
2270 pXApicPage->lvt_error.u.u1Mask = 1;
2271
2272 RT_ZERO(pXApicPage->svr);
2273 pXApicPage->svr.u.u8SpuriousVector = 0xff;
2274
2275 /* The self-IPI register is reset to 0. See Intel spec. 10.12.5.1 "x2APIC States" */
2276 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2277 RT_ZERO(pX2ApicPage->self_ipi);
2278
2279 /* Clear the pending-interrupt bitmaps. */
2280 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2281 RT_BZERO(&pApicCpu->ApicPibLevel, sizeof(APICPIB));
2282 RT_BZERO(pApicCpu->CTX_SUFF(pvApicPib), sizeof(APICPIB));
2283
2284 /* Clear the interrupt line states for LINT0 and LINT1 pins. */
2285 pApicCpu->fActiveLint0 = false;
2286 pApicCpu->fActiveLint1 = false;
2287}
2288
2289
2290/**
2291 * Initializes per-VCPU APIC to the state following a power-up or hardware
2292 * reset.
2293 *
2294 * @param pVCpu The cross context virtual CPU structure.
2295 * @param fResetApicBaseMsr Whether to reset the APIC base MSR.
2296 */
2297void apicResetCpu(PVMCPUCC pVCpu, bool fResetApicBaseMsr)
2298{
2299 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2300
2301 LogFlow(("APIC%u: apicR3ResetCpu: fResetApicBaseMsr=%RTbool\n", pVCpu->idCpu, fResetApicBaseMsr));
2302
2303#ifdef VBOX_STRICT
2304 /* Verify that the initial APIC ID reported via CPUID matches our VMCPU ID assumption. */
2305 uint32_t uEax, uEbx, uEcx, uEdx;
2306 uEax = uEbx = uEcx = uEdx = UINT32_MAX;
2307 CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
2308 Assert(((uEbx >> 24) & 0xff) == pVCpu->idCpu);
2309#endif
2310
2311 /*
2312 * The state following a power-up or reset is a superset of the INIT state.
2313 * See Intel spec. 10.4.7.3 "Local APIC State After an INIT Reset ('Wait-for-SIPI' State)"
2314 */
2315 apicInitIpi(pVCpu);
2316
2317 /*
2318 * The APIC version register is read-only, so just initialize it here.
2319 * It is not clear from the specs, where exactly it is initialized.
2320 * The version determines the number of LVT entries and size of the APIC ID (8 bits for P4).
2321 */
2322 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2323#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
2324 pXApicPage->version.u.u8MaxLvtEntry = XAPIC_MAX_LVT_ENTRIES_P4 - 1;
2325 pXApicPage->version.u.u8Version = XAPIC_HARDWARE_VERSION_P4;
2326 AssertCompile(sizeof(pXApicPage->id.u8ApicId) >= XAPIC_APIC_ID_BIT_COUNT_P4 / 8);
2327#else
2328# error "Implement Pentium and P6 family APIC architectures"
2329#endif
2330
2331 /** @todo It isn't clear in the spec. where exactly the default base address
2332 * is (re)initialized, atm we do it here in Reset. */
2333 if (fResetApicBaseMsr)
2334 apicResetBaseMsr(pVCpu);
2335
2336 /*
2337 * Initialize the APIC ID register to xAPIC format.
2338 */
2339 ASMMemZero32(&pXApicPage->id, sizeof(pXApicPage->id));
2340 pXApicPage->id.u8ApicId = pVCpu->idCpu;
2341}
2342
2343
2344/**
2345 * Sets the APIC base MSR.
2346 *
2347 * @returns VBox status code - no informational ones, esp. not
2348 * VINF_CPUM_R3_MSR_WRITE. Only the following two:
2349 * @retval VINF_SUCCESS
2350 * @retval VERR_CPUM_RAISE_GP_0
2351 *
2352 * @param pVCpu The cross context virtual CPU structure.
2353 * @param u64BaseMsr The value to set.
2354 */
2355VMM_INT_DECL(int) APICSetBaseMsr(PVMCPUCC pVCpu, uint64_t u64BaseMsr)
2356{
2357 Assert(pVCpu);
2358
2359 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2360 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2361 APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
2362 APICMODE enmNewMode = apicGetMode(u64BaseMsr);
2363 uint64_t uBaseMsr = pApicCpu->uApicBaseMsr;
2364
2365 Log2(("APIC%u: ApicSetBaseMsr: u64BaseMsr=%#RX64 enmNewMode=%s enmOldMode=%s\n", pVCpu->idCpu, u64BaseMsr,
2366 apicGetModeName(enmNewMode), apicGetModeName(enmOldMode)));
2367
2368 /*
2369 * We do not support re-mapping the APIC base address because:
2370 * - We'll have to manage all the mappings ourselves in the APIC (reference counting based unmapping etc.)
2371 * i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
2372 * - It's unclear how/if IOM can fallback to handling regions as regular memory (if the MMIO
2373 * region remains mapped but doesn't belong to the called VCPU's APIC).
2374 */
2375 /** @todo Handle per-VCPU APIC base relocation. */
2376 if (MSR_IA32_APICBASE_GET_ADDR(uBaseMsr) != MSR_IA32_APICBASE_ADDR)
2377 {
2378 if (pVCpu->apic.s.cLogMaxSetApicBaseAddr++ < 5)
2379 LogRel(("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
2380 MSR_IA32_APICBASE_GET_ADDR(uBaseMsr)));
2381 return VERR_CPUM_RAISE_GP_0;
2382 }
2383
2384 /* Don't allow enabling xAPIC/x2APIC if the VM is configured with the APIC disabled. */
2385 if (pApic->enmMaxMode == PDMAPICMODE_NONE)
2386 {
2387 LogRel(("APIC%u: Disallowing APIC base MSR write as the VM is configured with APIC disabled!\n", pVCpu->idCpu));
2388 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_DISALLOWED_CONFIG);
2389 }
2390
2391 /*
2392 * Act on state transition.
2393 */
2394 if (enmNewMode != enmOldMode)
2395 {
2396 switch (enmNewMode)
2397 {
2398 case APICMODE_DISABLED:
2399 {
2400 /*
2401 * The APIC state needs to be reset (especially the APIC ID as x2APIC APIC ID bit layout
2402 * is different). We can start with a clean slate identical to the state after a power-up/reset.
2403 *
2404 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
2405 *
2406 * We'll also manually manage the APIC base MSR here. We want a single-point of commit
2407 * at the end of this function rather than updating it in apicR3ResetCpu. This means we also
2408 * need to update the CPUID leaf ourselves.
2409 */
2410 apicResetCpu(pVCpu, false /* fResetApicBaseMsr */);
2411 uBaseMsr &= ~(MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD);
2412 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false /*fVisible*/);
2413 LogRel(("APIC%u: Switched mode to disabled\n", pVCpu->idCpu));
2414 break;
2415 }
2416
2417 case APICMODE_XAPIC:
2418 {
2419 if (enmOldMode != APICMODE_DISABLED)
2420 {
2421 LogRel(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
2422 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2423 }
2424
2425 uBaseMsr |= MSR_IA32_APICBASE_EN;
2426 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/);
2427 LogRel(("APIC%u: Switched mode to xAPIC\n", pVCpu->idCpu));
2428 break;
2429 }
2430
2431 case APICMODE_X2APIC:
2432 {
2433 if (pApic->enmMaxMode != PDMAPICMODE_X2APIC)
2434 {
2435 LogRel(("APIC%u: Disallowing transition to x2APIC mode as the VM is configured with the x2APIC disabled!\n",
2436 pVCpu->idCpu));
2437 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2438 }
2439
2440 if (enmOldMode != APICMODE_XAPIC)
2441 {
2442 LogRel(("APIC%u: Can only transition to x2APIC state from xAPIC state\n", pVCpu->idCpu));
2443 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2444 }
2445
2446 uBaseMsr |= MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD;
2447
2448 /*
2449 * The APIC ID needs updating when entering x2APIC mode.
2450 * Software written APIC ID in xAPIC mode isn't preserved.
2451 * The APIC ID becomes read-only to software in x2APIC mode.
2452 *
2453 * See Intel spec. 10.12.5.1 "x2APIC States".
2454 */
2455 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2456 ASMMemZero32(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
2457 pX2ApicPage->id.u32ApicId = pVCpu->idCpu;
2458
2459 /*
2460 * LDR initialization occurs when entering x2APIC mode.
2461 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
2462 */
2463 pX2ApicPage->ldr.u32LogicalApicId = ((pX2ApicPage->id.u32ApicId & UINT32_C(0xffff0)) << 16)
2464 | (UINT32_C(1) << pX2ApicPage->id.u32ApicId & UINT32_C(0xf));
2465
2466 LogRel(("APIC%u: Switched mode to x2APIC\n", pVCpu->idCpu));
2467 break;
2468 }
2469
2470 case APICMODE_INVALID:
2471 default:
2472 {
2473 Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
2474 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2475 }
2476 }
2477 }
2478
2479 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
2480 return VINF_SUCCESS;
2481}
2482
2483
2484/**
2485 * Gets the APIC base MSR (no checks are performed wrt APIC hardware or its
2486 * state).
2487 *
2488 * @returns The base MSR value.
2489 * @param pVCpu The cross context virtual CPU structure.
2490 */
2491VMM_INT_DECL(uint64_t) APICGetBaseMsrNoCheck(PCVMCPUCC pVCpu)
2492{
2493 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2494 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2495 return pApicCpu->uApicBaseMsr;
2496}
2497
2498
2499/**
2500 * Gets the APIC base MSR.
2501 *
2502 * @returns Strict VBox status code.
2503 * @param pVCpu The cross context virtual CPU structure.
2504 * @param pu64Value Where to store the MSR value.
2505 */
2506VMM_INT_DECL(VBOXSTRICTRC) APICGetBaseMsr(PVMCPUCC pVCpu, uint64_t *pu64Value)
2507{
2508 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2509
2510 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2511 if (pApic->enmMaxMode != PDMAPICMODE_NONE)
2512 {
2513 *pu64Value = APICGetBaseMsrNoCheck(pVCpu);
2514 return VINF_SUCCESS;
2515 }
2516
2517 if (pVCpu->apic.s.cLogMaxGetApicBaseAddr++ < 5)
2518 LogRel(("APIC%u: Reading APIC base MSR (%#x) when there is no APIC -> #GP(0)\n", pVCpu->idCpu, MSR_IA32_APICBASE));
2519 return VERR_CPUM_RAISE_GP_0;
2520}
2521
2522
2523/**
2524 * Sets the TPR (Task Priority Register).
2525 *
2526 * @retval VINF_SUCCESS
2527 * @retval VERR_CPUM_RAISE_GP_0
2528 * @retval VERR_PDM_NO_APIC_INSTANCE
2529 *
2530 * @param pVCpu The cross context virtual CPU structure.
2531 * @param u8Tpr The TPR value to set.
2532 */
2533VMMDECL(int) APICSetTpr(PVMCPUCC pVCpu, uint8_t u8Tpr)
2534{
2535 if (APICIsEnabled(pVCpu))
2536 return apicSetTprEx(pVCpu, u8Tpr, false /* fForceX2ApicBehaviour */);
2537 return VERR_PDM_NO_APIC_INSTANCE;
2538}
2539
2540
2541/**
2542 * Gets the highest priority pending interrupt.
2543 *
2544 * @returns true if any interrupt is pending, false otherwise.
2545 * @param pVCpu The cross context virtual CPU structure.
2546 * @param pu8PendingIntr Where to store the interrupt vector if the
2547 * interrupt is pending (optional, can be NULL).
2548 */
2549static bool apicGetHighestPendingInterrupt(PCVMCPUCC pVCpu, uint8_t *pu8PendingIntr)
2550{
2551 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2552 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2553 if (irrv >= 0)
2554 {
2555 Assert(irrv <= (int)UINT8_MAX);
2556 if (pu8PendingIntr)
2557 *pu8PendingIntr = (uint8_t)irrv;
2558 return true;
2559 }
2560 return false;
2561}
2562
2563
2564/**
2565 * Gets the APIC TPR (Task Priority Register).
2566 *
2567 * @returns VBox status code.
2568 * @param pVCpu The cross context virtual CPU structure.
2569 * @param pu8Tpr Where to store the TPR.
2570 * @param pfPending Where to store whether there is a pending interrupt
2571 * (optional, can be NULL).
2572 * @param pu8PendingIntr Where to store the highest-priority pending
2573 * interrupt (optional, can be NULL).
2574 */
2575VMMDECL(int) APICGetTpr(PCVMCPUCC pVCpu, uint8_t *pu8Tpr, bool *pfPending, uint8_t *pu8PendingIntr)
2576{
2577 VMCPU_ASSERT_EMT(pVCpu);
2578 if (APICIsEnabled(pVCpu))
2579 {
2580 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2581 if (pfPending)
2582 {
2583 /*
2584 * Just return whatever the highest pending interrupt is in the IRR.
2585 * The caller is responsible for figuring out if it's masked by the TPR etc.
2586 */
2587 *pfPending = apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
2588 }
2589
2590 *pu8Tpr = pXApicPage->tpr.u8Tpr;
2591 return VINF_SUCCESS;
2592 }
2593
2594 *pu8Tpr = 0;
2595 return VERR_PDM_NO_APIC_INSTANCE;
2596}
2597
2598
2599/**
2600 * Gets the APIC timer frequency.
2601 *
2602 * @returns Strict VBox status code.
2603 * @param pVM The cross context VM structure.
2604 * @param pu64Value Where to store the timer frequency.
2605 */
2606VMM_INT_DECL(int) APICGetTimerFreq(PVMCC pVM, uint64_t *pu64Value)
2607{
2608 /*
2609 * Validate.
2610 */
2611 Assert(pVM);
2612 AssertPtrReturn(pu64Value, VERR_INVALID_PARAMETER);
2613
2614 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[0];
2615 if (APICIsEnabled(pVCpu))
2616 {
2617 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2618 *pu64Value = PDMDevHlpTimerGetFreq(VMCPU_TO_DEVINS(pVCpu), pApicCpu->hTimer);
2619 return VINF_SUCCESS;
2620 }
2621 return VERR_PDM_NO_APIC_INSTANCE;
2622}
2623
2624
2625/**
2626 * Delivers an interrupt message via the system bus.
2627 *
2628 * @returns VBox status code.
2629 * @param pVM The cross context VM structure.
2630 * @param uDest The destination mask.
2631 * @param uDestMode The destination mode.
2632 * @param uDeliveryMode The delivery mode.
2633 * @param uVector The interrupt vector.
2634 * @param uPolarity The interrupt line polarity.
2635 * @param uTriggerMode The trigger mode.
2636 * @param uSrcTag The interrupt source tag (debugging).
2637 */
2638VMM_INT_DECL(int) APICBusDeliver(PVMCC pVM, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
2639 uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uSrcTag)
2640{
2641 NOREF(uPolarity);
2642
2643 /*
2644 * If the APIC isn't enabled, do nothing and pretend success.
2645 */
2646 if (APICIsEnabled(pVM->CTX_SUFF(apCpus)[0]))
2647 { /* likely */ }
2648 else
2649 return VINF_SUCCESS;
2650
2651 /*
2652 * The destination field (mask) in the IO APIC redirectable table entry is 8-bits.
2653 * Hence, the broadcast mask is 0xff.
2654 * See IO APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirectable Table Registers".
2655 */
2656 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)uTriggerMode;
2657 XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
2658 XAPICDESTMODE enmDestMode = (XAPICDESTMODE)uDestMode;
2659 uint32_t fDestMask = uDest;
2660 uint32_t fBroadcastMask = UINT32_C(0xff);
2661
2662 Log2(("APIC: apicBusDeliver: fDestMask=%#x enmDestMode=%s enmTriggerMode=%s enmDeliveryMode=%s uVector=%#x\n", fDestMask,
2663 apicGetDestModeName(enmDestMode), apicGetTriggerModeName(enmTriggerMode), apicGetDeliveryModeName(enmDeliveryMode),
2664 uVector));
2665
2666 bool fIntrAccepted;
2667 VMCPUSET DestCpuSet;
2668 apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
2669 VBOXSTRICTRC rcStrict = apicSendIntr(pVM, NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2670 &fIntrAccepted, uSrcTag, VINF_SUCCESS /* rcRZ */);
2671 if (fIntrAccepted)
2672 return VBOXSTRICTRC_VAL(rcStrict);
2673 return VERR_APIC_INTR_DISCARDED;
2674}
2675
2676
2677/**
2678 * Assert/de-assert the local APIC's LINT0/LINT1 interrupt pins.
2679 *
2680 * @returns Strict VBox status code.
2681 * @param pVCpu The cross context virtual CPU structure.
2682 * @param u8Pin The interrupt pin (0 for LINT0 or 1 for LINT1).
2683 * @param u8Level The level (0 for low or 1 for high).
2684 * @param rcRZ The return code if the operation cannot be performed in
2685 * the current context.
2686 *
2687 * @note All callers totally ignores the status code!
2688 */
2689VMM_INT_DECL(VBOXSTRICTRC) APICLocalInterrupt(PVMCPUCC pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
2690{
2691 AssertReturn(u8Pin <= 1, VERR_INVALID_PARAMETER);
2692 AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);
2693
2694 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2695
2696 /* If the APIC is enabled, the interrupt is subject to LVT programming. */
2697 if (APICIsEnabled(pVCpu))
2698 {
2699 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2700
2701 /* Pick the LVT entry corresponding to the interrupt pin. */
2702 static const uint16_t s_au16LvtOffsets[] =
2703 {
2704 XAPIC_OFF_LVT_LINT0,
2705 XAPIC_OFF_LVT_LINT1
2706 };
2707 Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
2708 uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
2709 uint32_t const uLvt = apicReadRaw32(pXApicPage, offLvt);
2710
2711 /* If software hasn't masked the interrupt in the LVT entry, proceed interrupt processing. */
2712 if (!XAPIC_LVT_IS_MASKED(uLvt))
2713 {
2714 XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
2715 XAPICTRIGGERMODE enmTriggerMode = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);
2716
2717 switch (enmDeliveryMode)
2718 {
2719 case XAPICDELIVERYMODE_INIT:
2720 {
2721 /** @todo won't work in R0/RC because callers don't care about rcRZ. */
2722 AssertMsgFailed(("INIT through LINT0/LINT1 is not yet supported\n"));
2723 }
2724 RT_FALL_THRU();
2725 case XAPICDELIVERYMODE_FIXED:
2726 {
2727 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2728 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2729 bool fActive = RT_BOOL(u8Level & 1);
2730 bool volatile *pfActiveLine = u8Pin == 0 ? &pApicCpu->fActiveLint0 : &pApicCpu->fActiveLint1;
2731 /** @todo Polarity is busted elsewhere, we need to fix that
2732 * first. See @bugref{8386#c7}. */
2733#if 0
2734 uint8_t const u8Polarity = XAPIC_LVT_GET_POLARITY(uLvt);
2735 fActive ^= u8Polarity; */
2736#endif
2737 if (!fActive)
2738 {
2739 ASMAtomicCmpXchgBool(pfActiveLine, false, true);
2740 break;
2741 }
2742
2743 /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
2744 if (offLvt == XAPIC_OFF_LVT_LINT1)
2745 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
2746 /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
2747 delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
2748 use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
2749 means. */
2750
2751 bool fSendIntr;
2752 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2753 {
2754 /* Recognize and send the interrupt only on an edge transition. */
2755 fSendIntr = ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2756 }
2757 else
2758 {
2759 /* For level-triggered interrupts, redundant interrupts are not a problem. */
2760 Assert(enmTriggerMode == XAPICTRIGGERMODE_LEVEL);
2761 ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2762
2763 /* Only when the remote IRR isn't set, set it and send the interrupt. */
2764 if (!(pXApicPage->lvt_lint0.all.u32LvtLint0 & XAPIC_LVT_REMOTE_IRR))
2765 {
2766 Assert(offLvt == XAPIC_OFF_LVT_LINT0);
2767 ASMAtomicOrU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, XAPIC_LVT_REMOTE_IRR);
2768 fSendIntr = true;
2769 }
2770 else
2771 fSendIntr = false;
2772 }
2773
2774 if (fSendIntr)
2775 {
2776 VMCPUSET DestCpuSet;
2777 VMCPUSET_EMPTY(&DestCpuSet);
2778 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2779 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode,
2780 &DestCpuSet, NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
2781 }
2782 break;
2783 }
2784
2785 case XAPICDELIVERYMODE_SMI:
2786 case XAPICDELIVERYMODE_NMI:
2787 {
2788 VMCPUSET DestCpuSet;
2789 VMCPUSET_EMPTY(&DestCpuSet);
2790 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2791 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2792 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2793 NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
2794 break;
2795 }
2796
2797 case XAPICDELIVERYMODE_EXTINT:
2798 {
2799 Log2(("APIC%u: apicLocalInterrupt: %s ExtINT through LINT%u\n", pVCpu->idCpu,
2800 u8Level ? "Raising" : "Lowering", u8Pin));
2801 if (u8Level)
2802 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2803 else
2804 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2805 break;
2806 }
2807
2808 /* Reserved/unknown delivery modes: */
2809 case XAPICDELIVERYMODE_LOWEST_PRIO:
2810 case XAPICDELIVERYMODE_STARTUP:
2811 default:
2812 {
2813 AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x (%s) on LINT%d\n", pVCpu->idCpu,
2814 enmDeliveryMode, apicGetDeliveryModeName(enmDeliveryMode), u8Pin));
2815 rcStrict = VERR_INTERNAL_ERROR_3;
2816 break;
2817 }
2818 }
2819 }
2820 }
2821 else
2822 {
2823 /* The APIC is hardware disabled. The CPU behaves as though there is no on-chip APIC. */
2824 if (u8Pin == 0)
2825 {
2826 /* LINT0 behaves as an external interrupt pin. */
2827 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, %s INTR\n", pVCpu->idCpu,
2828 u8Level ? "raising" : "lowering"));
2829 if (u8Level)
2830 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2831 else
2832 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2833 }
2834 else
2835 {
2836 /* LINT1 behaves as NMI. */
2837 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, raising NMI\n", pVCpu->idCpu));
2838 apicSetInterruptFF(pVCpu, PDMAPICIRQ_NMI);
2839 }
2840 }
2841
2842 return rcStrict;
2843}
2844
2845
2846/**
2847 * Gets the next highest-priority interrupt from the APIC, marking it as an
2848 * "in-service" interrupt.
2849 *
2850 * @returns VBox status code.
2851 * @param pVCpu The cross context virtual CPU structure.
2852 * @param pu8Vector Where to store the vector.
2853 * @param puSrcTag Where to store the interrupt source tag (debugging).
2854 */
2855VMM_INT_DECL(int) APICGetInterrupt(PVMCPUCC pVCpu, uint8_t *pu8Vector, uint32_t *puSrcTag)
2856{
2857 VMCPU_ASSERT_EMT(pVCpu);
2858 Assert(pu8Vector);
2859
2860 LogFlow(("APIC%u: apicGetInterrupt:\n", pVCpu->idCpu));
2861
2862 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2863 bool const fApicHwEnabled = APICIsEnabled(pVCpu);
2864 if ( fApicHwEnabled
2865 && pXApicPage->svr.u.fApicSoftwareEnable)
2866 {
2867 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2868 if (RT_LIKELY(irrv >= 0))
2869 {
2870 Assert(irrv <= (int)UINT8_MAX);
2871 uint8_t const uVector = irrv;
2872
2873 /*
2874 * This can happen if the APIC receives an interrupt when the CPU has interrupts
2875 * disabled but the TPR is raised by the guest before re-enabling interrupts.
2876 */
2877 uint8_t const uTpr = pXApicPage->tpr.u8Tpr;
2878 if ( uTpr > 0
2879 && XAPIC_TPR_GET_TP(uVector) <= XAPIC_TPR_GET_TP(uTpr))
2880 {
2881 Log2(("APIC%u: apicGetInterrupt: Interrupt masked. uVector=%#x uTpr=%#x SpuriousVector=%#x\n", pVCpu->idCpu,
2882 uVector, uTpr, pXApicPage->svr.u.u8SpuriousVector));
2883 *pu8Vector = uVector;
2884 *puSrcTag = 0;
2885 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByTpr);
2886 return VERR_APIC_INTR_MASKED_BY_TPR;
2887 }
2888
2889 /*
2890 * The PPR should be up-to-date at this point through apicSetEoi().
2891 * We're on EMT so no parallel updates possible.
2892 * Subject the pending vector to PPR prioritization.
2893 */
2894 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
2895 if ( !uPpr
2896 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
2897 {
2898 apicClearVectorInReg(&pXApicPage->irr, uVector);
2899 apicSetVectorInReg(&pXApicPage->isr, uVector);
2900 apicUpdatePpr(pVCpu);
2901 apicSignalNextPendingIntr(pVCpu);
2902
2903 /* Retrieve the interrupt source tag associated with this interrupt. */
2904 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2905 AssertCompile(RT_ELEMENTS(pApicCpu->auSrcTags) > UINT8_MAX);
2906 *puSrcTag = pApicCpu->auSrcTags[uVector];
2907 pApicCpu->auSrcTags[uVector] = 0;
2908
2909 Log2(("APIC%u: apicGetInterrupt: Valid Interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
2910 *pu8Vector = uVector;
2911 return VINF_SUCCESS;
2912 }
2913 else
2914 {
2915 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByPpr);
2916 Log2(("APIC%u: apicGetInterrupt: Interrupt's priority is not higher than the PPR. uVector=%#x PPR=%#x\n",
2917 pVCpu->idCpu, uVector, uPpr));
2918 }
2919 }
2920 else
2921 Log2(("APIC%u: apicGetInterrupt: No pending bits in IRR\n", pVCpu->idCpu));
2922 }
2923 else
2924 Log2(("APIC%u: apicGetInterrupt: APIC %s disabled\n", pVCpu->idCpu, !fApicHwEnabled ? "hardware" : "software"));
2925
2926 *pu8Vector = 0;
2927 *puSrcTag = 0;
2928 return VERR_APIC_INTR_NOT_PENDING;
2929}
2930
2931
2932/**
2933 * @callback_method_impl{FNIOMMMIONEWREAD}
2934 */
2935DECLCALLBACK(VBOXSTRICTRC) apicReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
2936{
2937 NOREF(pvUser);
2938 Assert(!(off & 0xf));
2939 Assert(cb == 4); RT_NOREF_PV(cb);
2940
2941 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2942 uint16_t offReg = off & 0xff0;
2943 uint32_t uValue = 0;
2944
2945 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioRead));
2946
2947 VBOXSTRICTRC rc = VBOXSTRICTRC_VAL(apicReadRegister(pDevIns, pVCpu, offReg, &uValue));
2948 *(uint32_t *)pv = uValue;
2949
2950 Log2(("APIC%u: apicReadMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2951 return rc;
2952}
2953
2954
2955/**
2956 * @callback_method_impl{FNIOMMMIONEWWRITE}
2957 */
2958DECLCALLBACK(VBOXSTRICTRC) apicWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
2959{
2960 NOREF(pvUser);
2961 Assert(!(off & 0xf));
2962 Assert(cb == 4); RT_NOREF_PV(cb);
2963
2964 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2965 uint16_t offReg = off & 0xff0;
2966 uint32_t uValue = *(uint32_t *)pv;
2967
2968 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioWrite));
2969
2970 Log2(("APIC%u: apicWriteMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2971
2972 return apicWriteRegister(pDevIns, pVCpu, offReg, uValue);
2973}
2974
2975
2976/**
2977 * Sets the interrupt pending force-flag and pokes the EMT if required.
2978 *
2979 * @param pVCpu The cross context virtual CPU structure.
2980 * @param enmType The IRQ type.
2981 */
2982static void apicSetInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType)
2983{
2984#ifdef IN_RING3
2985 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
2986 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
2987#endif
2988
2989 switch (enmType)
2990 {
2991 case PDMAPICIRQ_HARDWARE:
2992 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2993 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
2994 break;
2995 case PDMAPICIRQ_UPDATE_PENDING: VMCPU_FF_SET(pVCpu, VMCPU_FF_UPDATE_APIC); break;
2996 case PDMAPICIRQ_NMI: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI); break;
2997 case PDMAPICIRQ_SMI: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI); break;
2998 case PDMAPICIRQ_EXTINT: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC); break;
2999 default:
3000 AssertMsgFailed(("enmType=%d\n", enmType));
3001 break;
3002 }
3003
3004 /*
3005 * We need to wake up the target CPU if we're not on EMT.
3006 */
3007#if defined(IN_RING0)
3008 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3009 VMCPUID idCpu = pVCpu->idCpu;
3010 if ( enmType != PDMAPICIRQ_HARDWARE
3011 && VMMGetCpuId(pVM) != idCpu)
3012 {
3013 switch (VMCPU_GET_STATE(pVCpu))
3014 {
3015 case VMCPUSTATE_STARTED_EXEC:
3016 GVMMR0SchedPokeNoGVMNoLock(pVM, idCpu);
3017 break;
3018
3019 case VMCPUSTATE_STARTED_HALTED:
3020 GVMMR0SchedWakeUpNoGVMNoLock(pVM, idCpu);
3021 break;
3022
3023 default:
3024 break; /* nothing to do in other states. */
3025 }
3026 }
3027#elif defined(IN_RING3)
3028 if (enmType != PDMAPICIRQ_HARDWARE)
3029 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM | VMNOTIFYFF_FLAGS_POKE);
3030#endif
3031}
3032
3033
3034/**
3035 * Clears the interrupt pending force-flag.
3036 *
3037 * @param pVCpu The cross context virtual CPU structure.
3038 * @param enmType The IRQ type.
3039 */
3040void apicClearInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType)
3041{
3042#ifdef IN_RING3
3043 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
3044 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
3045#endif
3046
3047 /* NMI/SMI can't be cleared. */
3048 switch (enmType)
3049 {
3050 case PDMAPICIRQ_HARDWARE: VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC); break;
3051 case PDMAPICIRQ_EXTINT: VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC); break;
3052 default:
3053 AssertMsgFailed(("enmType=%d\n", enmType));
3054 break;
3055 }
3056}
3057
3058
3059/**
3060 * Posts an interrupt to a target APIC.
3061 *
3062 * This function handles interrupts received from the system bus or
3063 * interrupts generated locally from the LVT or via a self IPI.
3064 *
3065 * Don't use this function to try and deliver ExtINT style interrupts.
3066 *
3067 * @returns true if the interrupt was accepted, false otherwise.
3068 * @param pVCpu The cross context virtual CPU structure.
3069 * @param uVector The vector of the interrupt to be posted.
3070 * @param enmTriggerMode The trigger mode of the interrupt.
3071 * @param uSrcTag The interrupt source tag (debugging).
3072 *
3073 * @thread Any.
3074 */
3075bool apicPostInterrupt(PVMCPUCC pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode, uint32_t uSrcTag)
3076{
3077 Assert(pVCpu);
3078 Assert(uVector > XAPIC_ILLEGAL_VECTOR_END);
3079
3080 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3081 PCAPIC pApic = VM_TO_APIC(pVM);
3082 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3083 bool fAccepted = true;
3084
3085 STAM_PROFILE_START(&pApicCpu->StatPostIntr, a);
3086
3087 /*
3088 * Only post valid interrupt vectors.
3089 * See Intel spec. 10.5.2 "Valid Interrupt Vectors".
3090 */
3091 if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
3092 {
3093 /*
3094 * If the interrupt is already pending in the IRR we can skip the
3095 * potential expensive operation of poking the guest EMT out of execution.
3096 */
3097 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
3098 if (!apicTestVectorInReg(&pXApicPage->irr, uVector)) /* PAV */
3099 {
3100 /* Update the interrupt source tag (debugging). */
3101 if (!pApicCpu->auSrcTags[uVector])
3102 pApicCpu->auSrcTags[uVector] = uSrcTag;
3103 else
3104 pApicCpu->auSrcTags[uVector] |= RT_BIT_32(31);
3105
3106 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u uVector=%#x\n", VMMGetCpuId(pVM), pVCpu->idCpu, uVector));
3107 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
3108 {
3109 if (pApic->fPostedIntrsEnabled)
3110 { /** @todo posted-interrupt call to hardware */ }
3111 else
3112 {
3113 apicSetVectorInPib(pApicCpu->CTX_SUFF(pvApicPib), uVector);
3114 uint32_t const fAlreadySet = apicSetNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
3115 if (!fAlreadySet)
3116 {
3117 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for edge-triggered intr. uVector=%#x\n", uVector));
3118 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
3119 }
3120 }
3121 }
3122 else
3123 {
3124 /*
3125 * Level-triggered interrupts requires updating of the TMR and thus cannot be
3126 * delivered asynchronously.
3127 */
3128 apicSetVectorInPib(&pApicCpu->ApicPibLevel, uVector);
3129 uint32_t const fAlreadySet = apicSetNotificationBitInPib(&pApicCpu->ApicPibLevel);
3130 if (!fAlreadySet)
3131 {
3132 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for level-triggered intr. uVector=%#x\n", uVector));
3133 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
3134 }
3135 }
3136 }
3137 else
3138 {
3139 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u. Vector %#x Already in IRR, skipping\n", VMMGetCpuId(pVM),
3140 pVCpu->idCpu, uVector));
3141 STAM_COUNTER_INC(&pApicCpu->StatPostIntrAlreadyPending);
3142 }
3143 }
3144 else
3145 {
3146 fAccepted = false;
3147 apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);
3148 }
3149
3150 STAM_PROFILE_STOP(&pApicCpu->StatPostIntr, a);
3151 return fAccepted;
3152}
3153
3154
3155/**
3156 * Starts the APIC timer.
3157 *
3158 * @param pVCpu The cross context virtual CPU structure.
3159 * @param uInitialCount The timer's Initial-Count Register (ICR), must be >
3160 * 0.
3161 * @thread Any.
3162 */
3163void apicStartTimer(PVMCPUCC pVCpu, uint32_t uInitialCount)
3164{
3165 Assert(pVCpu);
3166 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3167 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
3168 Assert(PDMDevHlpTimerIsLockOwner(pDevIns, pApicCpu->hTimer));
3169 Assert(uInitialCount > 0);
3170
3171 PCXAPICPAGE pXApicPage = APICCPU_TO_CXAPICPAGE(pApicCpu);
3172 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
3173 uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;
3174
3175 Log2(("APIC%u: apicStartTimer: uInitialCount=%#RX32 uTimerShift=%u cTicksToNext=%RU64\n", pVCpu->idCpu, uInitialCount,
3176 uTimerShift, cTicksToNext));
3177
3178 /*
3179 * The assumption here is that the timer doesn't tick during this call
3180 * and thus setting a relative time to fire next is accurate. The advantage
3181 * however is updating u64TimerInitial 'atomically' while setting the next
3182 * tick.
3183 */
3184 PDMDevHlpTimerSetRelative(pDevIns, pApicCpu->hTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
3185 apicHintTimerFreq(pDevIns, pApicCpu, uInitialCount, uTimerShift);
3186}
3187
3188
3189/**
3190 * Stops the APIC timer.
3191 *
3192 * @param pVCpu The cross context virtual CPU structure.
3193 * @thread Any.
3194 */
3195static void apicStopTimer(PVMCPUCC pVCpu)
3196{
3197 Assert(pVCpu);
3198 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3199 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
3200 Assert(PDMDevHlpTimerIsLockOwner(pDevIns, pApicCpu->hTimer));
3201
3202 Log2(("APIC%u: apicStopTimer\n", pVCpu->idCpu));
3203
3204 PDMDevHlpTimerStop(pDevIns, pApicCpu->hTimer); /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
3205 pApicCpu->uHintedTimerInitialCount = 0;
3206 pApicCpu->uHintedTimerShift = 0;
3207}
3208
3209
3210/**
3211 * Queues a pending interrupt as in-service.
3212 *
3213 * This function should only be needed without virtualized APIC
3214 * registers. With virtualized APIC registers, it's sufficient to keep
3215 * the interrupts pending in the IRR as the hardware takes care of
3216 * virtual interrupt delivery.
3217 *
3218 * @returns true if the interrupt was queued to in-service interrupts,
3219 * false otherwise.
3220 * @param pVCpu The cross context virtual CPU structure.
3221 * @param u8PendingIntr The pending interrupt to queue as
3222 * in-service.
3223 *
3224 * @remarks This assumes the caller has done the necessary checks and
3225 * is ready to take actually service the interrupt (TPR,
3226 * interrupt shadow etc.)
3227 */
3228VMM_INT_DECL(bool) APICQueueInterruptToService(PVMCPUCC pVCpu, uint8_t u8PendingIntr)
3229{
3230 VMCPU_ASSERT_EMT(pVCpu);
3231
3232 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3233 PAPIC pApic = VM_TO_APIC(pVM);
3234 Assert(!pApic->fVirtApicRegsEnabled);
3235 NOREF(pApic);
3236
3237 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3238 bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
3239 if (fIsPending)
3240 {
3241 apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
3242 apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
3243 apicUpdatePpr(pVCpu);
3244 return true;
3245 }
3246 return false;
3247}
3248
3249
3250/**
3251 * De-queues a pending interrupt from in-service.
3252 *
3253 * This undoes APICQueueInterruptToService() for premature VM-exits before event
3254 * injection.
3255 *
3256 * @param pVCpu The cross context virtual CPU structure.
3257 * @param u8PendingIntr The pending interrupt to de-queue from
3258 * in-service.
3259 */
3260VMM_INT_DECL(void) APICDequeueInterruptFromService(PVMCPUCC pVCpu, uint8_t u8PendingIntr)
3261{
3262 VMCPU_ASSERT_EMT(pVCpu);
3263
3264 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3265 PAPIC pApic = VM_TO_APIC(pVM);
3266 Assert(!pApic->fVirtApicRegsEnabled);
3267 NOREF(pApic);
3268
3269 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3270 bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
3271 if (fInService)
3272 {
3273 apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
3274 apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
3275 apicUpdatePpr(pVCpu);
3276 }
3277}
3278
3279
3280/**
3281 * Updates pending interrupts from the pending-interrupt bitmaps to the IRR.
3282 *
3283 * @param pVCpu The cross context virtual CPU structure.
3284 *
3285 * @note NEM/win is ASSUMING the an up to date TPR is not required here.
3286 */
3287VMMDECL(void) APICUpdatePendingInterrupts(PVMCPUCC pVCpu)
3288{
3289 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3290
3291 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3292 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3293 bool fHasPendingIntrs = false;
3294
3295 Log3(("APIC%u: APICUpdatePendingInterrupts:\n", pVCpu->idCpu));
3296 STAM_PROFILE_START(&pApicCpu->StatUpdatePendingIntrs, a);
3297
3298 /* Update edge-triggered pending interrupts. */
3299 PAPICPIB pPib = (PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib);
3300 for (;;)
3301 {
3302 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
3303 if (!fAlreadySet)
3304 break;
3305
3306 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
3307 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
3308 {
3309 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
3310 if (u64Fragment)
3311 {
3312 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
3313 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
3314
3315 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
3316 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3317
3318 pXApicPage->tmr.u[idxReg].u32Reg &= ~u32FragmentLo;
3319 pXApicPage->tmr.u[idxReg + 1].u32Reg &= ~u32FragmentHi;
3320 fHasPendingIntrs = true;
3321 }
3322 }
3323 }
3324
3325 /* Update level-triggered pending interrupts. */
3326 pPib = (PAPICPIB)&pApicCpu->ApicPibLevel;
3327 for (;;)
3328 {
3329 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)&pApicCpu->ApicPibLevel);
3330 if (!fAlreadySet)
3331 break;
3332
3333 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
3334 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
3335 {
3336 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
3337 if (u64Fragment)
3338 {
3339 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
3340 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
3341
3342 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
3343 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3344
3345 pXApicPage->tmr.u[idxReg].u32Reg |= u32FragmentLo;
3346 pXApicPage->tmr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3347 fHasPendingIntrs = true;
3348 }
3349 }
3350 }
3351
3352 STAM_PROFILE_STOP(&pApicCpu->StatUpdatePendingIntrs, a);
3353 Log3(("APIC%u: APICUpdatePendingInterrupts: fHasPendingIntrs=%RTbool\n", pVCpu->idCpu, fHasPendingIntrs));
3354
3355 if ( fHasPendingIntrs
3356 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
3357 apicSignalNextPendingIntr(pVCpu);
3358}
3359
3360
3361/**
3362 * Gets the highest priority pending interrupt.
3363 *
3364 * @returns true if any interrupt is pending, false otherwise.
3365 * @param pVCpu The cross context virtual CPU structure.
3366 * @param pu8PendingIntr Where to store the interrupt vector if the
3367 * interrupt is pending.
3368 */
3369VMM_INT_DECL(bool) APICGetHighestPendingInterrupt(PVMCPUCC pVCpu, uint8_t *pu8PendingIntr)
3370{
3371 VMCPU_ASSERT_EMT(pVCpu);
3372 return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
3373}
3374
3375
3376/**
3377 * Posts an interrupt to a target APIC, Hyper-V interface.
3378 *
3379 * @returns true if the interrupt was accepted, false otherwise.
3380 * @param pVCpu The cross context virtual CPU structure.
3381 * @param uVector The vector of the interrupt to be posted.
3382 * @param fAutoEoi Whether this interrupt has automatic EOI
3383 * treatment.
3384 * @param enmTriggerMode The trigger mode of the interrupt.
3385 *
3386 * @thread Any.
3387 */
3388VMM_INT_DECL(void) APICHvSendInterrupt(PVMCPUCC pVCpu, uint8_t uVector, bool fAutoEoi, XAPICTRIGGERMODE enmTriggerMode)
3389{
3390 Assert(pVCpu);
3391 Assert(!fAutoEoi); /** @todo AutoEOI. */
3392 RT_NOREF(fAutoEoi);
3393 apicPostInterrupt(pVCpu, uVector, enmTriggerMode, 0 /* uSrcTag */);
3394}
3395
3396
3397/**
3398 * Sets the Task Priority Register (TPR), Hyper-V interface.
3399 *
3400 * @returns Strict VBox status code.
3401 * @param pVCpu The cross context virtual CPU structure.
3402 * @param uTpr The TPR value to set.
3403 *
3404 * @remarks Validates like in x2APIC mode.
3405 */
3406VMM_INT_DECL(VBOXSTRICTRC) APICHvSetTpr(PVMCPUCC pVCpu, uint8_t uTpr)
3407{
3408 Assert(pVCpu);
3409 VMCPU_ASSERT_EMT(pVCpu);
3410 return apicSetTprEx(pVCpu, uTpr, true /* fForceX2ApicBehaviour */);
3411}
3412
3413
3414/**
3415 * Gets the Task Priority Register (TPR), Hyper-V interface.
3416 *
3417 * @returns The TPR value.
3418 * @param pVCpu The cross context virtual CPU structure.
3419 */
3420VMM_INT_DECL(uint8_t) APICHvGetTpr(PVMCPUCC pVCpu)
3421{
3422 Assert(pVCpu);
3423 VMCPU_ASSERT_EMT(pVCpu);
3424
3425 /*
3426 * The APIC could be operating in xAPIC mode and thus we should not use the apicReadMsr()
3427 * interface which validates the APIC mode and will throw a #GP(0) if not in x2APIC mode.
3428 * We could use the apicReadRegister() MMIO interface, but why bother getting the PDMDEVINS
3429 * pointer, so just directly read the APIC page.
3430 */
3431 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
3432 return apicReadRaw32(pXApicPage, XAPIC_OFF_TPR);
3433}
3434
3435
3436/**
3437 * Sets the Interrupt Command Register (ICR), Hyper-V interface.
3438 *
3439 * @returns Strict VBox status code.
3440 * @param pVCpu The cross context virtual CPU structure.
3441 * @param uIcr The ICR value to set.
3442 */
3443VMM_INT_DECL(VBOXSTRICTRC) APICHvSetIcr(PVMCPUCC pVCpu, uint64_t uIcr)
3444{
3445 Assert(pVCpu);
3446 VMCPU_ASSERT_EMT(pVCpu);
3447 return apicSetIcr(pVCpu, uIcr, VINF_CPUM_R3_MSR_WRITE);
3448}
3449
3450
3451/**
3452 * Gets the Interrupt Command Register (ICR), Hyper-V interface.
3453 *
3454 * @returns The ICR value.
3455 * @param pVCpu The cross context virtual CPU structure.
3456 */
3457VMM_INT_DECL(uint64_t) APICHvGetIcr(PVMCPUCC pVCpu)
3458{
3459 Assert(pVCpu);
3460 VMCPU_ASSERT_EMT(pVCpu);
3461 return apicGetIcrNoCheck(pVCpu);
3462}
3463
3464
3465/**
3466 * Sets the End-Of-Interrupt (EOI) register, Hyper-V interface.
3467 *
3468 * @returns Strict VBox status code.
3469 * @param pVCpu The cross context virtual CPU structure.
3470 * @param uEoi The EOI value.
3471 */
3472VMM_INT_DECL(VBOXSTRICTRC) APICHvSetEoi(PVMCPUCC pVCpu, uint32_t uEoi)
3473{
3474 Assert(pVCpu);
3475 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3476 return apicSetEoi(pVCpu, uEoi, VINF_CPUM_R3_MSR_WRITE, true /* fForceX2ApicBehaviour */);
3477}
3478
3479
3480/**
3481 * Gets the APIC page pointers for the specified VCPU.
3482 *
3483 * @returns VBox status code.
3484 * @param pVCpu The cross context virtual CPU structure.
3485 * @param pHCPhys Where to store the host-context physical address.
3486 * @param pR0Ptr Where to store the ring-0 address.
3487 * @param pR3Ptr Where to store the ring-3 address (optional).
3488 */
3489VMM_INT_DECL(int) APICGetApicPageForCpu(PCVMCPUCC pVCpu, PRTHCPHYS pHCPhys, PRTR0PTR pR0Ptr, PRTR3PTR pR3Ptr)
3490{
3491 AssertReturn(pVCpu, VERR_INVALID_PARAMETER);
3492 AssertReturn(pHCPhys, VERR_INVALID_PARAMETER);
3493 AssertReturn(pR0Ptr, VERR_INVALID_PARAMETER);
3494
3495 Assert(PDMHasApic(pVCpu->CTX_SUFF(pVM)));
3496
3497 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3498 *pHCPhys = pApicCpu->HCPhysApicPage;
3499 *pR0Ptr = pApicCpu->pvApicPageR0;
3500 if (pR3Ptr)
3501 *pR3Ptr = pApicCpu->pvApicPageR3;
3502 return VINF_SUCCESS;
3503}
3504
3505#ifndef IN_RING3
3506
3507/**
3508 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
3509 */
3510static DECLCALLBACK(int) apicRZConstruct(PPDMDEVINS pDevIns)
3511{
3512 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
3513 PAPICDEV pThis = PDMDEVINS_2_DATA(pDevIns, PAPICDEV);
3514 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
3515
3516 pVM->apicr0.s.pDevInsR0 = pDevIns;
3517
3518 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
3519 AssertRCReturn(rc, rc);
3520
3521 rc = PDMDevHlpApicSetUpContext(pDevIns);
3522 AssertRCReturn(rc, rc);
3523
3524 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, apicWriteMmio, apicReadMmio, NULL /*pvUser*/);
3525 AssertRCReturn(rc, rc);
3526
3527 return VINF_SUCCESS;
3528}
3529#endif /* !IN_RING3 */
3530
3531/**
3532 * APIC device registration structure.
3533 */
3534const PDMDEVREG g_DeviceAPIC =
3535{
3536 /* .u32Version = */ PDM_DEVREG_VERSION,
3537 /* .uReserved0 = */ 0,
3538 /* .szName = */ "apic",
3539 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE
3540 | PDM_DEVREG_FLAGS_REQUIRE_R0 | PDM_DEVREG_FLAGS_REQUIRE_RC,
3541 /* .fClass = */ PDM_DEVREG_CLASS_PIC,
3542 /* .cMaxInstances = */ 1,
3543 /* .uSharedVersion = */ 42,
3544 /* .cbInstanceShared = */ sizeof(APICDEV),
3545 /* .cbInstanceCC = */ 0,
3546 /* .cbInstanceRC = */ 0,
3547 /* .cMaxPciDevices = */ 0,
3548 /* .cMaxMsixVectors = */ 0,
3549 /* .pszDescription = */ "Advanced Programmable Interrupt Controller",
3550#if defined(IN_RING3)
3551 /* .szRCMod = */ "VMMRC.rc",
3552 /* .szR0Mod = */ "VMMR0.r0",
3553 /* .pfnConstruct = */ apicR3Construct,
3554 /* .pfnDestruct = */ apicR3Destruct,
3555 /* .pfnRelocate = */ apicR3Relocate,
3556 /* .pfnMemSetup = */ NULL,
3557 /* .pfnPowerOn = */ NULL,
3558 /* .pfnReset = */ apicR3Reset,
3559 /* .pfnSuspend = */ NULL,
3560 /* .pfnResume = */ NULL,
3561 /* .pfnAttach = */ NULL,
3562 /* .pfnDetach = */ NULL,
3563 /* .pfnQueryInterface = */ NULL,
3564 /* .pfnInitComplete = */ apicR3InitComplete,
3565 /* .pfnPowerOff = */ NULL,
3566 /* .pfnSoftReset = */ NULL,
3567 /* .pfnReserved0 = */ NULL,
3568 /* .pfnReserved1 = */ NULL,
3569 /* .pfnReserved2 = */ NULL,
3570 /* .pfnReserved3 = */ NULL,
3571 /* .pfnReserved4 = */ NULL,
3572 /* .pfnReserved5 = */ NULL,
3573 /* .pfnReserved6 = */ NULL,
3574 /* .pfnReserved7 = */ NULL,
3575#elif defined(IN_RING0)
3576 /* .pfnEarlyConstruct = */ NULL,
3577 /* .pfnConstruct = */ apicRZConstruct,
3578 /* .pfnDestruct = */ NULL,
3579 /* .pfnFinalDestruct = */ NULL,
3580 /* .pfnRequest = */ NULL,
3581 /* .pfnReserved0 = */ NULL,
3582 /* .pfnReserved1 = */ NULL,
3583 /* .pfnReserved2 = */ NULL,
3584 /* .pfnReserved3 = */ NULL,
3585 /* .pfnReserved4 = */ NULL,
3586 /* .pfnReserved5 = */ NULL,
3587 /* .pfnReserved6 = */ NULL,
3588 /* .pfnReserved7 = */ NULL,
3589#elif defined(IN_RC)
3590 /* .pfnConstruct = */ apicRZConstruct,
3591 /* .pfnReserved0 = */ NULL,
3592 /* .pfnReserved1 = */ NULL,
3593 /* .pfnReserved2 = */ NULL,
3594 /* .pfnReserved3 = */ NULL,
3595 /* .pfnReserved4 = */ NULL,
3596 /* .pfnReserved5 = */ NULL,
3597 /* .pfnReserved6 = */ NULL,
3598 /* .pfnReserved7 = */ NULL,
3599#else
3600# error "Not in IN_RING3, IN_RING0 or IN_RC!"
3601#endif
3602 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
3603};
3604
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette