VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/APICAll.cpp@85416

Last change on this file since 85416 was 84652, checked in by vboxsync, 5 years ago

APIC: Move generic defs from VBox/vmm/apic.h and APICInternal.h to VBox/apic.h.

/* $Id: APICAll.cpp 84652 2020-06-03 09:08:30Z vboxsync $ */
/** @file
 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
 */

/*
 * Copyright (C) 2016-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DEV_APIC
#include "APICInternal.h"
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcpuset.h>
#ifdef IN_RING0
# include <VBox/vmm/gvmm.h>
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static void apicSetInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType);
static void apicStopTimer(PVMCPUCC pVCpu);


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
/** An ordered array of valid LVT masks. */
static const uint32_t g_au32LvtValidMasks[] =
{
    XAPIC_LVT_TIMER_VALID,
    XAPIC_LVT_THERMAL_VALID,
    XAPIC_LVT_PERF_VALID,
    XAPIC_LVT_LINT_VALID,   /* LINT0 */
    XAPIC_LVT_LINT_VALID,   /* LINT1 */
    XAPIC_LVT_ERROR_VALID
};
#endif

#if 0
/** @todo CMCI */
static const uint32_t g_au32LvtExtValidMask[] =
{
    XAPIC_LVT_CMCI_VALID
};
#endif


/**
 * Checks if a vector is set in an APIC 256-bit sparse register.
 *
 * @returns true if the specified vector is set, false otherwise.
 * @param   pApicReg    The APIC 256-bit sparse register.
 * @param   uVector     The vector to check if set.
 */
DECLINLINE(bool) apicTestVectorInReg(const volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
{
    const volatile uint8_t *pbBitmap = (const volatile uint8_t *)&pApicReg->u[0];
    return ASMBitTest(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
}


/**
 * Sets the vector in an APIC 256-bit sparse register.
 *
 * @param   pApicReg    The APIC 256-bit sparse register.
 * @param   uVector     The vector to set.
 */
DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
{
    volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
    ASMAtomicBitSet(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
}


/**
 * Clears the vector in an APIC 256-bit sparse register.
 *
 * @param   pApicReg    The APIC 256-bit sparse register.
 * @param   uVector     The vector to clear.
 */
DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
{
    volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
    ASMAtomicBitClear(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
}
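
/* A minimal sketch of the sparse layout the three helpers above rely on,
 * assuming the usual xAPIC arrangement where each 32-vector fragment starts
 * on a 16-byte boundary (a common definition of the OFF/BIT macros): */
#if 0 /* illustrative only, not built */
    /* e.g. uVector = 0x53 lives in fragment 2 (vectors 0x40..0x5f): */
    uint32_t const offByte = (0x53 & 0xe0) >> 1;    /* = 0x20, hypothetical expansion of XAPIC_REG256_VECTOR_OFF */
    uint32_t const iBit    =  0x53 & 0x1f;          /* = 0x13, hypothetical expansion of XAPIC_REG256_VECTOR_BIT */
#endif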


#if 0 /* unused */
/**
 * Checks if a vector is set in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @returns true if the specified vector is set, false otherwise.
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to check if set.
 */
DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    return ASMBitTest(pvPib, uVector);
}
#endif /* unused */


/**
 * Atomically sets the PIB notification bit.
 *
 * @returns non-zero if the bit was already set, 0 otherwise.
 * @param   pApicPib    Pointer to the PIB.
 */
DECLINLINE(uint32_t) apicSetNotificationBitInPib(PAPICPIB pApicPib)
{
    return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, RT_BIT_32(31));
}


/**
 * Atomically tests and clears the PIB notification bit.
 *
 * @returns non-zero if the bit was already set, 0 otherwise.
 * @param   pApicPib    Pointer to the PIB.
 */
DECLINLINE(uint32_t) apicClearNotificationBitInPib(PAPICPIB pApicPib)
{
    return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, UINT32_C(0));
}


/**
 * Sets the vector in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to set.
 */
DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    ASMAtomicBitSet(pvPib, uVector);
}

#if 0 /* unused */
/**
 * Clears the vector in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to clear.
 */
DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    ASMAtomicBitClear(pvPib, uVector);
}
#endif /* unused */

#if 0 /* unused */
/**
 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
 * register.
 *
 * @param   pApicReg        The APIC 256-bit sparse register.
 * @param   idxFragment     The index of the 32-bit fragment in @a
 *                          pApicReg.
 * @param   u32Fragment     The 32-bit vector fragment to OR.
 */
DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
{
    Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
    ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
}
#endif /* unused */


#if 0 /* unused */
/**
 * Atomically AND's a fragment (32 vectors) into an APIC
 * 256-bit sparse register.
 *
 * @param   pApicReg        The APIC 256-bit sparse register.
 * @param   idxFragment     The index of the 32-bit fragment in @a
 *                          pApicReg.
 * @param   u32Fragment     The 32-bit vector fragment to AND.
 */
DECLINLINE(void) apicAndVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
{
    Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
    ASMAtomicAndU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
}
#endif /* unused */


/**
 * Reports and returns appropriate error code for invalid MSR accesses.
 *
 * @returns VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u32Reg      The MSR being accessed.
 * @param   enmAccess   The invalid-access type.
 */
static int apicMsrAccessError(PVMCPUCC pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
{
    static struct
    {
        const char *pszBefore;   /* The error message before printing the MSR index */
        const char *pszAfter;    /* The error message after printing the MSR index */
    } const s_aAccess[] =
    {
        /* enmAccess  pszBefore                        pszAfter */
        /* 0 */     { "read MSR",                      " while not in x2APIC mode" },
        /* 1 */     { "write MSR",                     " while not in x2APIC mode" },
        /* 2 */     { "read reserved/unknown MSR",     "" },
        /* 3 */     { "write reserved/unknown MSR",    "" },
        /* 4 */     { "read write-only MSR",           "" },
        /* 5 */     { "write read-only MSR",           "" },
        /* 6 */     { "read reserved bits of MSR",     "" },
        /* 7 */     { "write reserved bits of MSR",    "" },
        /* 8 */     { "write an invalid value to MSR", "" },
        /* 9 */     { "write MSR",                     " disallowed by configuration" },
        /* 10 */    { "read MSR",                      " disallowed by configuration" },
    };
    AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);

    size_t const i = enmAccess;
    Assert(i < RT_ELEMENTS(s_aAccess));
    if (pVCpu->apic.s.cLogMaxAccessError++ < 5)
        LogRel(("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg, s_aAccess[i].pszAfter));
    return VERR_CPUM_RAISE_GP_0;
}


/**
 * Gets the descriptive APIC mode.
 *
 * @returns The name.
 * @param   enmMode     The xAPIC mode.
 */
const char *apicGetModeName(APICMODE enmMode)
{
    switch (enmMode)
    {
        case APICMODE_DISABLED:  return "Disabled";
        case APICMODE_XAPIC:     return "xAPIC";
        case APICMODE_X2APIC:    return "x2APIC";
        default:                 break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive destination format name.
 *
 * @returns The destination format name.
 * @param   enmDestFormat   The destination format.
 */
const char *apicGetDestFormatName(XAPICDESTFORMAT enmDestFormat)
{
    switch (enmDestFormat)
    {
        case XAPICDESTFORMAT_FLAT:      return "Flat";
        case XAPICDESTFORMAT_CLUSTER:   return "Cluster";
        default:                        break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive delivery mode name.
 *
 * @returns The delivery mode name.
 * @param   enmDeliveryMode     The delivery mode.
 */
const char *apicGetDeliveryModeName(XAPICDELIVERYMODE enmDeliveryMode)
{
    switch (enmDeliveryMode)
    {
        case XAPICDELIVERYMODE_FIXED:        return "Fixed";
        case XAPICDELIVERYMODE_LOWEST_PRIO:  return "Lowest-priority";
        case XAPICDELIVERYMODE_SMI:          return "SMI";
        case XAPICDELIVERYMODE_NMI:          return "NMI";
        case XAPICDELIVERYMODE_INIT:         return "INIT";
        case XAPICDELIVERYMODE_STARTUP:      return "SIPI";
        case XAPICDELIVERYMODE_EXTINT:       return "ExtINT";
        default:                             break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive destination mode name.
 *
 * @returns The destination mode name.
 * @param   enmDestMode     The destination mode.
 */
const char *apicGetDestModeName(XAPICDESTMODE enmDestMode)
{
    switch (enmDestMode)
    {
        case XAPICDESTMODE_PHYSICAL:  return "Physical";
        case XAPICDESTMODE_LOGICAL:   return "Logical";
        default:                      break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive trigger mode name.
 *
 * @returns The trigger mode name.
 * @param   enmTriggerMode  The trigger mode.
 */
const char *apicGetTriggerModeName(XAPICTRIGGERMODE enmTriggerMode)
{
    switch (enmTriggerMode)
    {
        case XAPICTRIGGERMODE_EDGE:     return "Edge";
        case XAPICTRIGGERMODE_LEVEL:    return "Level";
        default:                        break;
    }
    return "Invalid";
}


/**
 * Gets the destination shorthand name.
 *
 * @returns The destination shorthand name.
 * @param   enmDestShorthand    The destination shorthand.
 */
const char *apicGetDestShorthandName(XAPICDESTSHORTHAND enmDestShorthand)
{
    switch (enmDestShorthand)
    {
        case XAPICDESTSHORTHAND_NONE:           return "None";
        case XAPICDESTSHORTHAND_SELF:           return "Self";
        case XAPIDDESTSHORTHAND_ALL_INCL_SELF:  return "All including self";
        case XAPICDESTSHORTHAND_ALL_EXCL_SELF:  return "All excluding self";
        default:                                break;
    }
    return "Invalid";
}


/**
 * Gets the timer mode name.
 *
 * @returns The timer mode name.
 * @param   enmTimerMode    The timer mode.
 */
const char *apicGetTimerModeName(XAPICTIMERMODE enmTimerMode)
{
    switch (enmTimerMode)
    {
        case XAPICTIMERMODE_ONESHOT:        return "One-shot";
        case XAPICTIMERMODE_PERIODIC:       return "Periodic";
        case XAPICTIMERMODE_TSC_DEADLINE:   return "TSC deadline";
        default:                            break;
    }
    return "Invalid";
}


/**
 * Gets the APIC mode given the base MSR value.
 *
 * @returns The APIC mode.
 * @param   uApicBaseMsr    The APIC Base MSR value.
 */
APICMODE apicGetMode(uint64_t uApicBaseMsr)
{
    uint32_t const uMode   = (uApicBaseMsr >> 10) & UINT64_C(3);
    APICMODE const enmMode = (APICMODE)uMode;
#ifdef VBOX_STRICT
    /* Paranoia. */
    switch (uMode)
    {
        case APICMODE_DISABLED:
        case APICMODE_INVALID:
        case APICMODE_XAPIC:
        case APICMODE_X2APIC:
            break;
        default:
            AssertMsgFailed(("Invalid mode"));
    }
#endif
    return enmMode;
}


/**
 * Returns whether the APIC is hardware enabled or not.
 *
 * @returns true if enabled, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) APICIsEnabled(PCVMCPUCC pVCpu)
{
    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    return RT_BOOL(pApicCpu->uApicBaseMsr & MSR_IA32_APICBASE_EN);
}


/**
 * Finds the most significant set bit in an APIC 256-bit sparse register.
 *
 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
 * @param   pReg            The APIC 256-bit sparse register.
 * @param   rcNotFound      What to return when no bit is set.
 */
static int apicGetHighestSetBitInReg(volatile const XAPIC256BITREG *pReg, int rcNotFound)
{
    ssize_t const  cFragments     = RT_ELEMENTS(pReg->u);
    unsigned const uFragmentShift = 5;
    AssertCompile(1 << uFragmentShift == sizeof(pReg->u[0].u32Reg) * 8);
    for (ssize_t i = cFragments - 1; i >= 0; i--)
    {
        uint32_t const uFragment = pReg->u[i].u32Reg;
        if (uFragment)
        {
            unsigned idxSetBit = ASMBitLastSetU32(uFragment);
            --idxSetBit;
            idxSetBit |= i << uFragmentShift;
            return idxSetBit;
        }
    }
    return rcNotFound;
}
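
/* A minimal worked example of the scan above: if only fragment 3 is non-zero
 * with value 0x00020000, ASMBitLastSetU32 returns the 1-based index 18, so
 * idxSetBit becomes 17, and OR'ing in (3 << 5) = 96 yields vector 113. */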


/**
 * Reads a 32-bit register at a specified offset.
 *
 * @returns The value at the specified offset.
 * @param   pXApicPage      The xAPIC page.
 * @param   offReg          The offset of the register being read.
 */
DECLINLINE(uint32_t) apicReadRaw32(PCXAPICPAGE pXApicPage, uint16_t offReg)
{
    Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
    uint8_t const *pbXApic =  (const uint8_t *)pXApicPage;
    uint32_t const uValue  = *(const uint32_t *)(pbXApic + offReg);
    return uValue;
}


/**
 * Writes a 32-bit register at a specified offset.
 *
 * @param   pXApicPage      The xAPIC page.
 * @param   offReg          The offset of the register being written.
 * @param   uReg            The value of the register.
 */
DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
{
    Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
    uint8_t *pbXApic = (uint8_t *)pXApicPage;
    *(uint32_t *)(pbXApic + offReg) = uReg;
}


/**
 * Sets an error in the internal ESR of the specified APIC.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uError      The error.
 * @thread  Any.
 */
DECLINLINE(void) apicSetError(PVMCPUCC pVCpu, uint32_t uError)
{
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
}


/**
 * Clears all errors in the internal ESR.
 *
 * @returns The value of the internal ESR before clearing.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
DECLINLINE(uint32_t) apicClearAllErrors(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
}


/**
 * Signals the guest if a pending interrupt is ready to be serviced.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void apicSignalNextPendingIntr(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    if (pXApicPage->svr.u.fApicSoftwareEnable)
    {
        int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1 /* rcNotFound */);
        if (irrv >= 0)
        {
            Assert(irrv <= (int)UINT8_MAX);
            uint8_t const uVector = irrv;
            uint8_t const uPpr    = pXApicPage->ppr.u8Ppr;
            if (   !uPpr
                || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
            {
                Log2(("APIC%u: apicSignalNextPendingIntr: Signalling pending interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
                apicSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
            }
            else
            {
                Log2(("APIC%u: apicSignalNextPendingIntr: Nothing to signal. uVector=%#x uPpr=%#x uTpr=%#x\n", pVCpu->idCpu,
                      uVector, uPpr, pXApicPage->tpr.u8Tpr));
            }
        }
    }
    else
    {
        Log2(("APIC%u: apicSignalNextPendingIntr: APIC software-disabled, clearing pending interrupt\n", pVCpu->idCpu));
        apicClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
    }
}
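
/* A minimal worked example of the PPR gate above, assuming XAPIC_PPR_GET_PP
 * extracts the priority class from the high nibble: a pending vector 0x51
 * (class 5) is signalled while PPR = 0x42 (class 4), but held back while
 * PPR = 0x62 (class 6) until an EOI or TPR write lowers the PPR. */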


/**
 * Sets the Spurious-Interrupt Vector Register (SVR).
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uSvr        The SVR value.
 */
static int apicSetSvr(PVMCPUCC pVCpu, uint32_t uSvr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    uint32_t   uValidMask = XAPIC_SVR_VALID;
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    if (pXApicPage->version.u.fEoiBroadcastSupression)
        uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;

    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uSvr & ~uValidMask))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);

    Log2(("APIC%u: apicSetSvr: uSvr=%#RX32\n", pVCpu->idCpu, uSvr));
    apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
    if (!pXApicPage->svr.u.fApicSoftwareEnable)
    {
        /** @todo CMCI. */
        pXApicPage->lvt_timer.u.u1Mask   = 1;
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        pXApicPage->lvt_thermal.u.u1Mask = 1;
#endif
        pXApicPage->lvt_perf.u.u1Mask    = 1;
        pXApicPage->lvt_lint0.u.u1Mask   = 1;
        pXApicPage->lvt_lint1.u.u1Mask   = 1;
        pXApicPage->lvt_error.u.u1Mask   = 1;
    }

    apicSignalNextPendingIntr(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Sends an interrupt to one or more APICs.
 *
 * @returns Strict VBox status code.
 * @param   pVM                 The cross context VM structure.
 * @param   pVCpu               The cross context virtual CPU structure, can be
 *                              NULL if the source of the interrupt is not an
 *                              APIC (e.g. a bus).
 * @param   uVector             The interrupt vector.
 * @param   enmTriggerMode      The trigger mode.
 * @param   enmDeliveryMode     The delivery mode.
 * @param   pDestCpuSet         The destination CPU set.
 * @param   pfIntrAccepted      Where to store whether this interrupt was
 *                              accepted by the target APIC(s) or not.
 *                              Optional, can be NULL.
 * @param   uSrcTag             The interrupt source tag (debugging).
 * @param   rcRZ                The return code if the operation cannot be
 *                              performed in the current context.
 */
static VBOXSTRICTRC apicSendIntr(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
                                 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, bool *pfIntrAccepted,
                                 uint32_t uSrcTag, int rcRZ)
{
    VBOXSTRICTRC  rcStrict  = VINF_SUCCESS;
    VMCPUID const cCpus     = pVM->cCpus;
    bool          fAccepted = false;
    switch (enmDeliveryMode)
    {
        case XAPICDELIVERYMODE_FIXED:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    PVMCPUCC pItVCpu = pVM->CTX_SUFF(apCpus)[idCpu];
                    if (APICIsEnabled(pItVCpu))
                        fAccepted = apicPostInterrupt(pItVCpu, uVector, enmTriggerMode, uSrcTag);
                }
            break;
        }

        case XAPICDELIVERYMODE_LOWEST_PRIO:
        {
            VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
            AssertMsgBreak(idCpu < pVM->cCpus, ("APIC: apicSendIntr: No CPU found for lowest-priority delivery mode! idCpu=%u\n", idCpu));
            PVMCPUCC pVCpuDst = pVM->CTX_SUFF(apCpus)[idCpu];
            if (APICIsEnabled(pVCpuDst))
                fAccepted = apicPostInterrupt(pVCpuDst, uVector, enmTriggerMode, uSrcTag);
            else
                AssertMsgFailed(("APIC: apicSendIntr: Target APIC not enabled in lowest-priority delivery mode! idCpu=%u\n", idCpu));
            break;
        }

        case XAPICDELIVERYMODE_SMI:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Raising SMI on VCPU%u\n", idCpu));
                    apicSetInterruptFF(pVM->CTX_SUFF(apCpus)[idCpu], PDMAPICIRQ_SMI);
                    fAccepted = true;
                }
            break;
        }

        case XAPICDELIVERYMODE_NMI:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    PVMCPUCC pItVCpu = pVM->CTX_SUFF(apCpus)[idCpu];
                    if (APICIsEnabled(pItVCpu))
                    {
                        Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
                        apicSetInterruptFF(pItVCpu, PDMAPICIRQ_NMI);
                        fAccepted = true;
                    }
                }
            break;
        }

        case XAPICDELIVERYMODE_INIT:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Issuing INIT to VCPU%u\n", idCpu));
                    VMMR3SendInitIpi(pVM, idCpu);
                    fAccepted = true;
                }
#else
            /* We need to return to ring-3 to deliver the INIT. */
            rcStrict  = rcRZ;
            fAccepted = true;
#endif
            break;
        }

        case XAPICDELIVERYMODE_STARTUP:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Issuing SIPI to VCPU%u\n", idCpu));
                    VMMR3SendStartupIpi(pVM, idCpu, uVector);
                    fAccepted = true;
                }
#else
            /* We need to return to ring-3 to deliver the SIPI. */
            rcStrict  = rcRZ;
            fAccepted = true;
            Log2(("APIC: apicSendIntr: SIPI issued, returning to RZ. rc=%Rrc\n", rcRZ));
#endif
            break;
        }

        case XAPICDELIVERYMODE_EXTINT:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Raising EXTINT on VCPU%u\n", idCpu));
                    apicSetInterruptFF(pVM->CTX_SUFF(apCpus)[idCpu], PDMAPICIRQ_EXTINT);
                    fAccepted = true;
                }
            break;
        }

        default:
        {
            AssertMsgFailed(("APIC: apicSendIntr: Unsupported delivery mode %#x (%s)\n", enmDeliveryMode,
                             apicGetDeliveryModeName(enmDeliveryMode)));
            break;
        }
    }

    /*
     * If an illegal vector is programmed, set the 'send illegal vector' error here if the
     * interrupt is being sent by an APIC.
     *
     * The 'receive illegal vector' will be set on the target APIC when the interrupt
     * gets generated, see apicPostInterrupt().
     *
     * See Intel spec. 10.5.3 "Error Handling".
     */
    if (   rcStrict != rcRZ
        && pVCpu)
    {
        /*
         * Flag only errors when the delivery mode is fixed and not others.
         *
         * Ubuntu 10.04-3 amd64 live CD with 2 VCPUs gets upset as it sends an SIPI to the
         * 2nd VCPU with vector 6 and checks the ESR for no errors, see @bugref{8245#c86}.
         */
        /** @todo The spec says this for LVT, but not explicitly for ICR-lo,
         *        but it probably is true. */
        if (enmDeliveryMode == XAPICDELIVERYMODE_FIXED)
        {
            if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
                apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
        }
    }

    if (pfIntrAccepted)
        *pfIntrAccepted = fAccepted;

    return rcStrict;
}
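
/* A minimal usage sketch for apicSendIntr, assuming an EMT caller sending a
 * fixed, edge-triggered vector 0x40 to VCPU 1 (everything but the APIs shown
 * above is the caller's own): */
#if 0 /* illustrative only, not built */
    VMCPUSET DestCpuSet;
    VMCPUSET_EMPTY(&DestCpuSet);
    VMCPUSET_ADD(&DestCpuSet, 1 /* idCpu */);
    bool fAccepted = false;
    VBOXSTRICTRC rcStrict = apicSendIntr(pVM, pVCpu, 0x40 /* uVector */, XAPICTRIGGERMODE_EDGE, XAPICDELIVERYMODE_FIXED,
                                         &DestCpuSet, &fAccepted, 0 /* uSrcTag */, VINF_IOM_R3_MMIO_WRITE /* rcRZ */);
#endif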


/**
 * Checks if this APIC belongs to a logical destination.
 *
 * @returns true if the APIC belongs to the logical
 *          destination, false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   fDest       The destination mask.
 *
 * @thread  Any.
 */
static bool apicIsLogicalDest(PVMCPUCC pVCpu, uint32_t fDest)
{
    if (XAPIC_IN_X2APIC_MODE(pVCpu))
    {
        /*
         * Flat logical mode is not supported in x2APIC mode.
         * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
         *    - High 16 bits is the cluster ID.
         *    - Low 16 bits: each bit represents a unique APIC within the cluster.
         */
        PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
        uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
        if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
            return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
        return false;
    }

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
     * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
    if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
        return true;

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
    if (enmDestFormat == XAPICDESTFORMAT_FLAT)
    {
        /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
        uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
    }

    /*
     * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
     *    - High 4 bits is the cluster ID.
     *    - Low 4 bits: each bit represents a unique APIC within the cluster.
     */
    Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
    uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
    if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
    return false;
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
}
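
/* A minimal worked example for the xAPIC clustered branch above, assuming
 * the high nibble of the LDR is the cluster ID: with LDR = 0x28 (cluster 2,
 * member bit 3), fDest = 0x2a (cluster 2, members 1 and 3) matches, while
 * fDest = 0x31 (cluster 3) or fDest = 0x24 (cluster 2, member 2) does not. */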


/**
 * Figures out the set of destination CPUs for a given destination mode, format
 * and delivery mode setting.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fDestMask       The destination mask.
 * @param   fBroadcastMask  The broadcast mask.
 * @param   enmDestMode     The destination mode.
 * @param   enmDeliveryMode The delivery mode.
 * @param   pDestCpuSet     The destination CPU set to update.
 */
static void apicGetDestCpuSet(PVMCC pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
                              XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
{
    VMCPUSET_EMPTY(pDestCpuSet);

    /*
     * Physical destination mode only supports either a broadcast or a single target.
     *    - Broadcast with lowest-priority delivery mode is not supported[1], we deliver it
     *      as a regular broadcast like in fixed delivery mode.
     *    - For a single target, lowest-priority delivery mode makes no sense. We deliver
     *      to the target like in fixed delivery mode.
     *
     * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     */
    if (   enmDestMode == XAPICDESTMODE_PHYSICAL
        && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
        enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
    }

    uint32_t const cCpus = pVM->cCpus;
    if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        VMCPUID idCpuLowestTpr = NIL_VMCPUID;
        uint8_t u8LowestTpr    = UINT8_C(0xff);
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPUCC pVCpuDst = pVM->CTX_SUFF(apCpus)[idCpu];
            if (apicIsLogicalDest(pVCpuDst, fDestMask))
            {
                PCXAPICPAGE   pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDst);
                uint8_t const u8Tpr      = pXApicPage->tpr.u8Tpr;   /* PAV */

                /*
                 * If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
                 * Hence the use of "<=" in the check below.
                 * See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration".
                 */
                if (u8Tpr <= u8LowestTpr)
                {
                    u8LowestTpr    = u8Tpr;
                    idCpuLowestTpr = idCpu;
                }
            }
        }
        if (idCpuLowestTpr != NIL_VMCPUID)
            VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
        return;
    }

    /*
     * x2APIC:
     *    - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
     * xAPIC:
     *    - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
     *    - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
     *
     * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
     * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     * [3] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    if ((fDestMask & fBroadcastMask) == fBroadcastMask)
    {
        VMCPUSET_FILL(pDestCpuSet);
        return;
    }

    if (enmDestMode == XAPICDESTMODE_PHYSICAL)
    {
        /* The destination mask is interpreted as the physical APIC ID of a single target. */
#if 1
        /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
        if (RT_LIKELY(fDestMask < cCpus))
            VMCPUSET_ADD(pDestCpuSet, fDestMask);
#else
        /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPUCC pVCpuDst = &pVM->aCpus[idCpu];
            if (XAPIC_IN_X2APIC_MODE(pVCpuDst))
            {
                PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDst);
                if (pX2ApicPage->id.u32ApicId == fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDst->idCpu);
            }
            else
            {
                PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDst);
                if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDst->idCpu);
            }
        }
#endif
    }
    else
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);

        /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
        if (RT_UNLIKELY(!fDestMask))
            return;

        /* The destination mask is interpreted as a bitmap of software-programmable logical APIC ID of the target APICs. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPUCC pVCpuDst = pVM->CTX_SUFF(apCpus)[idCpu];
            if (apicIsLogicalDest(pVCpuDst, fDestMask))
                VMCPUSET_ADD(pDestCpuSet, pVCpuDst->idCpu);
        }
    }
}
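
/* A minimal worked example, assuming a 4-VCPU VM in xAPIC mode where
 * fBroadcastMask = 0xff: fDestMask = 0xff fills the whole set; in physical
 * mode fDestMask = 0x02 adds only VCPU 2; in flat logical mode it instead
 * adds every VCPU whose 8-bit LDR has bit 1 set. */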


/**
 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
 * Command Register (ICR).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rcRZ    The return code if the operation cannot be
 *                  performed in the current context.
 */
DECLINLINE(VBOXSTRICTRC) apicSendIpi(PVMCPUCC pVCpu, int rcRZ)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    XAPICDELIVERYMODE const  enmDeliveryMode  = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
    XAPICDESTMODE const      enmDestMode      = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
    XAPICINITLEVEL const     enmInitLevel     = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
    XAPICTRIGGERMODE const   enmTriggerMode   = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
    XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
    uint8_t const            uVector          = pXApicPage->icr_lo.u.u8Vector;

    PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
    uint32_t const fDest = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * INIT Level De-assert is not supported on Pentium 4 and Xeon processors.
     * Apparently, this also applies to NMI, SMI, lowest-priority and fixed delivery modes,
     * see @bugref{8245#c116}.
     *
     * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)" for a table of valid ICR combinations.
     */
    if (   enmTriggerMode == XAPICTRIGGERMODE_LEVEL
        && enmInitLevel == XAPICINITLEVEL_DEASSERT
        && (   enmDeliveryMode == XAPICDELIVERYMODE_FIXED
            || enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO
            || enmDeliveryMode == XAPICDELIVERYMODE_SMI
            || enmDeliveryMode == XAPICDELIVERYMODE_NMI
            || enmDeliveryMode == XAPICDELIVERYMODE_INIT))
    {
        Log2(("APIC%u: %s level de-assert unsupported, ignoring!\n", pVCpu->idCpu, apicGetDeliveryModeName(enmDeliveryMode)));
        return VINF_SUCCESS;
    }
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif

    /*
     * The destination and delivery modes are ignored/by-passed when a destination shorthand is specified.
     * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
     */
    VMCPUSET DestCpuSet;
    switch (enmDestShorthand)
    {
        case XAPICDESTSHORTHAND_NONE:
        {
            PVMCC pVM = pVCpu->CTX_SUFF(pVM);
            uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
            apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_SELF:
        {
            VMCPUSET_EMPTY(&DestCpuSet);
            VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
            break;
        }

        case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
            break;
        }
    }

    return apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
                        NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
}


/**
 * Sets the Interrupt Command Register (ICR) high dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uIcrHi      The ICR high dword.
 */
static VBOXSTRICTRC apicSetIcrHi(PVMCPUCC pVCpu, uint32_t uIcrHi)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
    STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrHiWrite);
    Log2(("APIC%u: apicSetIcrHi: uIcrHi=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_hi.all.u32IcrHi));

    return VINF_SUCCESS;
}


/**
 * Sets the Interrupt Command Register (ICR) low dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uIcrLo          The ICR low dword.
 * @param   rcRZ            The return code if the operation cannot be performed
 *                          in the current context.
 * @param   fUpdateStat     Whether to update the ICR low write statistics
 *                          counter.
 */
static VBOXSTRICTRC apicSetIcrLo(PVMCPUCC pVCpu, uint32_t uIcrLo, int rcRZ, bool fUpdateStat)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR_VALID;
    Log2(("APIC%u: apicSetIcrLo: uIcrLo=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_lo.all.u32IcrLo));

    if (fUpdateStat)
        STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrLoWrite);
    RT_NOREF(fUpdateStat);

    return apicSendIpi(pVCpu, rcRZ);
}


/**
 * Sets the Interrupt Command Register (ICR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   u64Icr  The ICR (High and Low combined).
 * @param   rcRZ    The return code if the operation cannot be performed
 *                  in the current context.
 *
 * @remarks This function is used by both x2APIC interface and the Hyper-V
 *          interface, see APICHvSetIcr. The Hyper-V spec isn't clear what
 *          happens when invalid bits are set. For the time being, it will
 *          \#GP like a regular x2APIC access.
 */
static VBOXSTRICTRC apicSetIcr(PVMCPUCC pVCpu, uint64_t u64Icr, int rcRZ)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* Validate. */
    uint32_t const uLo = RT_LO_U32(u64Icr);
    if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR_VALID)))
    {
        /* Update high dword first, then update the low dword which sends the IPI. */
        PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
        pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(u64Icr);
        STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrFullWrite);
        return apicSetIcrLo(pVCpu, uLo, rcRZ, false /* fUpdateStat */);
    }
    return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
}
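
/* A minimal sketch of how guests reach the two ICR paths above, assuming the
 * standard register encodings (ICR lo/hi at MMIO 0xfee00300/0xfee00310, MSR
 * 0x830 for x2APIC): two 32-bit MMIO writes land in apicSetIcrHi and
 * apicSetIcrLo, while one 64-bit WRMSR lands in apicSetIcr, which updates the
 * high half first so the IPI is sent from a consistent 64-bit ICR. */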


/**
 * Sets the Error Status Register (ESR).
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uEsr    The ESR value.
 */
static int apicSetEsr(PVMCPUCC pVCpu, uint32_t uEsr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    Log2(("APIC%u: apicSetEsr: uEsr=%#RX32\n", pVCpu->idCpu, uEsr));

    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uEsr & ~XAPIC_ESR_WO_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);

    /*
     * Writes to the ESR cause the internal state to be updated in the register,
     * clearing the original state. See AMD spec. 16.4.6 "APIC Error Interrupts".
     */
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Updates the Processor Priority Register (PPR).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void apicUpdatePpr(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* See Intel spec 10.8.3.1 "Task and Processor Priorities". */
    PXAPICPAGE    pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    uint8_t const uIsrv      = apicGetHighestSetBitInReg(&pXApicPage->isr, 0 /* rcNotFound */);
    uint8_t       uPpr;
    if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
        uPpr = pXApicPage->tpr.u8Tpr;
    else
        uPpr = XAPIC_PPR_GET_PP(uIsrv);
    pXApicPage->ppr.u8Ppr = uPpr;
}
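
/* A minimal worked example of the PPR rule above, assuming the priority
 * class is the high nibble: TPR = 0x42 with highest in-service vector 0x35
 * gives class(TPR) = 4 >= class(ISRV) = 3, so PPR = 0x42; with TPR = 0x22
 * instead, PPR becomes the in-service class, i.e. 0x30. */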


/**
 * Gets the Processor Priority Register (PPR).
 *
 * @returns The PPR value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static uint8_t apicGetPpr(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVCpu->apic.s.StatTprRead);

    /*
     * With virtualized APIC registers or with TPR virtualization, the hardware may
     * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
     * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
     *
     * In all other instances, whenever the TPR or ISR changes, we need to update the PPR
     * as well (e.g. like we do manually in apicR3InitIpi and by calling apicUpdatePpr).
     */
    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    if (pApic->fVirtApicRegsEnabled)        /** @todo re-think this */
        apicUpdatePpr(pVCpu);
    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    return pXApicPage->ppr.u8Ppr;
}


/**
 * Sets the Task Priority Register (TPR).
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu                   The cross context virtual CPU structure.
 * @param   uTpr                    The TPR value.
 * @param   fForceX2ApicBehaviour   Pretend the APIC is in x2APIC mode during
 *                                  this write.
 */
static int apicSetTprEx(PVMCPUCC pVCpu, uint32_t uTpr, bool fForceX2ApicBehaviour)
{
    VMCPU_ASSERT_EMT(pVCpu);

    Log2(("APIC%u: apicSetTprEx: uTpr=%#RX32\n", pVCpu->idCpu, uTpr));
    STAM_COUNTER_INC(&pVCpu->apic.s.StatTprWrite);

    bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
    if (   fX2ApicMode
        && (uTpr & ~XAPIC_TPR_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->tpr.u8Tpr = uTpr;
    apicUpdatePpr(pVCpu);
    apicSignalNextPendingIntr(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Sets the End-Of-Interrupt (EOI) register.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu                   The cross context virtual CPU structure.
 * @param   uEoi                    The EOI value.
 * @param   rcBusy                  The busy return code when the write cannot
 *                                  be completed successfully in this context.
 * @param   fForceX2ApicBehaviour   Pretend the APIC is in x2APIC mode during
 *                                  this write.
 */
static VBOXSTRICTRC apicSetEoi(PVMCPUCC pVCpu, uint32_t uEoi, int rcBusy, bool fForceX2ApicBehaviour)
{
    VMCPU_ASSERT_EMT(pVCpu);

    Log2(("APIC%u: apicSetEoi: uEoi=%#RX32\n", pVCpu->idCpu, uEoi));
    STAM_COUNTER_INC(&pVCpu->apic.s.StatEoiWrite);

    bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
    if (   fX2ApicMode
        && (uEoi & ~XAPIC_EOI_WO_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    int isrv = apicGetHighestSetBitInReg(&pXApicPage->isr, -1 /* rcNotFound */);
    if (isrv >= 0)
    {
        /*
         * Broadcast the EOI to the I/O APIC(s).
         *
         * We'll handle the EOI broadcast first as there is a tiny chance we get rescheduled to
         * ring-3 due to contention on the I/O APIC lock. This way we don't mess with the rest
         * of the APIC state and simply restart the EOI write operation from ring-3.
         */
        Assert(isrv <= (int)UINT8_MAX);
        uint8_t const uVector         = isrv;
        bool const    fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
        if (fLevelTriggered)
        {
            VBOXSTRICTRC rc = PDMIoApicBroadcastEoi(pVCpu->CTX_SUFF(pVM), uVector);
            if (rc == VINF_SUCCESS)
            { /* likely */ }
            else
                return rcBusy;

            /*
             * Clear the vector from the TMR.
             *
             * The broadcast to I/O APIC can re-trigger new interrupts to arrive via the bus. However,
             * APICUpdatePendingInterrupts() which updates TMR can only be done from EMT which we
             * currently are on, so no possibility of concurrent updates.
             */
            apicClearVectorInReg(&pXApicPage->tmr, uVector);

            /*
             * Clear the remote IRR bit for level-triggered, fixed mode LINT0 interrupt.
             * The LINT1 pin does not support level-triggered interrupts.
             * See Intel spec. 10.5.1 "Local Vector Table".
             */
            uint32_t const uLvtLint0 = pXApicPage->lvt_lint0.all.u32LvtLint0;
            if (   XAPIC_LVT_GET_REMOTE_IRR(uLvtLint0)
                && XAPIC_LVT_GET_VECTOR(uLvtLint0) == uVector
                && XAPIC_LVT_GET_DELIVERY_MODE(uLvtLint0) == XAPICDELIVERYMODE_FIXED)
            {
                ASMAtomicAndU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, ~XAPIC_LVT_REMOTE_IRR);
                Log2(("APIC%u: apicSetEoi: Cleared remote-IRR for LINT0. uVector=%#x\n", pVCpu->idCpu, uVector));
            }

            Log2(("APIC%u: apicSetEoi: Cleared level triggered interrupt from TMR. uVector=%#x\n", pVCpu->idCpu, uVector));
        }

        /*
         * Mark interrupt as serviced, update the PPR and signal pending interrupts.
         */
        Log2(("APIC%u: apicSetEoi: Clearing interrupt from ISR. uVector=%#x\n", pVCpu->idCpu, uVector));
        apicClearVectorInReg(&pXApicPage->isr, uVector);
        apicUpdatePpr(pVCpu);
        apicSignalNextPendingIntr(pVCpu);
    }
    else
    {
#ifdef DEBUG_ramshankar
        /** @todo Figure out if this is done intentionally by guests or is a bug
         *        in our emulation. Happened with Win10 SMP VM during reboot after
         *        installation of guest additions with 3D support. */
        AssertMsgFailed(("APIC%u: apicSetEoi: Failed to find any ISR bit\n", pVCpu->idCpu));
#endif
    }

    return VINF_SUCCESS;
}


/**
 * Sets the Logical Destination Register (LDR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uLdr    The LDR value.
 *
 * @remarks LDR is read-only in x2APIC mode.
 */
static VBOXSTRICTRC apicSetLdr(PVMCPUCC pVCpu, uint32_t uLdr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu) || pApic->fHyperVCompatMode); RT_NOREF_PV(pApic);

    Log2(("APIC%u: apicSetLdr: uLdr=%#RX32\n", pVCpu->idCpu, uLdr));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR_VALID);
    return VINF_SUCCESS;
}


/**
 * Sets the Destination Format Register (DFR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uDfr    The DFR value.
 *
 * @remarks DFR is not available in x2APIC mode.
 */
static VBOXSTRICTRC apicSetDfr(PVMCPUCC pVCpu, uint32_t uDfr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));

    uDfr &= XAPIC_DFR_VALID;
    uDfr |= XAPIC_DFR_RSVD_MB1;

    Log2(("APIC%u: apicSetDfr: uDfr=%#RX32\n", pVCpu->idCpu, uDfr));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr);
    return VINF_SUCCESS;
}


/**
 * Sets the Timer Divide Configuration Register (DCR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uTimerDcr   The timer DCR value.
 */
static VBOXSTRICTRC apicSetTimerDcr(PVMCPUCC pVCpu, uint32_t uTimerDcr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uTimerDcr & ~XAPIC_TIMER_DCR_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);

    Log2(("APIC%u: apicSetTimerDcr: uTimerDcr=%#RX32\n", pVCpu->idCpu, uTimerDcr));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
    return VINF_SUCCESS;
}


/**
 * Gets the timer's Current Count Register (CCR).
 *
 * @returns VBox status code.
 * @param   pDevIns     The device instance.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rcBusy      The busy return code for the timer critical section.
 * @param   puValue     Where to store the LVT timer CCR.
 */
static VBOXSTRICTRC apicGetTimerCcr(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, int rcBusy, uint32_t *puValue)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(puValue);

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    *puValue = 0;

    /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
    if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
        return VINF_SUCCESS;

    /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
    uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
    if (!uInitialCount)
        return VINF_SUCCESS;

    /*
     * Reading the virtual-sync clock requires locking its timer because it's not
     * a simple atomic operation, see tmVirtualSyncGetEx().
     *
     * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
     */
    PCAPICCPU     pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    TMTIMERHANDLE hTimer   = pApicCpu->hTimer;

    VBOXSTRICTRC rc = PDMDevHlpTimerLockClock(pDevIns, hTimer, rcBusy);
    if (rc == VINF_SUCCESS)
    {
        /* If the current-count register is 0, it implies the timer expired. */
        uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
        if (uCurrentCount)
        {
            uint64_t const cTicksElapsed = PDMDevHlpTimerGet(pDevIns, hTimer) - pApicCpu->u64TimerInitial;
            PDMDevHlpTimerUnlockClock(pDevIns, hTimer);
            uint8_t const  uTimerShift = apicGetTimerShift(pXApicPage);
            uint64_t const uDelta      = cTicksElapsed >> uTimerShift;
            if (uInitialCount > uDelta)
                *puValue = uInitialCount - uDelta;
        }
        else
            PDMDevHlpTimerUnlockClock(pDevIns, hTimer);
    }
    return rc;
}
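
/* A minimal worked example of the CCR math above, assuming the DCR encodes
 * divide-by-8 so apicGetTimerShift returns 3: with an initial count of
 * 1000000 and 4000000 clock ticks elapsed since the timer was armed,
 * uDelta = 4000000 >> 3 = 500000 and the CCR reads back 500000. */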


/**
 * Sets the timer's Initial-Count Register (ICR).
 *
 * @returns Strict VBox status code.
 * @param   pDevIns         The device instance.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   rcBusy          The busy return code for the timer critical section.
 * @param   uInitialCount   The timer ICR.
 */
static VBOXSTRICTRC apicSetTimerIcr(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, int rcBusy, uint32_t uInitialCount)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PAPIC      pApic      = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    PAPICCPU   pApicCpu   = VMCPU_TO_APICCPU(pVCpu);
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);

    Log2(("APIC%u: apicSetTimerIcr: uInitialCount=%#RX32\n", pVCpu->idCpu, uInitialCount));
    STAM_COUNTER_INC(&pApicCpu->StatTimerIcrWrite);

    /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
    if (   pApic->fSupportsTscDeadline
        && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
        return VINF_SUCCESS;

    /*
     * The timer CCR may be modified by apicR3TimerCallback() in parallel,
     * so obtain the lock -before- updating it here to be consistent with the
     * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
     */
    TMTIMERHANDLE hTimer = pApicCpu->hTimer;
    VBOXSTRICTRC rc = PDMDevHlpTimerLockClock(pDevIns, hTimer, rcBusy);
    if (rc == VINF_SUCCESS)
    {
        pXApicPage->timer_icr.u32InitialCount = uInitialCount;
        pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
        if (uInitialCount)
            apicStartTimer(pVCpu, uInitialCount);
        else
            apicStopTimer(pVCpu);
        PDMDevHlpTimerUnlockClock(pDevIns, hTimer);
    }
    return rc;
}


/**
 * Sets an LVT entry.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   offLvt  The LVT entry offset in the xAPIC page.
 * @param   uLvt    The LVT value to set.
 */
static VBOXSTRICTRC apicSetLvtEntry(PVMCPUCC pVCpu, uint16_t offLvt, uint32_t uLvt)
{
    VMCPU_ASSERT_EMT(pVCpu);

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    AssertMsg(   offLvt == XAPIC_OFF_LVT_TIMER
              || offLvt == XAPIC_OFF_LVT_THERMAL
              || offLvt == XAPIC_OFF_LVT_PERF
              || offLvt == XAPIC_OFF_LVT_LINT0
              || offLvt == XAPIC_OFF_LVT_LINT1
              || offLvt == XAPIC_OFF_LVT_ERROR,
              ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#RX16, uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));

    /*
     * If TSC-deadline mode isn't supported, ignore the bit in xAPIC mode
     * and raise #GP(0) in x2APIC mode.
     */
    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    if (offLvt == XAPIC_OFF_LVT_TIMER)
    {
        if (   !pApic->fSupportsTscDeadline
            && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
        {
            if (XAPIC_IN_X2APIC_MODE(pVCpu))
                return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
            uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
            /** @todo TSC-deadline timer mode transition */
        }
    }

    /*
     * Validate rest of the LVT bits.
     */
    uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
    AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);

    /*
     * For x2APIC, disallow setting of invalid/reserved bits.
     * For xAPIC, mask out invalid/reserved bits (i.e. ignore them).
     */
    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
        return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);

    uLvt &= g_au32LvtValidMasks[idxLvt];

    /*
     * In the software-disabled state, LVT mask-bit must remain set and attempts to clear the mask
     * bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
     */
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    if (!pXApicPage->svr.u.fApicSoftwareEnable)
        uLvt |= XAPIC_LVT_MASK;

    /*
     * It is unclear whether we should signal a 'send illegal vector' error here and ignore updating
     * the LVT entry when the delivery mode is 'fixed'[1] or update it in addition to signalling the
     * error or not signal the error at all. For now, we'll allow setting illegal vectors into the LVT
     * but set the 'send illegal vector' error here. The 'receive illegal vector' error will be set if
     * the interrupt for the vector happens to be generated, see apicPostInterrupt().
     *
     * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
     */
    if (RT_UNLIKELY(   XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
                    && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
        apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);

    Log2(("APIC%u: apicSetLvtEntry: offLvt=%#RX16 uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));

    apicWriteRaw32(pXApicPage, offLvt, uLvt);
    return VINF_SUCCESS;
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
}


#if 0
/**
 * Sets an LVT entry in the extended LVT range.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   offLvt  The LVT entry offset in the xAPIC page.
 * @param   uLvt    The LVT value to set.
 */
static int apicSetLvtExtEntry(PVMCPUCC pVCpu, uint16_t offLvt, uint32_t uLvt)
{
    VMCPU_ASSERT_EMT(pVCpu);
    AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvtExtEntry: invalid offset %#RX16\n", pVCpu->idCpu, offLvt));

    /** @todo support CMCI. */
    return VERR_NOT_IMPLEMENTED;
}
#endif


/**
 * Hints TM about the APIC timer frequency.
 *
 * @param   pDevIns         The device instance.
 * @param   pApicCpu        The APIC CPU state.
 * @param   uInitialCount   The new initial count.
 * @param   uTimerShift     The new timer shift.
 * @thread  Any.
 */
void apicHintTimerFreq(PPDMDEVINS pDevIns, PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
{
    Assert(pApicCpu);

    if (   pApicCpu->uHintedTimerInitialCount != uInitialCount
        || pApicCpu->uHintedTimerShift        != uTimerShift)
    {
        uint32_t uHz;
        if (uInitialCount)
        {
            uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
            uHz = PDMDevHlpTimerGetFreq(pDevIns, pApicCpu->hTimer) / cTicksPerPeriod;
        }
        else
            uHz = 0;

        PDMDevHlpTimerSetFrequencyHint(pDevIns, pApicCpu->hTimer, uHz);
        pApicCpu->uHintedTimerInitialCount = uInitialCount;
        pApicCpu->uHintedTimerShift        = uTimerShift;
    }
}
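
/* A minimal worked example of the hint above, assuming the timer clock runs
 * at 1000000000 Hz: an initial count of 100000 with shift 3 (divide-by-8)
 * gives cTicksPerPeriod = 800000, so TM is hinted 1000000000 / 800000 =
 * 1250 Hz, i.e. one timer period every 0.8 ms. */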


/**
 * Gets the Interrupt Command Register (ICR), without performing any interface
 * checks.
 *
 * @returns The ICR value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(uint64_t) apicGetIcrNoCheck(PVMCPUCC pVCpu)
{
    PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
    uint64_t const uHi  = pX2ApicPage->icr_hi.u32IcrHi;
    uint64_t const uLo  = pX2ApicPage->icr_lo.all.u32IcrLo;
    uint64_t const uIcr = RT_MAKE_U64(uLo, uHi);
    return uIcr;
}


/**
 * Reads an APIC register.
 *
 * @returns VBox status code.
 * @param   pDevIns     The device instance.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offReg      The offset of the register being read.
 * @param   puValue     Where to store the register value.
 */
DECLINLINE(VBOXSTRICTRC) apicReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(offReg <= XAPIC_OFF_MAX_VALID);

    PXAPICPAGE   pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    uint32_t     uValue = 0;
    VBOXSTRICTRC rc = VINF_SUCCESS;
    switch (offReg)
    {
        case XAPIC_OFF_ID:
        case XAPIC_OFF_VERSION:
        case XAPIC_OFF_TPR:
        case XAPIC_OFF_EOI:
        case XAPIC_OFF_RRD:
        case XAPIC_OFF_LDR:
        case XAPIC_OFF_DFR:
        case XAPIC_OFF_SVR:
        case XAPIC_OFF_ISR0:    case XAPIC_OFF_ISR1:    case XAPIC_OFF_ISR2:    case XAPIC_OFF_ISR3:
        case XAPIC_OFF_ISR4:    case XAPIC_OFF_ISR5:    case XAPIC_OFF_ISR6:    case XAPIC_OFF_ISR7:
        case XAPIC_OFF_TMR0:    case XAPIC_OFF_TMR1:    case XAPIC_OFF_TMR2:    case XAPIC_OFF_TMR3:
        case XAPIC_OFF_TMR4:    case XAPIC_OFF_TMR5:    case XAPIC_OFF_TMR6:    case XAPIC_OFF_TMR7:
        case XAPIC_OFF_IRR0:    case XAPIC_OFF_IRR1:    case XAPIC_OFF_IRR2:    case XAPIC_OFF_IRR3:
        case XAPIC_OFF_IRR4:    case XAPIC_OFF_IRR5:    case XAPIC_OFF_IRR6:    case XAPIC_OFF_IRR7:
        case XAPIC_OFF_ESR:
        case XAPIC_OFF_ICR_LO:
        case XAPIC_OFF_ICR_HI:
        case XAPIC_OFF_LVT_TIMER:
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        case XAPIC_OFF_LVT_THERMAL:
#endif
        case XAPIC_OFF_LVT_PERF:
        case XAPIC_OFF_LVT_LINT0:
        case XAPIC_OFF_LVT_LINT1:
        case XAPIC_OFF_LVT_ERROR:
        case XAPIC_OFF_TIMER_ICR:
        case XAPIC_OFF_TIMER_DCR:
        {
            Assert(   !XAPIC_IN_X2APIC_MODE(pVCpu)
                   || (   offReg != XAPIC_OFF_DFR
                       && offReg != XAPIC_OFF_ICR_HI
                       && offReg != XAPIC_OFF_EOI));
            uValue = apicReadRaw32(pXApicPage, offReg);
            Log2(("APIC%u: apicReadRegister: offReg=%#x uValue=%#x\n", pVCpu->idCpu, offReg, uValue));
            break;
        }

        case XAPIC_OFF_PPR:
        {
            uValue = apicGetPpr(pVCpu);
            break;
        }

        case XAPIC_OFF_TIMER_CCR:
        {
            Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
            rc = apicGetTimerCcr(pDevIns, pVCpu, VINF_IOM_R3_MMIO_READ, &uValue);
            break;
        }

        case XAPIC_OFF_APR:
        {
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
            /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
            Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
            break;
        }

        default:
        {
            Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
            rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "VCPU[%u]: offReg=%#RX16\n", pVCpu->idCpu, offReg);
            apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
            break;
        }
    }

    *puValue = uValue;
    return rc;
}
1731
1732
1733/**
1734 * Writes an APIC register.
1735 *
1736 * @returns Strict VBox status code.
1737 * @param pDevIns The device instance.
1738 * @param pVCpu The cross context virtual CPU structure.
1739 * @param offReg The offset of the register being written.
1740 * @param uValue The register value.
1741 */
1742DECLINLINE(VBOXSTRICTRC) apicWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
1743{
1744 VMCPU_ASSERT_EMT(pVCpu);
1745 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1746 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1747
1748 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1749 switch (offReg)
1750 {
1751 case XAPIC_OFF_TPR:
1752 {
1753 rcStrict = apicSetTprEx(pVCpu, uValue, false /* fForceX2ApicBehaviour */);
1754 break;
1755 }
1756
1757 case XAPIC_OFF_LVT_TIMER:
1758#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1759 case XAPIC_OFF_LVT_THERMAL:
1760#endif
1761 case XAPIC_OFF_LVT_PERF:
1762 case XAPIC_OFF_LVT_LINT0:
1763 case XAPIC_OFF_LVT_LINT1:
1764 case XAPIC_OFF_LVT_ERROR:
1765 {
1766 rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
1767 break;
1768 }
1769
1770 case XAPIC_OFF_TIMER_ICR:
1771 {
1772 rcStrict = apicSetTimerIcr(pDevIns, pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
1773 break;
1774 }
1775
1776 case XAPIC_OFF_EOI:
1777 {
1778 rcStrict = apicSetEoi(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE, false /* fForceX2ApicBehaviour */);
1779 break;
1780 }
1781
1782 case XAPIC_OFF_LDR:
1783 {
1784 rcStrict = apicSetLdr(pVCpu, uValue);
1785 break;
1786 }
1787
1788 case XAPIC_OFF_DFR:
1789 {
1790 rcStrict = apicSetDfr(pVCpu, uValue);
1791 break;
1792 }
1793
1794 case XAPIC_OFF_SVR:
1795 {
1796 rcStrict = apicSetSvr(pVCpu, uValue);
1797 break;
1798 }
1799
1800 case XAPIC_OFF_ICR_LO:
1801 {
1802 rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE, true /* fUpdateStat */);
1803 break;
1804 }
1805
1806 case XAPIC_OFF_ICR_HI:
1807 {
1808 rcStrict = apicSetIcrHi(pVCpu, uValue);
1809 break;
1810 }
1811
1812 case XAPIC_OFF_TIMER_DCR:
1813 {
1814 rcStrict = apicSetTimerDcr(pVCpu, uValue);
1815 break;
1816 }
1817
1818 case XAPIC_OFF_ESR:
1819 {
1820 rcStrict = apicSetEsr(pVCpu, uValue);
1821 break;
1822 }
1823
1824 case XAPIC_OFF_APR:
1825 case XAPIC_OFF_RRD:
1826 {
1827#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1828 /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
1829#else
1830# error "Implement Pentium and P6 family APIC architectures"
1831#endif
1832 break;
1833 }
1834
1835 /* Read-only, write ignored: */
1836 case XAPIC_OFF_VERSION:
1837 case XAPIC_OFF_ID:
1838 break;
1839
1840 /* Unavailable/reserved in xAPIC mode: */
1841 case X2APIC_OFF_SELF_IPI:
1842 /* Read-only registers: */
1843 case XAPIC_OFF_PPR:
1844 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1845 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1846 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1847 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1848 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1849 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1850 case XAPIC_OFF_TIMER_CCR:
1851 default:
1852 {
1853 rcStrict = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "APIC%u: offReg=%#RX16\n", pVCpu->idCpu, offReg);
1854 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1855 break;
1856 }
1857 }
1858
1859 return rcStrict;
1860}
1861
1862
1863/**
1864 * Reads an APIC MSR.
1865 *
1866 * @returns Strict VBox status code.
1867 * @param pVCpu The cross context virtual CPU structure.
1868 * @param u32Reg The MSR being read.
1869 * @param pu64Value Where to store the read value.
1870 */
1871VMM_INT_DECL(VBOXSTRICTRC) APICReadMsr(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1872{
1873 /*
1874 * Validate.
1875 */
1876 VMCPU_ASSERT_EMT(pVCpu);
1877 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1878 Assert(pu64Value);
1879
1880 /*
1881 * Is the APIC enabled?
1882 */
1883 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1884 if (APICIsEnabled(pVCpu))
1885 { /* likely */ }
1886 else
1887 return apicMsrAccessError(pVCpu, u32Reg, pApic->enmMaxMode == PDMAPICMODE_NONE ?
1888 APICMSRACCESS_READ_DISALLOWED_CONFIG : APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1889
1890#ifndef IN_RING3
1891 if (pApic->CTXALLMID(f,Enabled))
1892    { /* likely */ }
1893 else
1894 return VINF_CPUM_R3_MSR_READ;
1895#endif
1896
1897 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrRead));
1898
1899 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1900 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
1901 || pApic->fHyperVCompatMode))
1902 {
1903 switch (u32Reg)
1904 {
1905 /* Special handling for x2APIC: */
1906 case MSR_IA32_X2APIC_ICR:
1907 {
1908 *pu64Value = apicGetIcrNoCheck(pVCpu);
1909 break;
1910 }
1911
1912 /* Special handling, compatible with xAPIC: */
1913 case MSR_IA32_X2APIC_TIMER_CCR:
1914 {
1915 uint32_t uValue;
1916 rcStrict = apicGetTimerCcr(VMCPU_TO_DEVINS(pVCpu), pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1917 *pu64Value = uValue;
1918 break;
1919 }
1920
1921 /* Special handling, compatible with xAPIC: */
1922 case MSR_IA32_X2APIC_PPR:
1923 {
1924 *pu64Value = apicGetPpr(pVCpu);
1925 break;
1926 }
1927
1928 /* Raw read, compatible with xAPIC: */
1929 case MSR_IA32_X2APIC_ID:
1930 {
1931 STAM_COUNTER_INC(&pVCpu->apic.s.StatIdMsrRead);
1932 RT_FALL_THRU();
1933 }
1934 case MSR_IA32_X2APIC_VERSION:
1935 case MSR_IA32_X2APIC_TPR:
1936 case MSR_IA32_X2APIC_LDR:
1937 case MSR_IA32_X2APIC_SVR:
1938 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1939 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1940 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1941 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1942 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1943 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1944 case MSR_IA32_X2APIC_ESR:
1945 case MSR_IA32_X2APIC_LVT_TIMER:
1946 case MSR_IA32_X2APIC_LVT_THERMAL:
1947 case MSR_IA32_X2APIC_LVT_PERF:
1948 case MSR_IA32_X2APIC_LVT_LINT0:
1949 case MSR_IA32_X2APIC_LVT_LINT1:
1950 case MSR_IA32_X2APIC_LVT_ERROR:
1951 case MSR_IA32_X2APIC_TIMER_ICR:
1952 case MSR_IA32_X2APIC_TIMER_DCR:
1953 {
1954 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1955 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1956 *pu64Value = apicReadRaw32(pXApicPage, offReg);
1957 break;
1958 }
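            /*
             * Note on the raw reads above: the x2APIC MSR index maps onto the xAPIC page
             * as offset = (MSR - 0x800) << 4, which is what X2APIC_GET_XAPIC_OFF is
             * expected to compute. E.g. MSR_IA32_X2APIC_ISR3 (0x813) reads the 32-bit
             * register at xAPIC offset 0x130 (illustrative example).
             */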
1959
1960 /* Write-only MSRs: */
1961 case MSR_IA32_X2APIC_SELF_IPI:
1962 case MSR_IA32_X2APIC_EOI:
1963 {
1964 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1965 break;
1966 }
1967
1968 /*
1969             * A Windows guest using Hyper-V x2APIC MSR compatibility mode tries to read the "high"
1970             * LDR bits using this invalid MSR index (0x80E), which is quite absurd as the LDR is a
1971             * 32-bit register. See @bugref{8382#c175}.
1972 */
1973 case MSR_IA32_X2APIC_LDR + 1:
1974 {
1975 if (pApic->fHyperVCompatMode)
1976 *pu64Value = 0;
1977 else
1978 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1979 break;
1980 }
1981
1982 /* Reserved MSRs: */
1983 case MSR_IA32_X2APIC_LVT_CMCI:
1984 default:
1985 {
1986 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1987 break;
1988 }
1989 }
1990 }
1991 else
1992 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
1993
1994 return rcStrict;
1995}
1996
1997
1998/**
1999 * Writes an APIC MSR.
2000 *
2001 * @returns Strict VBox status code.
2002 * @param pVCpu The cross context virtual CPU structure.
2003 * @param u32Reg The MSR being written.
2004 * @param u64Value The value to write.
2005 */
2006VMM_INT_DECL(VBOXSTRICTRC) APICWriteMsr(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t u64Value)
2007{
2008 /*
2009 * Validate.
2010 */
2011 VMCPU_ASSERT_EMT(pVCpu);
2012 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
2013
2014 /*
2015 * Is the APIC enabled?
2016 */
2017 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2018 if (APICIsEnabled(pVCpu))
2019 { /* likely */ }
2020 else
2021 return apicMsrAccessError(pVCpu, u32Reg, pApic->enmMaxMode == PDMAPICMODE_NONE ?
2022 APICMSRACCESS_WRITE_DISALLOWED_CONFIG : APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2023
2024#ifndef IN_RING3
2025 if (pApic->CTXALLMID(f,Enabled))
2026 { /* likely */ }
2027 else
2028 return VINF_CPUM_R3_MSR_WRITE;
2029#endif
2030
2031 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrWrite));
2032
2033 /*
2034 * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
2035 * accesses where they are ignored. Hence, we need to validate each register before
2036 * invoking the generic/xAPIC write functions.
2037 *
2038     * Bits 63:32 of all registers except the ICR are reserved; we'll handle this common
2039     * case first and validate the remaining bits on a per-register basis.
2040 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
2041 */
2042 if ( u32Reg != MSR_IA32_X2APIC_ICR
2043 && RT_HI_U32(u64Value))
2044 return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);
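    /*
     * Illustration (hypothetical access, not from a trace): a WRMSR to
     * MSR_IA32_X2APIC_TPR (0x808) with EDX=0x1/EAX=0x0 has RT_HI_U32(u64Value) != 0 and
     * is rejected right here, whereas the equivalent xAPIC MMIO write would have the
     * high bits simply ignored.
     */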
2045
2046 uint32_t u32Value = RT_LO_U32(u64Value);
2047 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2048 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
2049 || pApic->fHyperVCompatMode))
2050 {
2051 switch (u32Reg)
2052 {
2053 case MSR_IA32_X2APIC_TPR:
2054 {
2055 rcStrict = apicSetTprEx(pVCpu, u32Value, false /* fForceX2ApicBehaviour */);
2056 break;
2057 }
2058
2059 case MSR_IA32_X2APIC_ICR:
2060 {
2061 rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
2062 break;
2063 }
2064
2065 case MSR_IA32_X2APIC_SVR:
2066 {
2067 rcStrict = apicSetSvr(pVCpu, u32Value);
2068 break;
2069 }
2070
2071 case MSR_IA32_X2APIC_ESR:
2072 {
2073 rcStrict = apicSetEsr(pVCpu, u32Value);
2074 break;
2075 }
2076
2077 case MSR_IA32_X2APIC_TIMER_DCR:
2078 {
2079 rcStrict = apicSetTimerDcr(pVCpu, u32Value);
2080 break;
2081 }
2082
2083 case MSR_IA32_X2APIC_LVT_TIMER:
2084 case MSR_IA32_X2APIC_LVT_THERMAL:
2085 case MSR_IA32_X2APIC_LVT_PERF:
2086 case MSR_IA32_X2APIC_LVT_LINT0:
2087 case MSR_IA32_X2APIC_LVT_LINT1:
2088 case MSR_IA32_X2APIC_LVT_ERROR:
2089 {
2090 rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
2091 break;
2092 }
2093
2094 case MSR_IA32_X2APIC_TIMER_ICR:
2095 {
2096 rcStrict = apicSetTimerIcr(VMCPU_TO_DEVINS(pVCpu), pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
2097 break;
2098 }
2099
2100 /* Write-only MSRs: */
2101 case MSR_IA32_X2APIC_SELF_IPI:
2102 {
2103 uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
2104 apicPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE, 0 /* uSrcTag */);
2105 rcStrict = VINF_SUCCESS;
2106 break;
2107 }
2108
2109 case MSR_IA32_X2APIC_EOI:
2110 {
2111 rcStrict = apicSetEoi(pVCpu, u32Value, VINF_CPUM_R3_MSR_WRITE, false /* fForceX2ApicBehaviour */);
2112 break;
2113 }
2114
2115 /*
2116             * A Windows guest using Hyper-V x2APIC MSR compatibility mode tries to write the "high"
2117             * LDR bits using this invalid MSR index (0x80E), which is quite absurd as the LDR is a
2118             * 32-bit register. The write value was 0xffffffff on a Windows 8.1 64-bit guest; we can
2119             * safely ignore this nonsense. See @bugref{8382#c7}.
2120 */
2121 case MSR_IA32_X2APIC_LDR + 1:
2122 {
2123 if (pApic->fHyperVCompatMode)
2124 rcStrict = VINF_SUCCESS;
2125 else
2126 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2127 break;
2128 }
2129
2130            /* Special treatment (read-only normally, but not with Hyper-V) */
2131 case MSR_IA32_X2APIC_LDR:
2132 {
2133 if (pApic->fHyperVCompatMode)
2134 {
2135 rcStrict = apicSetLdr(pVCpu, u32Value);
2136 break;
2137 }
2138 }
2139 RT_FALL_THRU();
2140 /* Read-only MSRs: */
2141 case MSR_IA32_X2APIC_ID:
2142 case MSR_IA32_X2APIC_VERSION:
2143 case MSR_IA32_X2APIC_PPR:
2144 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
2145 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
2146 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
2147 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
2148 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
2149 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
2150 case MSR_IA32_X2APIC_TIMER_CCR:
2151 {
2152 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
2153 break;
2154 }
2155
2156 /* Reserved MSRs: */
2157 case MSR_IA32_X2APIC_LVT_CMCI:
2158 default:
2159 {
2160 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2161 break;
2162 }
2163 }
2164 }
2165 else
2166 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);
2167
2168 return rcStrict;
2169}
2170
2171
2172/**
2173 * Resets the APIC base MSR.
2174 *
2175 * @param pVCpu The cross context virtual CPU structure.
2176 */
2177static void apicResetBaseMsr(PVMCPUCC pVCpu)
2178{
2179 /*
2180 * Initialize the APIC base MSR. The APIC enable-bit is set upon power-up or reset[1].
2181 *
2182 * A Reset (in xAPIC and x2APIC mode) brings up the local APIC in xAPIC mode.
2183 * An INIT IPI does -not- cause a transition between xAPIC and x2APIC mode[2].
2184 *
2185 * [1] See AMD spec. 14.1.3 "Processor Initialization State"
2186 * [2] See Intel spec. 10.12.5.1 "x2APIC States".
2187 */
2188 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2189
2190 /* Construct. */
2191 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2192 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2193 uint64_t uApicBaseMsr = MSR_IA32_APICBASE_ADDR;
2194 if (pVCpu->idCpu == 0)
2195 uApicBaseMsr |= MSR_IA32_APICBASE_BSP;
2196
2197 /* If the VM was configured with no APIC, don't enable xAPIC mode, obviously. */
2198 if (pApic->enmMaxMode != PDMAPICMODE_NONE)
2199 {
2200 uApicBaseMsr |= MSR_IA32_APICBASE_EN;
2201
2202 /*
2203 * While coming out of a reset the APIC is enabled and in xAPIC mode. If software had previously
2204 * disabled the APIC (which results in the CPUID bit being cleared as well) we re-enable it here.
2205 * See Intel spec. 10.12.5.1 "x2APIC States".
2206 */
2207 if (CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/) == false)
2208 LogRel(("APIC%u: Resetting mode to xAPIC\n", pVCpu->idCpu));
2209 }
2210
2211 /* Commit. */
2212 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uApicBaseMsr);
2213}
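/*
 * Worked example: on VCPU 0 of a VM configured with an APIC, the value committed above is
 * MSR_IA32_APICBASE_ADDR | MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_EN, i.e.
 * 0xfee00000 | 0x100 | 0x800 = 0xfee00900, which is the architectural power-up value of
 * IA32_APIC_BASE on the BSP.
 */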
2214
2215
2216/**
2217 * Initializes per-VCPU APIC to the state following an INIT reset
2218 * ("Wait-for-SIPI" state).
2219 *
2220 * @param pVCpu The cross context virtual CPU structure.
2221 */
2222void apicInitIpi(PVMCPUCC pVCpu)
2223{
2224 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2225 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2226
2227 /*
2228 * See Intel spec. 10.4.7.3 "Local APIC State After an INIT Reset (Wait-for-SIPI State)"
2229 * and AMD spec 16.3.2 "APIC Registers".
2230 *
2231 * The reason we don't simply zero out the entire APIC page and only set the non-zero members
2232     * is that some registers are not touched by the INIT IPI operation (e.g. the version
2233     * register) and this function is only a subset of the reset operation.
2234 */
2235    RT_ZERO(pXApicPage->irr);
2237 RT_ZERO(pXApicPage->isr);
2238 RT_ZERO(pXApicPage->tmr);
2239 RT_ZERO(pXApicPage->icr_hi);
2240 RT_ZERO(pXApicPage->icr_lo);
2241 RT_ZERO(pXApicPage->ldr);
2242 RT_ZERO(pXApicPage->tpr);
2243 RT_ZERO(pXApicPage->ppr);
2244 RT_ZERO(pXApicPage->timer_icr);
2245 RT_ZERO(pXApicPage->timer_ccr);
2246 RT_ZERO(pXApicPage->timer_dcr);
2247
2248 pXApicPage->dfr.u.u4Model = XAPICDESTFORMAT_FLAT;
2249 pXApicPage->dfr.u.u28ReservedMb1 = UINT32_C(0xfffffff);
2250
2251 /** @todo CMCI. */
2252
2253 RT_ZERO(pXApicPage->lvt_timer);
2254 pXApicPage->lvt_timer.u.u1Mask = 1;
2255
2256#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
2257 RT_ZERO(pXApicPage->lvt_thermal);
2258 pXApicPage->lvt_thermal.u.u1Mask = 1;
2259#endif
2260
2261 RT_ZERO(pXApicPage->lvt_perf);
2262 pXApicPage->lvt_perf.u.u1Mask = 1;
2263
2264 RT_ZERO(pXApicPage->lvt_lint0);
2265 pXApicPage->lvt_lint0.u.u1Mask = 1;
2266
2267 RT_ZERO(pXApicPage->lvt_lint1);
2268 pXApicPage->lvt_lint1.u.u1Mask = 1;
2269
2270 RT_ZERO(pXApicPage->lvt_error);
2271 pXApicPage->lvt_error.u.u1Mask = 1;
2272
2273 RT_ZERO(pXApicPage->svr);
2274 pXApicPage->svr.u.u8SpuriousVector = 0xff;
2275
2276 /* The self-IPI register is reset to 0. See Intel spec. 10.12.5.1 "x2APIC States" */
2277 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2278 RT_ZERO(pX2ApicPage->self_ipi);
2279
2280 /* Clear the pending-interrupt bitmaps. */
2281 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2282 RT_BZERO(&pApicCpu->ApicPibLevel, sizeof(APICPIB));
2283 RT_BZERO(pApicCpu->CTX_SUFF(pvApicPib), sizeof(APICPIB));
2284
2285 /* Clear the interrupt line states for LINT0 and LINT1 pins. */
2286 pApicCpu->fActiveLint0 = false;
2287 pApicCpu->fActiveLint1 = false;
2288}
2289
2290
2291/**
2292 * Initializes per-VCPU APIC to the state following a power-up or hardware
2293 * reset.
2294 *
2295 * @param pVCpu The cross context virtual CPU structure.
2296 * @param fResetApicBaseMsr Whether to reset the APIC base MSR.
2297 */
2298void apicResetCpu(PVMCPUCC pVCpu, bool fResetApicBaseMsr)
2299{
2300 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2301
2302 LogFlow(("APIC%u: apicR3ResetCpu: fResetApicBaseMsr=%RTbool\n", pVCpu->idCpu, fResetApicBaseMsr));
2303
2304#ifdef VBOX_STRICT
2305 /* Verify that the initial APIC ID reported via CPUID matches our VMCPU ID assumption. */
2306 uint32_t uEax, uEbx, uEcx, uEdx;
2307 uEax = uEbx = uEcx = uEdx = UINT32_MAX;
2308 CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
2309 Assert(((uEbx >> 24) & 0xff) == pVCpu->idCpu);
2310#endif
2311
2312 /*
2313 * The state following a power-up or reset is a superset of the INIT state.
2314 * See Intel spec. 10.4.7.3 "Local APIC State After an INIT Reset ('Wait-for-SIPI' State)"
2315 */
2316 apicInitIpi(pVCpu);
2317
2318 /*
2319 * The APIC version register is read-only, so just initialize it here.
2320     * It is not clear from the specs where exactly it is initialized.
2321 * The version determines the number of LVT entries and size of the APIC ID (8 bits for P4).
2322 */
2323 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2324#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
2325 pXApicPage->version.u.u8MaxLvtEntry = XAPIC_MAX_LVT_ENTRIES_P4 - 1;
2326 pXApicPage->version.u.u8Version = XAPIC_HARDWARE_VERSION_P4;
2327 AssertCompile(sizeof(pXApicPage->id.u8ApicId) >= XAPIC_APIC_ID_BIT_COUNT_P4 / 8);
2328#else
2329# error "Implement Pentium and P6 family APIC architectures"
2330#endif
2331
2332 /** @todo It isn't clear in the spec. where exactly the default base address
2333 * is (re)initialized, atm we do it here in Reset. */
2334 if (fResetApicBaseMsr)
2335 apicResetBaseMsr(pVCpu);
2336
2337 /*
2338 * Initialize the APIC ID register to xAPIC format.
2339 */
2340 ASMMemZero32(&pXApicPage->id, sizeof(pXApicPage->id));
2341 pXApicPage->id.u8ApicId = pVCpu->idCpu;
2342}
2343
2344
2345/**
2346 * Sets the APIC base MSR.
2347 *
2348 * @returns VBox status code - no informational ones, esp. not
2349 * VINF_CPUM_R3_MSR_WRITE. Only the following two:
2350 * @retval VINF_SUCCESS
2351 * @retval VERR_CPUM_RAISE_GP_0
2352 *
2353 * @param pVCpu The cross context virtual CPU structure.
2354 * @param u64BaseMsr The value to set.
2355 */
2356VMM_INT_DECL(int) APICSetBaseMsr(PVMCPUCC pVCpu, uint64_t u64BaseMsr)
2357{
2358 Assert(pVCpu);
2359
2360 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2361 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2362 APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
2363 APICMODE enmNewMode = apicGetMode(u64BaseMsr);
2364 uint64_t uBaseMsr = pApicCpu->uApicBaseMsr;
2365
2366 Log2(("APIC%u: ApicSetBaseMsr: u64BaseMsr=%#RX64 enmNewMode=%s enmOldMode=%s\n", pVCpu->idCpu, u64BaseMsr,
2367 apicGetModeName(enmNewMode), apicGetModeName(enmOldMode)));
2368
2369 /*
2370 * We do not support re-mapping the APIC base address because:
2371 * - We'll have to manage all the mappings ourselves in the APIC (reference counting based unmapping etc.)
2372 * i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
2373     *   - It's unclear how/if IOM can fall back to handling regions as regular memory (if the MMIO
2374 * region remains mapped but doesn't belong to the called VCPU's APIC).
2375 */
2376 /** @todo Handle per-VCPU APIC base relocation. */
2377 if (MSR_IA32_APICBASE_GET_ADDR(uBaseMsr) != MSR_IA32_APICBASE_ADDR)
2378 {
2379 if (pVCpu->apic.s.cLogMaxSetApicBaseAddr++ < 5)
2380 LogRel(("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
2381 MSR_IA32_APICBASE_GET_ADDR(uBaseMsr)));
2382 return VERR_CPUM_RAISE_GP_0;
2383 }
2384
2385 /* Don't allow enabling xAPIC/x2APIC if the VM is configured with the APIC disabled. */
2386 if (pApic->enmMaxMode == PDMAPICMODE_NONE)
2387 {
2388 LogRel(("APIC%u: Disallowing APIC base MSR write as the VM is configured with APIC disabled!\n", pVCpu->idCpu));
2389 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_DISALLOWED_CONFIG);
2390 }
2391
2392 /*
2393 * Act on state transition.
2394 */
2395 if (enmNewMode != enmOldMode)
2396 {
2397 switch (enmNewMode)
2398 {
2399 case APICMODE_DISABLED:
2400 {
2401 /*
2402 * The APIC state needs to be reset (especially the APIC ID as x2APIC APIC ID bit layout
2403 * is different). We can start with a clean slate identical to the state after a power-up/reset.
2404 *
2405 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
2406 *
2407                 * We'll also manually manage the APIC base MSR here. We want a single point of commit
2408 * at the end of this function rather than updating it in apicR3ResetCpu. This means we also
2409 * need to update the CPUID leaf ourselves.
2410 */
2411 apicResetCpu(pVCpu, false /* fResetApicBaseMsr */);
2412 uBaseMsr &= ~(MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD);
2413 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false /*fVisible*/);
2414 LogRel(("APIC%u: Switched mode to disabled\n", pVCpu->idCpu));
2415 break;
2416 }
2417
2418 case APICMODE_XAPIC:
2419 {
2420 if (enmOldMode != APICMODE_DISABLED)
2421 {
2422 LogRel(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
2423 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2424 }
2425
2426 uBaseMsr |= MSR_IA32_APICBASE_EN;
2427 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/);
2428 LogRel(("APIC%u: Switched mode to xAPIC\n", pVCpu->idCpu));
2429 break;
2430 }
2431
2432 case APICMODE_X2APIC:
2433 {
2434 if (pApic->enmMaxMode != PDMAPICMODE_X2APIC)
2435 {
2436 LogRel(("APIC%u: Disallowing transition to x2APIC mode as the VM is configured with the x2APIC disabled!\n",
2437 pVCpu->idCpu));
2438 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2439 }
2440
2441 if (enmOldMode != APICMODE_XAPIC)
2442 {
2443 LogRel(("APIC%u: Can only transition to x2APIC state from xAPIC state\n", pVCpu->idCpu));
2444 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2445 }
2446
2447 uBaseMsr |= MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD;
2448
2449 /*
2450 * The APIC ID needs updating when entering x2APIC mode.
2451 * Software written APIC ID in xAPIC mode isn't preserved.
2452 * The APIC ID becomes read-only to software in x2APIC mode.
2453 *
2454 * See Intel spec. 10.12.5.1 "x2APIC States".
2455 */
2456 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2457 ASMMemZero32(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
2458 pX2ApicPage->id.u32ApicId = pVCpu->idCpu;
2459
2460 /*
2461 * LDR initialization occurs when entering x2APIC mode.
2462 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
2463 */
2464                pX2ApicPage->ldr.u32LogicalApicId = ((pX2ApicPage->id.u32ApicId >> 4) << 16)
2465                                                  | (UINT32_C(1) << (pX2ApicPage->id.u32ApicId & UINT32_C(0xf)));
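                /*
                 * Worked example of the spec formula above: for x2APIC ID 0x23, bits 19:4
                 * (0x2) form the upper 16 bits and bits 3:0 (0x3) select the intra-cluster
                 * bit, giving a logical ID of (0x2 << 16) | (1 << 3) = 0x20008.
                 */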
2466
2467 LogRel(("APIC%u: Switched mode to x2APIC\n", pVCpu->idCpu));
2468 break;
2469 }
2470
2471 case APICMODE_INVALID:
2472 default:
2473 {
2474 Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
2475 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2476 }
2477 }
2478 }
2479
2480 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
2481 return VINF_SUCCESS;
2482}
2483
2484
2485/**
2486 * Gets the APIC base MSR (no checks are performed wrt APIC hardware or its
2487 * state).
2488 *
2489 * @returns The base MSR value.
2490 * @param pVCpu The cross context virtual CPU structure.
2491 */
2492VMM_INT_DECL(uint64_t) APICGetBaseMsrNoCheck(PCVMCPUCC pVCpu)
2493{
2494 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2495 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2496 return pApicCpu->uApicBaseMsr;
2497}
2498
2499
2500/**
2501 * Gets the APIC base MSR.
2502 *
2503 * @returns Strict VBox status code.
2504 * @param pVCpu The cross context virtual CPU structure.
2505 * @param pu64Value Where to store the MSR value.
2506 */
2507VMM_INT_DECL(VBOXSTRICTRC) APICGetBaseMsr(PVMCPUCC pVCpu, uint64_t *pu64Value)
2508{
2509 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2510
2511 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2512 if (pApic->enmMaxMode != PDMAPICMODE_NONE)
2513 {
2514 *pu64Value = APICGetBaseMsrNoCheck(pVCpu);
2515 return VINF_SUCCESS;
2516 }
2517
2518 if (pVCpu->apic.s.cLogMaxGetApicBaseAddr++ < 5)
2519 LogRel(("APIC%u: Reading APIC base MSR (%#x) when there is no APIC -> #GP(0)\n", pVCpu->idCpu, MSR_IA32_APICBASE));
2520 return VERR_CPUM_RAISE_GP_0;
2521}
2522
2523
2524/**
2525 * Sets the TPR (Task Priority Register).
2526 *
2527 * @retval VINF_SUCCESS
2528 * @retval VERR_CPUM_RAISE_GP_0
2529 * @retval VERR_PDM_NO_APIC_INSTANCE
2530 *
2531 * @param pVCpu The cross context virtual CPU structure.
2532 * @param u8Tpr The TPR value to set.
2533 */
2534VMMDECL(int) APICSetTpr(PVMCPUCC pVCpu, uint8_t u8Tpr)
2535{
2536 if (APICIsEnabled(pVCpu))
2537 return apicSetTprEx(pVCpu, u8Tpr, false /* fForceX2ApicBehaviour */);
2538 return VERR_PDM_NO_APIC_INSTANCE;
2539}
2540
2541
2542/**
2543 * Gets the highest priority pending interrupt.
2544 *
2545 * @returns true if any interrupt is pending, false otherwise.
2546 * @param pVCpu The cross context virtual CPU structure.
2547 * @param pu8PendingIntr Where to store the interrupt vector if the
2548 * interrupt is pending (optional, can be NULL).
2549 */
2550static bool apicGetHighestPendingInterrupt(PCVMCPUCC pVCpu, uint8_t *pu8PendingIntr)
2551{
2552 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2553 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2554 if (irrv >= 0)
2555 {
2556 Assert(irrv <= (int)UINT8_MAX);
2557 if (pu8PendingIntr)
2558 *pu8PendingIntr = (uint8_t)irrv;
2559 return true;
2560 }
2561 return false;
2562}
2563
2564
2565/**
2566 * Gets the APIC TPR (Task Priority Register).
2567 *
2568 * @returns VBox status code.
2569 * @param pVCpu The cross context virtual CPU structure.
2570 * @param pu8Tpr Where to store the TPR.
2571 * @param pfPending Where to store whether there is a pending interrupt
2572 * (optional, can be NULL).
2573 * @param pu8PendingIntr Where to store the highest-priority pending
2574 * interrupt (optional, can be NULL).
2575 */
2576VMMDECL(int) APICGetTpr(PCVMCPUCC pVCpu, uint8_t *pu8Tpr, bool *pfPending, uint8_t *pu8PendingIntr)
2577{
2578 VMCPU_ASSERT_EMT(pVCpu);
2579 if (APICIsEnabled(pVCpu))
2580 {
2581 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2582 if (pfPending)
2583 {
2584 /*
2585 * Just return whatever the highest pending interrupt is in the IRR.
2586 * The caller is responsible for figuring out if it's masked by the TPR etc.
2587 */
2588 *pfPending = apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
2589 }
2590
2591 *pu8Tpr = pXApicPage->tpr.u8Tpr;
2592 return VINF_SUCCESS;
2593 }
2594
2595 *pu8Tpr = 0;
2596 return VERR_PDM_NO_APIC_INSTANCE;
2597}
2598
2599
2600/**
2601 * Gets the APIC timer frequency.
2602 *
2603 * @returns VBox status code.
2604 * @param pVM The cross context VM structure.
2605 * @param pu64Value Where to store the timer frequency.
2606 */
2607VMM_INT_DECL(int) APICGetTimerFreq(PVMCC pVM, uint64_t *pu64Value)
2608{
2609 /*
2610 * Validate.
2611 */
2612 Assert(pVM);
2613 AssertPtrReturn(pu64Value, VERR_INVALID_PARAMETER);
2614
2615 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[0];
2616 if (APICIsEnabled(pVCpu))
2617 {
2618 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2619 *pu64Value = PDMDevHlpTimerGetFreq(VMCPU_TO_DEVINS(pVCpu), pApicCpu->hTimer);
2620 return VINF_SUCCESS;
2621 }
2622 return VERR_PDM_NO_APIC_INSTANCE;
2623}
2624
2625
2626/**
2627 * Delivers an interrupt message via the system bus.
2628 *
2629 * @returns VBox status code.
2630 * @param pVM The cross context VM structure.
2631 * @param uDest The destination mask.
2632 * @param uDestMode The destination mode.
2633 * @param uDeliveryMode The delivery mode.
2634 * @param uVector The interrupt vector.
2635 * @param uPolarity The interrupt line polarity.
2636 * @param uTriggerMode The trigger mode.
2637 * @param uSrcTag The interrupt source tag (debugging).
2638 */
2639VMM_INT_DECL(int) APICBusDeliver(PVMCC pVM, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
2640 uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uSrcTag)
2641{
2642 NOREF(uPolarity);
2643
2644 /*
2645 * If the APIC isn't enabled, do nothing and pretend success.
2646 */
2647 if (APICIsEnabled(pVM->CTX_SUFF(apCpus)[0]))
2648 { /* likely */ }
2649 else
2650 return VINF_SUCCESS;
2651
2652 /*
2653     * The destination field (mask) in the IO APIC redirectable table entry is 8 bits wide.
2654 * Hence, the broadcast mask is 0xff.
2655 * See IO APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirectable Table Registers".
2656 */
2657 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)uTriggerMode;
2658 XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
2659 XAPICDESTMODE enmDestMode = (XAPICDESTMODE)uDestMode;
2660 uint32_t fDestMask = uDest;
2661 uint32_t fBroadcastMask = UINT32_C(0xff);
2662
2663 Log2(("APIC: apicBusDeliver: fDestMask=%#x enmDestMode=%s enmTriggerMode=%s enmDeliveryMode=%s uVector=%#x\n", fDestMask,
2664 apicGetDestModeName(enmDestMode), apicGetTriggerModeName(enmTriggerMode), apicGetDeliveryModeName(enmDeliveryMode),
2665 uVector));
2666
2667 bool fIntrAccepted;
2668 VMCPUSET DestCpuSet;
2669 apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
2670 VBOXSTRICTRC rcStrict = apicSendIntr(pVM, NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2671 &fIntrAccepted, uSrcTag, VINF_SUCCESS /* rcRZ */);
2672 if (fIntrAccepted)
2673 return VBOXSTRICTRC_VAL(rcStrict);
2674 return VERR_APIC_INTR_DISCARDED;
2675}
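#if 0
/* Hypothetical call-site sketch (not part of this file): roughly how an I/O APIC would
   deliver a fixed, edge-triggered vector 0x40 to physical APIC ID 0 via the API above. */
static int ioapicExampleDeliver(PVMCC pVM)
{
    return APICBusDeliver(pVM, 0 /* uDest: physical APIC ID 0 */, (uint8_t)XAPICDESTMODE_PHYSICAL,
                          (uint8_t)XAPICDELIVERYMODE_FIXED, 0x40 /* uVector */, 0 /* uPolarity */,
                          (uint8_t)XAPICTRIGGERMODE_EDGE, 0 /* uSrcTag */);
}
#endif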
2676
2677
2678/**
2679 * Assert/de-assert the local APIC's LINT0/LINT1 interrupt pins.
2680 *
2681 * @returns Strict VBox status code.
2682 * @param pVCpu The cross context virtual CPU structure.
2683 * @param u8Pin The interrupt pin (0 for LINT0 or 1 for LINT1).
2684 * @param u8Level The level (0 for low or 1 for high).
2685 * @param rcRZ The return code if the operation cannot be performed in
2686 * the current context.
2687 *
2688 * @note    All callers totally ignore the status code!
2689 */
2690VMM_INT_DECL(VBOXSTRICTRC) APICLocalInterrupt(PVMCPUCC pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
2691{
2692 AssertReturn(u8Pin <= 1, VERR_INVALID_PARAMETER);
2693 AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);
2694
2695 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2696
2697 /* If the APIC is enabled, the interrupt is subject to LVT programming. */
2698 if (APICIsEnabled(pVCpu))
2699 {
2700 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2701
2702 /* Pick the LVT entry corresponding to the interrupt pin. */
2703 static const uint16_t s_au16LvtOffsets[] =
2704 {
2705 XAPIC_OFF_LVT_LINT0,
2706 XAPIC_OFF_LVT_LINT1
2707 };
2708 Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
2709 uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
2710 uint32_t const uLvt = apicReadRaw32(pXApicPage, offLvt);
2711
2712        /* If software hasn't masked the interrupt in the LVT entry, proceed with interrupt processing. */
2713 if (!XAPIC_LVT_IS_MASKED(uLvt))
2714 {
2715 XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
2716 XAPICTRIGGERMODE enmTriggerMode = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);
2717
2718 switch (enmDeliveryMode)
2719 {
2720 case XAPICDELIVERYMODE_INIT:
2721 {
2722 /** @todo won't work in R0/RC because callers don't care about rcRZ. */
2723 AssertMsgFailed(("INIT through LINT0/LINT1 is not yet supported\n"));
2724 }
2725 RT_FALL_THRU();
2726 case XAPICDELIVERYMODE_FIXED:
2727 {
2728 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2729 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2730 bool fActive = RT_BOOL(u8Level & 1);
2731 bool volatile *pfActiveLine = u8Pin == 0 ? &pApicCpu->fActiveLint0 : &pApicCpu->fActiveLint1;
2732 /** @todo Polarity is busted elsewhere, we need to fix that
2733 * first. See @bugref{8386#c7}. */
2734#if 0
2735 uint8_t const u8Polarity = XAPIC_LVT_GET_POLARITY(uLvt);
2736                    fActive ^= u8Polarity;
2737#endif
2738 if (!fActive)
2739 {
2740 ASMAtomicCmpXchgBool(pfActiveLine, false, true);
2741 break;
2742 }
2743
2744 /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
2745 if (offLvt == XAPIC_OFF_LVT_LINT1)
2746 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
2747 /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
2748 delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
2749 use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
2750 means. */
2751
2752 bool fSendIntr;
2753 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2754 {
2755 /* Recognize and send the interrupt only on an edge transition. */
2756 fSendIntr = ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2757 }
2758 else
2759 {
2760 /* For level-triggered interrupts, redundant interrupts are not a problem. */
2761 Assert(enmTriggerMode == XAPICTRIGGERMODE_LEVEL);
2762 ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2763
2764 /* Only when the remote IRR isn't set, set it and send the interrupt. */
2765 if (!(pXApicPage->lvt_lint0.all.u32LvtLint0 & XAPIC_LVT_REMOTE_IRR))
2766 {
2767 Assert(offLvt == XAPIC_OFF_LVT_LINT0);
2768 ASMAtomicOrU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, XAPIC_LVT_REMOTE_IRR);
2769 fSendIntr = true;
2770 }
2771 else
2772 fSendIntr = false;
2773 }
2774
2775 if (fSendIntr)
2776 {
2777 VMCPUSET DestCpuSet;
2778 VMCPUSET_EMPTY(&DestCpuSet);
2779 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2780 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode,
2781 &DestCpuSet, NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
2782 }
2783 break;
2784 }
2785
2786 case XAPICDELIVERYMODE_SMI:
2787 case XAPICDELIVERYMODE_NMI:
2788 {
2789 VMCPUSET DestCpuSet;
2790 VMCPUSET_EMPTY(&DestCpuSet);
2791 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2792 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2793 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2794 NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
2795 break;
2796 }
2797
2798 case XAPICDELIVERYMODE_EXTINT:
2799 {
2800 Log2(("APIC%u: apicLocalInterrupt: %s ExtINT through LINT%u\n", pVCpu->idCpu,
2801 u8Level ? "Raising" : "Lowering", u8Pin));
2802 if (u8Level)
2803 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2804 else
2805 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2806 break;
2807 }
2808
2809 /* Reserved/unknown delivery modes: */
2810 case XAPICDELIVERYMODE_LOWEST_PRIO:
2811 case XAPICDELIVERYMODE_STARTUP:
2812 default:
2813 {
2814 AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x (%s) on LINT%d\n", pVCpu->idCpu,
2815 enmDeliveryMode, apicGetDeliveryModeName(enmDeliveryMode), u8Pin));
2816 rcStrict = VERR_INTERNAL_ERROR_3;
2817 break;
2818 }
2819 }
2820 }
2821 }
2822 else
2823 {
2824 /* The APIC is hardware disabled. The CPU behaves as though there is no on-chip APIC. */
2825 if (u8Pin == 0)
2826 {
2827 /* LINT0 behaves as an external interrupt pin. */
2828 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, %s INTR\n", pVCpu->idCpu,
2829 u8Level ? "raising" : "lowering"));
2830 if (u8Level)
2831 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2832 else
2833 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2834 }
2835 else
2836 {
2837 /* LINT1 behaves as NMI. */
2838 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, raising NMI\n", pVCpu->idCpu));
2839 apicSetInterruptFF(pVCpu, PDMAPICIRQ_NMI);
2840 }
2841 }
2842
2843 return rcStrict;
2844}
2845
2846
2847/**
2848 * Gets the next highest-priority interrupt from the APIC, marking it as an
2849 * "in-service" interrupt.
2850 *
2851 * @returns VBox status code.
2852 * @param pVCpu The cross context virtual CPU structure.
2853 * @param pu8Vector Where to store the vector.
2854 * @param puSrcTag Where to store the interrupt source tag (debugging).
2855 */
2856VMM_INT_DECL(int) APICGetInterrupt(PVMCPUCC pVCpu, uint8_t *pu8Vector, uint32_t *puSrcTag)
2857{
2858 VMCPU_ASSERT_EMT(pVCpu);
2859 Assert(pu8Vector);
2860
2861 LogFlow(("APIC%u: apicGetInterrupt:\n", pVCpu->idCpu));
2862
2863 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2864 bool const fApicHwEnabled = APICIsEnabled(pVCpu);
2865 if ( fApicHwEnabled
2866 && pXApicPage->svr.u.fApicSoftwareEnable)
2867 {
2868 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2869 if (RT_LIKELY(irrv >= 0))
2870 {
2871 Assert(irrv <= (int)UINT8_MAX);
2872 uint8_t const uVector = irrv;
2873
2874 /*
2875             * A pending-but-masked interrupt can occur if the APIC receives an interrupt while
2876             * the CPU has interrupts disabled and the guest raises the TPR before re-enabling interrupts.
2877 */
2878 uint8_t const uTpr = pXApicPage->tpr.u8Tpr;
2879 if ( uTpr > 0
2880 && XAPIC_TPR_GET_TP(uVector) <= XAPIC_TPR_GET_TP(uTpr))
2881 {
2882 Log2(("APIC%u: apicGetInterrupt: Interrupt masked. uVector=%#x uTpr=%#x SpuriousVector=%#x\n", pVCpu->idCpu,
2883 uVector, uTpr, pXApicPage->svr.u.u8SpuriousVector));
2884 *pu8Vector = uVector;
2885 *puSrcTag = 0;
2886 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByTpr);
2887 return VERR_APIC_INTR_MASKED_BY_TPR;
2888 }
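            /*
             * Example of the masking rule above (illustrative): with TPR 0x45 the task
             * priority class is 4, so a pending vector 0x43 (also class 4) is reported
             * as masked, while vector 0x51 (class 5) passes this check and is subjected
             * to PPR prioritization below.
             */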
2889
2890 /*
2891 * The PPR should be up-to-date at this point through apicSetEoi().
2892             * We're on EMT so no parallel updates are possible.
2893 * Subject the pending vector to PPR prioritization.
2894 */
2895 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
2896 if ( !uPpr
2897 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
2898 {
2899 apicClearVectorInReg(&pXApicPage->irr, uVector);
2900 apicSetVectorInReg(&pXApicPage->isr, uVector);
2901 apicUpdatePpr(pVCpu);
2902 apicSignalNextPendingIntr(pVCpu);
2903
2904 /* Retrieve the interrupt source tag associated with this interrupt. */
2905 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2906 AssertCompile(RT_ELEMENTS(pApicCpu->auSrcTags) > UINT8_MAX);
2907 *puSrcTag = pApicCpu->auSrcTags[uVector];
2908 pApicCpu->auSrcTags[uVector] = 0;
2909
2910 Log2(("APIC%u: apicGetInterrupt: Valid Interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
2911 *pu8Vector = uVector;
2912 return VINF_SUCCESS;
2913 }
2914 else
2915 {
2916 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByPpr);
2917 Log2(("APIC%u: apicGetInterrupt: Interrupt's priority is not higher than the PPR. uVector=%#x PPR=%#x\n",
2918 pVCpu->idCpu, uVector, uPpr));
2919 }
2920 }
2921 else
2922 Log2(("APIC%u: apicGetInterrupt: No pending bits in IRR\n", pVCpu->idCpu));
2923 }
2924 else
2925 Log2(("APIC%u: apicGetInterrupt: APIC %s disabled\n", pVCpu->idCpu, !fApicHwEnabled ? "hardware" : "software"));
2926
2927 *pu8Vector = 0;
2928 *puSrcTag = 0;
2929 return VERR_APIC_INTR_NOT_PENDING;
2930}
2931
2932
2933/**
2934 * @callback_method_impl{FNIOMMMIONEWREAD}
2935 */
2936DECLCALLBACK(VBOXSTRICTRC) apicReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
2937{
2938 NOREF(pvUser);
2939 Assert(!(off & 0xf));
2940 Assert(cb == 4); RT_NOREF_PV(cb);
2941
2942 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2943 uint16_t offReg = off & 0xff0;
2944 uint32_t uValue = 0;
2945
2946 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioRead));
2947
2948 VBOXSTRICTRC rc = VBOXSTRICTRC_VAL(apicReadRegister(pDevIns, pVCpu, offReg, &uValue));
2949 *(uint32_t *)pv = uValue;
2950
2951 Log2(("APIC%u: apicReadMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2952 return rc;
2953}
2954
2955
2956/**
2957 * @callback_method_impl{FNIOMMMIONEWWRITE}
2958 */
2959DECLCALLBACK(VBOXSTRICTRC) apicWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
2960{
2961 NOREF(pvUser);
2962 Assert(!(off & 0xf));
2963 Assert(cb == 4); RT_NOREF_PV(cb);
2964
2965 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2966 uint16_t offReg = off & 0xff0;
2967 uint32_t uValue = *(uint32_t *)pv;
2968
2969 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioWrite));
2970
2971 Log2(("APIC%u: apicWriteMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2972
2973 return apicWriteRegister(pDevIns, pVCpu, offReg, uValue);
2974}
2975
2976
2977/**
2978 * Sets the interrupt pending force-flag and pokes the EMT if required.
2979 *
2980 * @param pVCpu The cross context virtual CPU structure.
2981 * @param enmType The IRQ type.
2982 */
2983static void apicSetInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType)
2984{
2985#ifdef IN_RING3
2986 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
2987 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
2988#endif
2989
2990 switch (enmType)
2991 {
2992 case PDMAPICIRQ_HARDWARE:
2993 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2994 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
2995 break;
2996 case PDMAPICIRQ_UPDATE_PENDING: VMCPU_FF_SET(pVCpu, VMCPU_FF_UPDATE_APIC); break;
2997 case PDMAPICIRQ_NMI: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI); break;
2998 case PDMAPICIRQ_SMI: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI); break;
2999 case PDMAPICIRQ_EXTINT: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC); break;
3000 default:
3001 AssertMsgFailed(("enmType=%d\n", enmType));
3002 break;
3003 }
3004
3005 /*
3006 * We need to wake up the target CPU if we're not on EMT.
3007 */
3008#if defined(IN_RING0)
3009 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3010 VMCPUID idCpu = pVCpu->idCpu;
3011 if ( enmType != PDMAPICIRQ_HARDWARE
3012 && VMMGetCpuId(pVM) != idCpu)
3013 {
3014 switch (VMCPU_GET_STATE(pVCpu))
3015 {
3016 case VMCPUSTATE_STARTED_EXEC:
3017 GVMMR0SchedPokeNoGVMNoLock(pVM, idCpu);
3018 break;
3019
3020 case VMCPUSTATE_STARTED_HALTED:
3021 GVMMR0SchedWakeUpNoGVMNoLock(pVM, idCpu);
3022 break;
3023
3024 default:
3025 break; /* nothing to do in other states. */
3026 }
3027 }
3028#elif defined(IN_RING3)
3029 if (enmType != PDMAPICIRQ_HARDWARE)
3030 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM | VMNOTIFYFF_FLAGS_POKE);
3031#endif
3032}
3033
3034
3035/**
3036 * Clears the interrupt pending force-flag.
3037 *
3038 * @param pVCpu The cross context virtual CPU structure.
3039 * @param enmType The IRQ type.
3040 */
3041void apicClearInterruptFF(PVMCPUCC pVCpu, PDMAPICIRQ enmType)
3042{
3043#ifdef IN_RING3
3044 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
3045 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
3046#endif
3047
3048 /* NMI/SMI can't be cleared. */
3049 switch (enmType)
3050 {
3051 case PDMAPICIRQ_HARDWARE: VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC); break;
3052 case PDMAPICIRQ_EXTINT: VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC); break;
3053 default:
3054 AssertMsgFailed(("enmType=%d\n", enmType));
3055 break;
3056 }
3057}
3058
3059
3060/**
3061 * Posts an interrupt to a target APIC.
3062 *
3063 * This function handles interrupts received from the system bus or
3064 * interrupts generated locally from the LVT or via a self IPI.
3065 *
3066 * Don't use this function to try and deliver ExtINT style interrupts.
3067 *
3068 * @returns true if the interrupt was accepted, false otherwise.
3069 * @param pVCpu The cross context virtual CPU structure.
3070 * @param uVector The vector of the interrupt to be posted.
3071 * @param enmTriggerMode The trigger mode of the interrupt.
3072 * @param uSrcTag The interrupt source tag (debugging).
3073 *
3074 * @thread Any.
3075 */
3076bool apicPostInterrupt(PVMCPUCC pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode, uint32_t uSrcTag)
3077{
3078 Assert(pVCpu);
3079 Assert(uVector > XAPIC_ILLEGAL_VECTOR_END);
3080
3081 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3082 PCAPIC pApic = VM_TO_APIC(pVM);
3083 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3084 bool fAccepted = true;
3085
3086 STAM_PROFILE_START(&pApicCpu->StatPostIntr, a);
3087
3088 /*
3089 * Only post valid interrupt vectors.
3090 * See Intel spec. 10.5.2 "Valid Interrupt Vectors".
3091 */
3092 if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
3093 {
3094 /*
3095 * If the interrupt is already pending in the IRR we can skip the
3096         * potentially expensive operation of poking the guest EMT out of execution.
3097 */
3098 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
3099 if (!apicTestVectorInReg(&pXApicPage->irr, uVector)) /* PAV */
3100 {
3101 /* Update the interrupt source tag (debugging). */
3102 if (!pApicCpu->auSrcTags[uVector])
3103 pApicCpu->auSrcTags[uVector] = uSrcTag;
3104 else
3105 pApicCpu->auSrcTags[uVector] |= RT_BIT_32(31);
3106
3107 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u uVector=%#x\n", VMMGetCpuId(pVM), pVCpu->idCpu, uVector));
3108 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
3109 {
3110 if (pApic->fPostedIntrsEnabled)
3111 { /** @todo posted-interrupt call to hardware */ }
3112 else
3113 {
3114 apicSetVectorInPib(pApicCpu->CTX_SUFF(pvApicPib), uVector);
3115 uint32_t const fAlreadySet = apicSetNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
3116 if (!fAlreadySet)
3117 {
3118 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for edge-triggered intr. uVector=%#x\n", uVector));
3119 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
3120 }
3121 }
3122 }
3123 else
3124 {
3125 /*
3126                 * Level-triggered interrupts require updating of the TMR and thus cannot be
3127 * delivered asynchronously.
3128 */
3129 apicSetVectorInPib(&pApicCpu->ApicPibLevel, uVector);
3130 uint32_t const fAlreadySet = apicSetNotificationBitInPib(&pApicCpu->ApicPibLevel);
3131 if (!fAlreadySet)
3132 {
3133 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for level-triggered intr. uVector=%#x\n", uVector));
3134 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
3135 }
3136 }
3137 }
3138 else
3139 {
3140 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u. Vector %#x Already in IRR, skipping\n", VMMGetCpuId(pVM),
3141 pVCpu->idCpu, uVector));
3142 STAM_COUNTER_INC(&pApicCpu->StatPostIntrAlreadyPending);
3143 }
3144 }
3145 else
3146 {
3147 fAccepted = false;
3148 apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);
3149 }
3150
3151 STAM_PROFILE_STOP(&pApicCpu->StatPostIntr, a);
3152 return fAccepted;
3153}
3154
3155
3156/**
3157 * Starts the APIC timer.
3158 *
3159 * @param pVCpu The cross context virtual CPU structure.
3160 * @param   uInitialCount   The timer's Initial-Count Register (ICR); must be > 0.
3162 * @thread Any.
3163 */
3164void apicStartTimer(PVMCPUCC pVCpu, uint32_t uInitialCount)
3165{
3166 Assert(pVCpu);
3167 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3168 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
3169 Assert(PDMDevHlpTimerIsLockOwner(pDevIns, pApicCpu->hTimer));
3170 Assert(uInitialCount > 0);
3171
3172 PCXAPICPAGE pXApicPage = APICCPU_TO_CXAPICPAGE(pApicCpu);
3173 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
3174 uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;
3175
3176 Log2(("APIC%u: apicStartTimer: uInitialCount=%#RX32 uTimerShift=%u cTicksToNext=%RU64\n", pVCpu->idCpu, uInitialCount,
3177 uTimerShift, cTicksToNext));
3178
3179 /*
3180 * The assumption here is that the timer doesn't tick during this call
3181 * and thus setting a relative time to fire next is accurate. The advantage
3182 * however is updating u64TimerInitial 'atomically' while setting the next
3183 * tick.
3184 */
3185 PDMDevHlpTimerSetRelative(pDevIns, pApicCpu->hTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
3186 apicHintTimerFreq(pDevIns, pApicCpu, uInitialCount, uTimerShift);
3187}
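/*
 * Worked example (illustrative figures): with an initial count of 0x10000 and the DCR
 * configured for divide-by-16 (uTimerShift = 4), the timer is armed to fire
 * 0x10000 << 4 = 0x100000 timer-clock ticks from now.
 */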
3188
3189
3190/**
3191 * Stops the APIC timer.
3192 *
3193 * @param pVCpu The cross context virtual CPU structure.
3194 * @thread Any.
3195 */
3196static void apicStopTimer(PVMCPUCC pVCpu)
3197{
3198 Assert(pVCpu);
3199 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3200 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
3201 Assert(PDMDevHlpTimerIsLockOwner(pDevIns, pApicCpu->hTimer));
3202
3203 Log2(("APIC%u: apicStopTimer\n", pVCpu->idCpu));
3204
3205 PDMDevHlpTimerStop(pDevIns, pApicCpu->hTimer); /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
3206 pApicCpu->uHintedTimerInitialCount = 0;
3207 pApicCpu->uHintedTimerShift = 0;
3208}
3209
3210
3211/**
3212 * Queues a pending interrupt as in-service.
3213 *
3214 * This function should only be needed without virtualized APIC
3215 * registers. With virtualized APIC registers, it's sufficient to keep
3216 * the interrupts pending in the IRR as the hardware takes care of
3217 * virtual interrupt delivery.
3218 *
3219 * @returns true if the interrupt was queued to in-service interrupts,
3220 * false otherwise.
3221 * @param pVCpu The cross context virtual CPU structure.
3222 * @param u8PendingIntr The pending interrupt to queue as
3223 * in-service.
3224 *
3225 * @remarks This assumes the caller has done the necessary checks and
3226 *          is ready to actually service the interrupt (TPR,
3227 * interrupt shadow etc.)
3228 */
3229VMM_INT_DECL(bool) APICQueueInterruptToService(PVMCPUCC pVCpu, uint8_t u8PendingIntr)
3230{
3231 VMCPU_ASSERT_EMT(pVCpu);
3232
3233 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3234 PAPIC pApic = VM_TO_APIC(pVM);
3235 Assert(!pApic->fVirtApicRegsEnabled);
3236 NOREF(pApic);
3237
3238 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3239 bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
3240 if (fIsPending)
3241 {
3242 apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
3243 apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
3244 apicUpdatePpr(pVCpu);
3245 return true;
3246 }
3247 return false;
3248}
3249
3250
3251/**
3252 * De-queues a pending interrupt from in-service.
3253 *
3254 * This undoes APICQueueInterruptToService() for premature VM-exits before event
3255 * injection.
3256 *
3257 * @param pVCpu The cross context virtual CPU structure.
3258 * @param u8PendingIntr The pending interrupt to de-queue from
3259 * in-service.
3260 */
3261VMM_INT_DECL(void) APICDequeueInterruptFromService(PVMCPUCC pVCpu, uint8_t u8PendingIntr)
3262{
3263 VMCPU_ASSERT_EMT(pVCpu);
3264
3265 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3266 PAPIC pApic = VM_TO_APIC(pVM);
3267 Assert(!pApic->fVirtApicRegsEnabled);
3268 NOREF(pApic);
3269
3270 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3271 bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
3272 if (fInService)
3273 {
3274 apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
3275 apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
3276 apicUpdatePpr(pVCpu);
3277 }
3278}
3279
3280
3281/**
3282 * Updates pending interrupts from the pending-interrupt bitmaps to the IRR.
3283 *
3284 * @param pVCpu The cross context virtual CPU structure.
3285 *
3286 * @note    NEM/win is ASSUMING that an up-to-date TPR is not required here.
3287 */
3288VMMDECL(void) APICUpdatePendingInterrupts(PVMCPUCC pVCpu)
3289{
3290 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3291
3292 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3293 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3294 bool fHasPendingIntrs = false;
3295
3296 Log3(("APIC%u: APICUpdatePendingInterrupts:\n", pVCpu->idCpu));
3297 STAM_PROFILE_START(&pApicCpu->StatUpdatePendingIntrs, a);
3298
3299 /* Update edge-triggered pending interrupts. */
3300 PAPICPIB pPib = (PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib);
3301 for (;;)
3302 {
3303 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
3304 if (!fAlreadySet)
3305 break;
3306
3307 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
3308 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
3309 {
3310 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
3311 if (u64Fragment)
3312 {
3313 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
3314 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
3315
3316 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
3317 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3318
3319 pXApicPage->tmr.u[idxReg].u32Reg &= ~u32FragmentLo;
3320 pXApicPage->tmr.u[idxReg + 1].u32Reg &= ~u32FragmentHi;
3321 fHasPendingIntrs = true;
3322 }
3323 }
3324 }
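    /*
     * Note on the loop shape above: the notification bit cleared by
     * apicClearNotificationBitInPib() is the "work pending" latch set by
     * apicPostInterrupt(). Re-checking it after draining the bitmap closes the race
     * where another thread posts a fresh vector between the atomic xchg of a fragment
     * and loop exit.
     */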
3325
3326 /* Update level-triggered pending interrupts. */
3327 pPib = (PAPICPIB)&pApicCpu->ApicPibLevel;
3328 for (;;)
3329 {
3330 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)&pApicCpu->ApicPibLevel);
3331 if (!fAlreadySet)
3332 break;
3333
3334 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
3335 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
3336 {
3337 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
3338 if (u64Fragment)
3339 {
3340 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
3341 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
3342
3343 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
3344 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3345
3346 pXApicPage->tmr.u[idxReg].u32Reg |= u32FragmentLo;
3347 pXApicPage->tmr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3348 fHasPendingIntrs = true;
3349 }
3350 }
3351 }
3352
3353 STAM_PROFILE_STOP(&pApicCpu->StatUpdatePendingIntrs, a);
3354 Log3(("APIC%u: APICUpdatePendingInterrupts: fHasPendingIntrs=%RTbool\n", pVCpu->idCpu, fHasPendingIntrs));
3355
3356 if ( fHasPendingIntrs
3357 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
3358 apicSignalNextPendingIntr(pVCpu);
3359}
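

/*
 * Illustrative sketch (not part of the device code): how a vector number maps
 * onto the pending-interrupt bitmap (PIB) fragments and the 32-bit IRR/TMR
 * sub-registers merged above.  Plain arithmetic; the helper is hypothetical.
 */
#if 0
static void apicExamplePibLayout(uint8_t uVector)
{
    size_t const idxPib  = uVector / 64;    /* au64VectorBitmap[] qword holding the vector */
    size_t const iPibBit = uVector % 64;    /* bit within that qword */
    size_t const idxReg  = uVector / 32;    /* 32-bit IRR/TMR sub-register (u[idxReg].u32Reg) */
    size_t const iRegBit = uVector % 32;    /* bit within that sub-register */
    /* Each 64-bit PIB fragment spans two consecutive 32-bit sub-registers,
       which is what the idxReg / idxReg + 1 merging above relies on. */
    NOREF(idxPib); NOREF(iPibBit); NOREF(idxReg); NOREF(iRegBit);
}
#endif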


/**
 * Gets the highest priority pending interrupt.
 *
 * @returns true if any interrupt is pending, false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pu8PendingIntr  Where to store the interrupt vector if the
 *                          interrupt is pending.
 */
VMM_INT_DECL(bool) APICGetHighestPendingInterrupt(PVMCPUCC pVCpu, uint8_t *pu8PendingIntr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
}
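

/*
 * Illustrative sketch (not part of the device code): querying the highest
 * pending vector is a pure read of the IRR; unlike queuing to service it does
 * not acknowledge anything.  Hypothetical helper, EMT only.
 */
#if 0
static void apicExampleLogHighestPending(PVMCPUCC pVCpu)
{
    uint8_t uVector = 0;
    if (APICGetHighestPendingInterrupt(pVCpu, &uVector))
        Log3(("APIC%u: highest pending vector %#x\n", pVCpu->idCpu, uVector));
}
#endif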


/**
 * Posts an interrupt to a target APIC, Hyper-V interface.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uVector         The vector of the interrupt to be posted.
 * @param   fAutoEoi        Whether this interrupt has automatic EOI
 *                          treatment.
 * @param   enmTriggerMode  The trigger mode of the interrupt.
 *
 * @thread  Any.
 */
VMM_INT_DECL(void) APICHvSendInterrupt(PVMCPUCC pVCpu, uint8_t uVector, bool fAutoEoi, XAPICTRIGGERMODE enmTriggerMode)
{
    Assert(pVCpu);
    Assert(!fAutoEoi);    /** @todo AutoEOI. */
    RT_NOREF(fAutoEoi);
    apicPostInterrupt(pVCpu, uVector, enmTriggerMode, 0 /* uSrcTag */);
}
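

/*
 * Illustrative sketch (not part of the device code): posting a fixed,
 * edge-triggered interrupt through the Hyper-V interface.  The vector is an
 * arbitrary example value; fAutoEoi must be false as asserted above.
 */
#if 0
static void apicExampleHvPostInterrupt(PVMCPUCC pVCpu)
{
    APICHvSendInterrupt(pVCpu, 0x40 /* example vector */, false /* fAutoEoi */, XAPICTRIGGERMODE_EDGE);
}
#endif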


/**
 * Sets the Task Priority Register (TPR), Hyper-V interface.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uTpr    The TPR value to set.
 *
 * @remarks Validates like in x2APIC mode.
 */
VMM_INT_DECL(VBOXSTRICTRC) APICHvSetTpr(PVMCPUCC pVCpu, uint8_t uTpr)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT(pVCpu);
    return apicSetTprEx(pVCpu, uTpr, true /* fForceX2ApicBehaviour */);
}


/**
 * Gets the Task Priority Register (TPR), Hyper-V interface.
 *
 * @returns The TPR value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(uint8_t) APICHvGetTpr(PVMCPUCC pVCpu)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * The APIC could be operating in xAPIC mode, so we should not use the apicReadMsr()
     * interface, which validates the APIC mode and would throw a #GP(0) if not in x2APIC
     * mode.  We could use the apicReadRegister() MMIO interface, but that would require
     * fetching the PDMDEVINS pointer, so just read the APIC page directly.
     */
    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    return apicReadRaw32(pXApicPage, XAPIC_OFF_TPR);
}
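

/*
 * Illustrative sketch (not part of the device code): a TPR round-trip through
 * the Hyper-V interface.  Raising the TPR masks all vectors whose priority
 * class (vector >> 4) is at or below the TPR's priority class.  Hypothetical
 * helper.
 */
#if 0
static VBOXSTRICTRC apicExampleHvRaiseTpr(PVMCPUCC pVCpu, uint8_t uNewTpr)
{
    uint8_t const uOldTpr = APICHvGetTpr(pVCpu);
    if (uNewTpr > uOldTpr)
        return APICHvSetTpr(pVCpu, uNewTpr);
    return VINF_SUCCESS;
}
#endif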


/**
 * Sets the Interrupt Command Register (ICR), Hyper-V interface.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uIcr    The ICR value to set.
 */
VMM_INT_DECL(VBOXSTRICTRC) APICHvSetIcr(PVMCPUCC pVCpu, uint64_t uIcr)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT(pVCpu);
    return apicSetIcr(pVCpu, uIcr, VINF_CPUM_R3_MSR_WRITE);
}


/**
 * Gets the Interrupt Command Register (ICR), Hyper-V interface.
 *
 * @returns The ICR value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(uint64_t) APICHvGetIcr(PVMCPUCC pVCpu)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT(pVCpu);
    return apicGetIcrNoCheck(pVCpu);
}
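

/*
 * Illustrative sketch (not part of the device code): composing an x2APIC
 * style 64-bit ICR value by hand.  Per the Intel SDM layout, bits 7:0 are the
 * vector, bits 10:8 the delivery mode (000b = fixed), bit 11 the destination
 * mode (0 = physical), bit 15 the trigger mode (0 = edge) and bits 63:32 the
 * destination APIC ID; leaving every other field zero thus yields a plain
 * fixed IPI.  Hypothetical helper.
 */
#if 0
static VBOXSTRICTRC apicExampleHvSendFixedIpi(PVMCPUCC pVCpu, uint8_t uVector, uint32_t idDestApic)
{
    uint64_t const uIcr = RT_MAKE_U64(uVector /* fixed, physical, edge */, idDestApic);
    return APICHvSetIcr(pVCpu, uIcr);
}
#endif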


/**
 * Sets the End-Of-Interrupt (EOI) register, Hyper-V interface.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uEoi    The EOI value.
 */
VMM_INT_DECL(VBOXSTRICTRC) APICHvSetEoi(PVMCPUCC pVCpu, uint32_t uEoi)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
    return apicSetEoi(pVCpu, uEoi, VINF_CPUM_R3_MSR_WRITE, true /* fForceX2ApicBehaviour */);
}
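

/*
 * Illustrative sketch (not part of the device code): signalling EOI through
 * the Hyper-V interface.  Since this validates like x2APIC, where a non-zero
 * EOI write is invalid, zero is passed.  Hypothetical helper.
 */
#if 0
static VBOXSTRICTRC apicExampleHvEoi(PVMCPUCC pVCpu)
{
    return APICHvSetEoi(pVCpu, 0 /* uEoi */);
}
#endif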


/**
 * Gets the APIC page pointers for the specified VCPU.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pHCPhys Where to store the host-context physical address.
 * @param   pR0Ptr  Where to store the ring-0 address.
 * @param   pR3Ptr  Where to store the ring-3 address (optional).
 */
VMM_INT_DECL(int) APICGetApicPageForCpu(PCVMCPUCC pVCpu, PRTHCPHYS pHCPhys, PRTR0PTR pR0Ptr, PRTR3PTR pR3Ptr)
{
    AssertReturn(pVCpu,   VERR_INVALID_PARAMETER);
    AssertReturn(pHCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pR0Ptr,  VERR_INVALID_PARAMETER);

    Assert(PDMHasApic(pVCpu->CTX_SUFF(pVM)));

    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    *pHCPhys = pApicCpu->HCPhysApicPage;
    *pR0Ptr  = pApicCpu->pvApicPageR0;
    if (pR3Ptr)
        *pR3Ptr = pApicCpu->pvApicPageR3;
    return VINF_SUCCESS;
}
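

/*
 * Illustrative sketch (not part of the device code): fetching the per-VCPU
 * APIC page addresses, e.g. for handing the page to hardware-assisted
 * virtualization code.  Hypothetical helper.
 */
#if 0
static int apicExampleQueryApicPage(PVMCPUCC pVCpu)
{
    RTHCPHYS HCPhysApicPage = NIL_RTHCPHYS;
    RTR0PTR  R0PtrApicPage  = NIL_RTR0PTR;
    int rc = APICGetApicPageForCpu(pVCpu, &HCPhysApicPage, &R0PtrApicPage, NULL /* pR3Ptr */);
    if (RT_SUCCESS(rc))
        Log3(("APIC%u: APIC page at %RHp\n", pVCpu->idCpu, HCPhysApicPage));
    return rc;
}
#endif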

#ifndef IN_RING3

/**
 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
 */
static DECLCALLBACK(int) apicRZConstruct(PPDMDEVINS pDevIns)
{
    PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
    PAPICDEV pThis = PDMDEVINS_2_DATA(pDevIns, PAPICDEV);
    PVMCC    pVM   = PDMDevHlpGetVM(pDevIns);

    pVM->apicr0.s.pDevInsR0 = pDevIns;

    int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
    AssertRCReturn(rc, rc);

    rc = PDMDevHlpApicSetUpContext(pDevIns);
    AssertRCReturn(rc, rc);

    rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, apicWriteMmio, apicReadMmio, NULL /*pvUser*/);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
#endif /* !IN_RING3 */

/**
 * APIC device registration structure.
 */
const PDMDEVREG g_DeviceAPIC =
{
    /* .u32Version = */ PDM_DEVREG_VERSION,
    /* .uReserved0 = */ 0,
    /* .szName = */ "apic",
    /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE
                    | PDM_DEVREG_FLAGS_REQUIRE_R0 | PDM_DEVREG_FLAGS_REQUIRE_RC,
    /* .fClass = */ PDM_DEVREG_CLASS_PIC,
    /* .cMaxInstances = */ 1,
    /* .uSharedVersion = */ 42,
    /* .cbInstanceShared = */ sizeof(APICDEV),
    /* .cbInstanceCC = */ 0,
    /* .cbInstanceRC = */ 0,
    /* .cMaxPciDevices = */ 0,
    /* .cMaxMsixVectors = */ 0,
    /* .pszDescription = */ "Advanced Programmable Interrupt Controller",
#if defined(IN_RING3)
    /* .szRCMod = */ "VMMRC.rc",
    /* .szR0Mod = */ "VMMR0.r0",
    /* .pfnConstruct = */ apicR3Construct,
    /* .pfnDestruct = */ apicR3Destruct,
    /* .pfnRelocate = */ apicR3Relocate,
    /* .pfnMemSetup = */ NULL,
    /* .pfnPowerOn = */ NULL,
    /* .pfnReset = */ apicR3Reset,
    /* .pfnSuspend = */ NULL,
    /* .pfnResume = */ NULL,
    /* .pfnAttach = */ NULL,
    /* .pfnDetach = */ NULL,
    /* .pfnQueryInterface = */ NULL,
    /* .pfnInitComplete = */ apicR3InitComplete,
    /* .pfnPowerOff = */ NULL,
    /* .pfnSoftReset = */ NULL,
    /* .pfnReserved0 = */ NULL,
    /* .pfnReserved1 = */ NULL,
    /* .pfnReserved2 = */ NULL,
    /* .pfnReserved3 = */ NULL,
    /* .pfnReserved4 = */ NULL,
    /* .pfnReserved5 = */ NULL,
    /* .pfnReserved6 = */ NULL,
    /* .pfnReserved7 = */ NULL,
#elif defined(IN_RING0)
    /* .pfnEarlyConstruct = */ NULL,
    /* .pfnConstruct = */ apicRZConstruct,
    /* .pfnDestruct = */ NULL,
    /* .pfnFinalDestruct = */ NULL,
    /* .pfnRequest = */ NULL,
    /* .pfnReserved0 = */ NULL,
    /* .pfnReserved1 = */ NULL,
    /* .pfnReserved2 = */ NULL,
    /* .pfnReserved3 = */ NULL,
    /* .pfnReserved4 = */ NULL,
    /* .pfnReserved5 = */ NULL,
    /* .pfnReserved6 = */ NULL,
    /* .pfnReserved7 = */ NULL,
#elif defined(IN_RC)
    /* .pfnConstruct = */ apicRZConstruct,
    /* .pfnReserved0 = */ NULL,
    /* .pfnReserved1 = */ NULL,
    /* .pfnReserved2 = */ NULL,
    /* .pfnReserved3 = */ NULL,
    /* .pfnReserved4 = */ NULL,
    /* .pfnReserved5 = */ NULL,
    /* .pfnReserved6 = */ NULL,
    /* .pfnReserved7 = */ NULL,
#else
# error "Not in IN_RING3, IN_RING0 or IN_RC!"
#endif
    /* .u32VersionEnd = */ PDM_DEVREG_VERSION
};