VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/APICAll.cpp@60404

Last change on this file since 60404 was 60377, checked in by vboxsync, 9 years ago

VMM: Fix APIC, CPUM init ordering for the new APIC code while still retaining the old code. Namely, consistent MSR APIC base caching and APIC page dependency on PDM construction, see bugref:8245:46 for more details.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 80.6 KB
1/* $Id: APICAll.cpp 60377 2016-04-07 15:53:36Z vboxsync $ */
2/** @file
3 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_APIC
23#include "APICInternal.h"
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/vmcpuset.h>
27
28/*********************************************************************************************************************************
29* Global Variables *
30*********************************************************************************************************************************/
31#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
32/** An ordered array of valid LVT masks. */
33static const uint32_t g_au32LvtValidMasks[] =
34{
35 XAPIC_LVT_TIMER,
36 XAPIC_LVT_THERMAL,
37 XAPIC_LVT_PERF,
38 XAPIC_LVT_LINT0,
39 XAPIC_LVT_LINT1,
40 XAPIC_LVT_ERROR
41};
42#endif
43
44#if 0
45/** @todo CMCI */
46static const uint32_t g_au32LvtExtValidMask[] =
47{
48 XAPIC_LVT_CMCI
49};
50#endif
51
52
53/**
54 * Checks if a vector is set in an APIC 256-bit sparse register.
55 *
56 * @returns true if the specified vector is set, false otherwise.
57 * @param pApicReg The APIC 256-bit sparse register.
58 * @param uVector The vector to check if set.
59 */
60DECLINLINE(bool) apicTestVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
61{
62 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
63 return ASMBitTest(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
64}
65
66
67/**
68 * Sets the vector in an APIC 256-bit sparse register.
69 *
70 * @param pApicReg The APIC 256-bit sparse register.
71 * @param uVector The vector to set.
72 */
73DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
74{
75 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
76 ASMAtomicBitSet(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
77}
78
79
80/**
81 * Clears the vector in an APIC 256-bit sparse register.
82 *
83 * @param pApicReg The APIC 256-bit sparse register.
84 * @param uVector The vector to clear.
85 */
86DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
87{
88 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
89 ASMAtomicBitClear(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
90}
91
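/*
 * Worked example of the vector-to-bit mapping used by these helpers, assuming
 * XAPIC_REG256_VECTOR_OFF/XAPIC_REG256_VECTOR_BIT split a vector into a byte
 * offset and a bit index with the 32-bit fragments at 16-byte strides as in
 * the xAPIC layout: vector 0x41 (65) selects fragment 65 / 32 = 2 (byte
 * offset 2 * 16 = 32) at bit position 65 % 32 = 1.
 */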
92
93/**
94 * Checks if a vector is set in an APIC Pending Interrupt Bitmap (PIB).
95 *
96 * @returns true if the specified vector is set, false otherwise.
97 * @param pvPib Opaque pointer to the PIB.
98 * @param uVector The vector to check if set.
99 */
100DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
101{
102 return ASMBitTest(pvPib, uVector);
103}
104
105
106/**
107 * Atomically tests and sets the PIB notification bit.
108 *
109 * @returns true if the bit was already set, false otherwise.
110 * @param pvPib Opaque pointer to the PIB.
111 */
112DECLINLINE(bool) apicSetNotificationBitInPib(volatile void *pvPib)
113{
114 return ASMAtomicBitTestAndSet(pvPib, XAPIC_PIB_NOTIFICATION_BIT);
115}
116
117
118/**
119 * Atomically tests and clears the PIB notification bit.
120 *
121 * @returns true if the bit was already set, false otherwise.
122 */
123DECLINLINE(bool) apicClearNotificationBitInPib(volatile void *pvPib)
124{
125 return ASMAtomicBitTestAndClear(pvPib, XAPIC_PIB_NOTIFICATION_BIT);
126}
127
128
129/**
130 * Sets the vector in an APIC Pending Interrupt Bitmap (PIB).
131 *
132 * @param pvPib Opaque pointer to the PIB.
133 * @param uVector The vector to set.
134 */
135DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
136{
137 ASMAtomicBitSet(pvPib, uVector);
138}
139
140
141/**
142 * Clears the vector in an APIC Pending Interrupt Bitmap (PIB).
143 *
144 * @param pvPib Opaque pointer to the PIB.
145 * @param uVector The vector to clear.
146 */
147DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
148{
149 ASMAtomicBitClear(pvPib, uVector);
150}
151
152
153/**
154 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
155 * register.
156 *
157 * @param pApicReg The APIC 256-bit sparse register.
158 * @param idxFragment The index of the 32-bit fragment in @a
159 * pApicReg.
160 * @param u32Fragment The 32-bit vector fragment.
161 */
162DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
163{
164 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
165 ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
166}
167
168
169/**
170 * Reports and returns appropriate error code for invalid MSR accesses.
171 *
172 * @returns Strict VBox status code.
173 * @retval VINF_CPUM_R3_MSR_WRITE if the MSR write could not be serviced in the
174 * current context (raw-mode or ring-0).
175 * @retval VINF_CPUM_R3_MSR_READ if the MSR read could not be serviced in the
176 * current context (raw-mode or ring-0).
177 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
178 * appropriate actions.
179 *
180 * @param pVCpu The cross context virtual CPU structure.
181 * @param u32Reg The MSR being accessed.
182 * @param enmAccess The invalid-access type.
183 */
184static VBOXSTRICTRC apicMsrAccessError(PVMCPU pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
185{
186 static struct
187 {
188 const char *pszBefore; /* The error message before printing the MSR index */
189 const char *pszAfter; /* The error message after printing the MSR index */
190 int rcR0; /* The ring-0 error code */
191 } const s_aAccess[] =
192 {
193 { "read MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_READ },
194 { "write MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_WRITE },
195 { "read reserved/unknown MSR", "", VINF_CPUM_R3_MSR_READ },
196 { "write reserved/unknown MSR", "", VINF_CPUM_R3_MSR_WRITE },
197 { "read write-only MSR", "", VINF_CPUM_R3_MSR_READ },
198 { "write read-only MSR", "", VINF_CPUM_R3_MSR_WRITE },
199 { "read reserved bits of MSR", "", VINF_CPUM_R3_MSR_READ },
200 { "write reserved bits of MSR", "", VINF_CPUM_R3_MSR_WRITE },
201 { "write an invalid value to MSR", "", VINF_CPUM_R3_MSR_WRITE }
202 };
203 AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);
204
205 size_t const i = enmAccess;
206 Assert(i < RT_ELEMENTS(s_aAccess));
207#ifdef IN_RING3
208 LogRelMax(5, ("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg,
209 s_aAccess[i].pszAfter));
210 return VERR_CPUM_RAISE_GP_0;
211#else
212 return s_aAccess[i].rcR0;
213#endif
214}
215
216
217/**
218 * Gets the APIC mode given the base MSR value.
219 *
220 * @returns The APIC mode.
221 * @param uApicBaseMsr The APIC Base MSR value.
222 */
223static APICMODE apicGetMode(uint64_t uApicBaseMsr)
224{
225 uint32_t const uMode = MSR_APICBASE_GET_MODE(uApicBaseMsr);
226 APICMODE const enmMode = (APICMODE)uMode;
227#ifdef VBOX_STRICT
228 /* Paranoia. */
229 switch (uMode)
230 {
231 case APICMODE_DISABLED:
232 case APICMODE_INVALID:
233 case APICMODE_XAPIC:
234 case APICMODE_X2APIC:
235 break;
236 default:
237 AssertMsgFailed(("Invalid mode"));
238 }
239#endif
240 return enmMode;
241}
242
243
244/**
245 * Returns whether the APIC is hardware enabled or not.
246 *
247 * @returns true if enabled, false otherwise.
248 */
249DECLINLINE(bool) apicIsEnabled(PVMCPU pVCpu)
250{
251 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
252 return MSR_APICBASE_IS_ENABLED(pApicCpu->uApicBaseMsr);
253}
254
255
256/**
257 * Finds the most significant set bit in an APIC 256-bit sparse register.
258 *
259 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
260 * @param pReg The APIC 256-bit sparse register.
261 * @param rcNotFound What to return when no bit is set.
262 */
263static int apicGetLastSetBit(volatile const XAPIC256BITREG *pReg, int rcNotFound)
264{
265 unsigned const cBitsPerFragment = sizeof(pReg->u[0].u32Reg) * 8;
266 ssize_t const cFragments = RT_ELEMENTS(pReg->u);
267 for (ssize_t i = cFragments - 1; i >= 0; i--)
268 {
269 uint32_t const uFragment = pReg->u[i].u32Reg;
270 if (uFragment)
271 {
272 unsigned idxSetBit = ASMBitLastSetU32(uFragment);
273 --idxSetBit;
274 idxSetBit += (i * cBitsPerFragment);
275 return idxSetBit;
276 }
277 }
278 return rcNotFound;
279}
280
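/*
 * Example: with fragment 7 zero and fragment 6 containing 0x00000100, the scan
 * above stops at i = 6; ASMBitLastSetU32 returns the 1-based bit index 9, and
 * after the decrement and fragment offset this yields 8 + 6 * 32 = vector 200.
 */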
281
282/**
283 * Gets the highest priority pending interrupt.
284 *
285 * @returns true if any interrupt is pending, false otherwise.
286 * @param pVCpu The cross context virtual CPU structure.
287 * @param pu8PendingIntr Where to store the interrupt vector if the
288 * interrupt is pending.
289 */
290static bool apicGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
291{
292 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
293 int const irrv = apicGetLastSetBit(&pXApicPage->irr, -1);
294 if (irrv >= 0)
295 {
296 Assert(irrv <= (int)UINT8_MAX);
297 *pu8PendingIntr = (uint8_t)irrv;
298 return true;
299 }
300 return false;
301}
302
303
304/**
305 * Reads a 32-bit register at a specified offset.
306 *
307 * @returns The value at the specified offset.
308 * @param pXApicPage The xAPIC page.
309 * @param offReg The offset of the register being read.
310 */
311DECLINLINE(uint32_t) apicReadRaw32(PCXAPICPAGE pXApicPage, uint16_t offReg)
312{
313 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
314 uint8_t const *pbXApic = (const uint8_t *)pXApicPage;
315 uint32_t const uValue = *(const uint32_t *)(pbXApic + offReg);
316 return uValue;
317}
318
319
320/**
321 * Writes a 32-bit register at a specified offset.
322 *
323 * @param pXApicPage The xAPIC page.
324 * @param offReg The offset of the register being written.
325 * @param uReg The value of the register.
326 */
327DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
328{
329 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
330 uint8_t *pbXApic = (uint8_t *)pXApicPage;
331 *(uint32_t *)(pbXApic + offReg) = uReg;
332}
333
334
335/**
336 * Sets an error in the internal ESR of the specified APIC.
337 *
338 * @param pVCpu The cross context virtual CPU structure.
339 * @param uError The error.
340 * @thread Any.
341 */
342DECLINLINE(void) apicSetError(PVMCPU pVCpu, uint32_t uError)
343{
344 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
345 ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
346}
347
348
349/**
350 * Clears all errors in the internal ESR.
351 *
352 * @returns The value of the internal ESR before clearing.
353 * @param pVCpu The cross context virtual CPU structure.
354 */
355DECLINLINE(uint32_t) apicClearAllErrors(PVMCPU pVCpu)
356{
357 VMCPU_ASSERT_EMT(pVCpu);
358 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
359 return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
360}
361
362
363/**
364 * Signals the guest if a pending interrupt is ready to be serviced.
365 *
366 * @param pVCpu The cross context virtual CPU structure.
367 */
368static void apicSignalNextPendingIntr(PVMCPU pVCpu)
369{
370 VMCPU_ASSERT_EMT(pVCpu);
371
372 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
373 if (pXApicPage->svr.u.fApicSoftwareEnable)
374 {
375 int const irrv = apicGetLastSetBit(&pXApicPage->irr, VERR_NOT_FOUND);
376 if (irrv >= 0)
377 {
378 Assert(irrv <= (int)UINT8_MAX);
379 uint8_t const uVector = irrv;
380 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
381 if ( !uPpr
382 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
383 {
384 APICSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
385 }
386 }
387 }
388 else
389 APICClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
390}
391
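/*
 * Note on the comparison above: only the priority class in bits 7:4 of the
 * vector and the PPR participate. E.g. with PPR = 0x40, pending vectors
 * 0x40..0x4f remain blocked while any vector of 0x50 or higher sets the
 * interrupt force-flag.
 */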
392
393/**
394 * Sets the Spurious-Interrupt Vector Register (SVR).
395 *
396 * @returns Strict VBox status code.
397 * @param pVCpu The cross context virtual CPU structure.
398 * @param uSvr The SVR value.
399 */
400static VBOXSTRICTRC apicSetSvr(PVMCPU pVCpu, uint32_t uSvr)
401{
402 VMCPU_ASSERT_EMT(pVCpu);
403
404 uint32_t uValidMask = XAPIC_SVR;
405 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
406 if (pXApicPage->version.u.fEoiBroadcastSupression)
407 uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;
408
409 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
410 && (uSvr & ~uValidMask))
411 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);
412
413 apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
414 if (!pXApicPage->svr.u.fApicSoftwareEnable)
415 {
416 /** @todo CMCI. */
417 pXApicPage->lvt_timer.u.u1Mask = 1;
418#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
419 pXApicPage->lvt_thermal.u.u1Mask = 1;
420#endif
421 pXApicPage->lvt_perf.u.u1Mask = 1;
422 pXApicPage->lvt_lint0.u.u1Mask = 1;
423 pXApicPage->lvt_lint1.u.u1Mask = 1;
424 pXApicPage->lvt_error.u.u1Mask = 1;
425 }
426 return VINF_SUCCESS;
427}
428
429
430/**
431 * Sends an interrupt to one or more APICs.
432 *
433 * @returns Strict VBox status code.
434 * @param pVCpu The cross context virtual CPU structure.
435 * @param uVector The interrupt vector.
436 * @param enmTriggerMode The trigger mode.
437 * @param enmDeliveryMode The delivery mode.
438 * @param pDestCpuSet The destination CPU set.
439 * @param rcRZ The return code if the operation cannot be
440 * performed in the current context.
441 */
442static VBOXSTRICTRC apicSendIntr(PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
443 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, int rcRZ)
444{
445 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
446 PVM pVM = pVCpu->CTX_SUFF(pVM);
447 VMCPUID const cCpus = pVM->cCpus;
448 switch (enmDeliveryMode)
449 {
450 case XAPICDELIVERYMODE_FIXED:
451 {
452 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
453 {
454 if ( VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
455 && apicIsEnabled(&pVM->aCpus[idCpu]))
456 APICPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
457 }
458 break;
459 }
460
461 case XAPICDELIVERYMODE_LOWEST_PRIO:
462 {
463 VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
464 if ( idCpu < pVM->cCpus
465 && apicIsEnabled(&pVM->aCpus[idCpu]))
466 APICPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
467 break;
468 }
469
470 case XAPICDELIVERYMODE_SMI:
471 {
472 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
473 {
474 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
475 APICSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_SMI);
476 }
477 break;
478 }
479
480 case XAPICDELIVERYMODE_NMI:
481 {
482 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
483 {
484 if ( VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
485 && apicIsEnabled(&pVM->aCpus[idCpu]))
486 APICSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_NMI);
487 }
488 break;
489 }
490
491 case XAPICDELIVERYMODE_INIT:
492 {
493#ifdef IN_RING3
494 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
495 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
496 VMMR3SendInitIpi(pVM, idCpu);
497#else
498 /* We need to return to ring-3 to deliver the INIT. */
499 rcStrict = rcRZ;
500#endif
501 break;
502 }
503
504 case XAPICDELIVERYMODE_STARTUP:
505 {
506#ifdef IN_RING3
507 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
508 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
509 VMMR3SendStartupIpi(pVM, idCpu, uVector);
510#else
511 /* We need to return to ring-3 to deliver the SIPI. */
512 rcStrict = rcRZ;
513#endif
514 break;
515 }
516
517 case XAPICDELIVERYMODE_EXTINT:
518 {
519 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
520 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
521 APICSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_EXTINT);
522 break;
523 }
524
525 default:
526 {
527 AssertMsgFailed(("APIC: apicSendIntr: Unknown delivery mode %#x\n", enmDeliveryMode));
528 break;
529 }
530 }
531
532 /*
533 * If an illegal vector is programmed, set the 'send illegal vector' error here if the
534 * interrupt is being sent by an APIC.
535 *
536 * The 'receive illegal vector' will be set on the target APIC when the interrupt
537 * gets generated, see APICPostInterrupt().
538 *
539 * See Intel spec. 10.5.3 "Error Handling".
540 */
541 if ( rcStrict != rcRZ
542 && pVCpu)
543 {
544 if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
545 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
546 }
547 return rcStrict;
548}
549
550
551/**
552 * Checks if this APIC belongs to a logical destination.
553 *
554 * @returns true if the APIC belongs to the logical
555 * destination, false otherwise.
556 * @param pVCpu The cross context virtual CPU structure.
557 * @param fDest The destination mask.
558 *
559 * @thread Any.
560 */
561static bool apicIsLogicalDest(PVMCPU pVCpu, uint32_t fDest)
562{
563 if (XAPIC_IN_X2APIC_MODE(pVCpu))
564 {
565 /*
566 * Flat logical mode is not supported in x2APIC mode.
567 * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
568 * - High 16 bits is the cluster ID.
569 * - Low 16 bits: each bit represents a unique APIC within the cluster.
570 */
571 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
572 uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
573 if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
574 return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
575 return false;
576 }
577
578#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
579 /*
580 * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
581 * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
582 */
583 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
584 if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
585 return true;
586
587 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
588 XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
589 if (enmDestFormat == XAPICDESTFORMAT_FLAT)
590 {
591 /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
592 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
593 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
594 }
595 else
596 {
597 /*
598 * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
599 * - High 4 bits is the cluster ID.
600 * - Low 4 bits: each bit represents a unique APIC within the cluster.
601 */
602 Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
603 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
604 if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
605 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
606 return false;
607 }
608#else
609# error "Implement Pentium and P6 family APIC architectures"
610#endif
611}
612
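/*
 * Example of the clustered xAPIC match above, assuming the cluster ID occupies
 * the high nibble of the 8-bit logical ID: an LDR of 0x21 denotes cluster 2,
 * member bit 0. A destination of 0x23 (cluster 2, member bits 0 and 1) matches
 * both the cluster ID and the member bitmap, so this APIC accepts the
 * interrupt; a destination of 0x31 fails the cluster comparison.
 */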
613
614/**
615 * Figures out the set of destination CPUs for a given destination mode, format
616 * and delivery mode setting.
617 *
618 * @param pVM The cross context VM structure.
619 * @param fDestMask The destination mask.
620 * @param fBroadcastMask The broadcast mask.
621 * @param enmDestMode The destination mode.
622 * @param enmDeliveryMode The delivery mode.
623 * @param pDestCpuSet The destination CPU set to update.
624 */
625static void apicGetDestCpuSet(PVM pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
626 XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
627{
628 VMCPUSET_EMPTY(pDestCpuSet);
629
630 /*
631 * Physical destination mode only supports either a broadcast or a single target.
632 * - Broadcast with lowest-priority delivery mode is not supported[1]; we deliver it
633 * as a regular broadcast like in fixed delivery mode.
634 * - For a single target, lowest-priority delivery mode makes no sense. We deliver
635 * to the target like in fixed delivery mode.
636 *
637 * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
638 */
639 if ( enmDestMode == XAPICDESTMODE_PHYSICAL
640 && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
641 {
642 AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
643 enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
644 }
645
646 uint32_t const cCpus = pVM->cCpus;
647 if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
648 {
649 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
650#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
651 VMCPUID idCpuLowestTpr = NIL_VMCPUID;
652 uint8_t u8LowestTpr = UINT8_C(0xff);
653 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
654 {
655 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
656 if (apicIsLogicalDest(pVCpuDest, fDestMask))
657 {
658 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
659 uint8_t const u8Tpr = pXApicPage->tpr.u8Tpr; /* PAV */
660
661 /* If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
662 See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration". */
663 if (u8LowestTpr <= u8Tpr)
664 {
665 u8LowestTpr = u8Tpr;
666 idCpuLowestTpr = idCpu;
667 }
668 }
669 }
670 if (idCpuLowestTpr != NIL_VMCPUID)
671 VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
672#else
673# error "Implement Pentium and P6 family APIC architectures"
674#endif
675 return;
676 }
677
678 /*
679 * x2APIC:
680 * - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
681 * xAPIC:
682 * - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
683 * - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
684 *
685 * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
686 * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
687 * [3] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
688 */
689 if ((fDestMask & fBroadcastMask) == fBroadcastMask)
690 {
691 VMCPUSET_FILL(pDestCpuSet);
692 return;
693 }
694
695 if (enmDestMode == XAPICDESTMODE_PHYSICAL)
696 {
697 /* The destination mask is interpreted as the physical APIC ID of a single target. */
698#if 1
699 /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
700 if (RT_LIKELY(fDestMask < cCpus))
701 VMCPUSET_ADD(pDestCpuSet, fDestMask);
702#else
703 /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
704 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
705 {
706 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
707 if (XAPIC_IN_X2APIC_MODE(pVCpuDest))
708 {
709 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDest);
710 if (pX2ApicPage->id.u32ApicId == fDestMask)
711 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
712 }
713 else
714 {
715 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
716 if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
717 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
718 }
719 }
720#endif
721 }
722 else
723 {
724 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
725
726 /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
727 if (RT_UNLIKELY(!fDestMask))
728 return;
729
730 /* The destination mask is interpreted as a bitmap of software-programmable logical APIC ID of the target APICs. */
731 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
732 {
733 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
734 if (apicIsLogicalDest(pVCpuDest, fDestMask))
735 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
736 }
737 }
738}
739
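/*
 * Example of the broadcast check above: in xAPIC mode fBroadcastMask is 0xff,
 * so a destination of 0xff fills the entire CPU set regardless of destination
 * mode; in x2APIC mode the same applies to a destination of 0xffffffff.
 */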
740
741/**
742 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
743 * Command Register (ICR).
744 *
745 * @returns VBox status code.
746 * @param pVCpu The cross context virtual CPU structure.
747 * @param rcRZ The return code if the operation cannot be
748 * performed in the current context.
749 */
750static VBOXSTRICTRC apicSendIpi(PVMCPU pVCpu, int rcRZ)
751{
752 VMCPU_ASSERT_EMT(pVCpu);
753
754 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
755 XAPICDELIVERYMODE const enmDeliveryMode = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
756 XAPICDESTMODE const enmDestMode = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
757 XAPICINITLEVEL enmInitLevel = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
758 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
759 XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
760 uint8_t const uVector = pXApicPage->icr_lo.u.u8Vector;
761
762 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
763 uint32_t const fDest = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;
764
765#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
766 /*
767 * INIT Level De-assert is not supported on Pentium 4 and Xeon processors.
768 */
769 if (RT_UNLIKELY( enmDeliveryMode == XAPICDELIVERYMODE_INIT_LEVEL_DEASSERT
770 && enmInitLevel == XAPICINITLEVEL_DEASSERT
771 && enmTriggerMode == XAPICTRIGGERMODE_LEVEL))
772 {
773 return VINF_SUCCESS;
774 }
775
776 enmInitLevel = XAPICINITLEVEL_ASSERT;
777 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
778#else
779# error "Implement Pentium and P6 family APIC architectures"
780#endif
781
782 /*
783 * The destination and delivery modes are ignored/bypassed when a destination shorthand is specified.
784 * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
785 */
786 VMCPUSET DestCpuSet;
787 switch (enmDestShorthand)
788 {
789 case XAPICDESTSHORTHAND_NONE:
790 {
791 PVM pVM = pVCpu->CTX_SUFF(pVM);
792 uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
793 apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
794 break;
795 }
796
797 case XAPICDESTSHORTHAND_SELF:
798 {
799 VMCPUSET_EMPTY(&DestCpuSet);
800 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
801 break;
802 }
803
804 case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
805 {
806 VMCPUSET_FILL(&DestCpuSet);
807 break;
808 }
809
810 case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
811 {
812 VMCPUSET_FILL(&DestCpuSet);
813 VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
814 break;
815 }
816 }
817
818 return apicSendIntr(pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet, rcRZ);
819}
820
821
822/**
823 * Sets the Interrupt Command Register (ICR) high dword.
824 *
825 * @returns Strict VBox status code.
826 * @param pVCpu The cross context virtual CPU structure.
827 * @param uIcrHi The ICR high dword.
828 */
829static VBOXSTRICTRC apicSetIcrHi(PVMCPU pVCpu, uint32_t uIcrHi)
830{
831 VMCPU_ASSERT_EMT(pVCpu);
832 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
833
834 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
835 pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
836 return VINF_SUCCESS;
837}
838
839
840/**
841 * Sets the Interrupt Command Register (ICR) low dword.
842 *
843 * @returns Strict VBox status code.
844 * @param pVCpu The cross context virtual CPU structure.
845 * @param uIcrLo The ICR low dword.
846 * @param rcRZ The return code if the operation cannot be performed
847 * in the current context.
848 */
849static VBOXSTRICTRC apicSetIcrLo(PVMCPU pVCpu, uint32_t uIcrLo, int rcRZ)
850{
851 VMCPU_ASSERT_EMT(pVCpu);
852
853 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
854 pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR;
855
856 apicSendIpi(pVCpu, rcRZ);
857 return VINF_SUCCESS;
858}
859
860
861/**
862 * Sets the Interrupt Command Register (ICR).
863 *
864 * @returns Strict VBox status code.
865 * @param pVCpu The cross context virtual CPU structure.
866 * @param u64Icr The ICR (High and Low combined).
867 * @param rcRZ The return code if the operation cannot be performed
868 * in the current context.
869 */
870static VBOXSTRICTRC apicSetIcr(PVMCPU pVCpu, uint64_t u64Icr, int rcRZ)
871{
872 VMCPU_ASSERT_EMT(pVCpu);
873 Assert(XAPIC_IN_X2APIC_MODE(pVCpu));
874
875 /* Validate. */
876 uint32_t const uLo = RT_LO_U32(u64Icr);
877 if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR)))
878 {
879 /* Update high dword first, then update the low dword which sends the IPI. */
880 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
881 pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(u64Icr);
882 return apicSetIcrLo(pVCpu, uLo, rcRZ);
883 }
884 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
885}
886
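/*
 * Thus in x2APIC mode a single 64-bit WRMSR replaces the xAPIC's two MMIO
 * writes: the destination travels in the high dword while the vector and
 * delivery bits travel in the low dword, which is validated against
 * XAPIC_ICR_LO_WR before the IPI is sent.
 */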
887
888/**
889 * Sets the Error Status Register (ESR).
890 *
891 * @returns Strict VBox status code.
892 * @param pVCpu The cross context virtual CPU structure.
893 * @param uValue The ESR value.
894 */
895static VBOXSTRICTRC apicSetEsr(PVMCPU pVCpu, uint32_t uValue)
896{
897 VMCPU_ASSERT_EMT(pVCpu);
898 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
899 && (uValue & ~XAPIC_ESR_WO))
900 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);
901
902 /*
903 * Writing the ESR causes the internal error state to be loaded into the register,
904 * clearing the original internal state. See AMD spec. 16.4.6 "APIC Error Interrupts".
905 */
906 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
907 pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
908 return VINF_SUCCESS;
909}
910
911
912/**
913 * Updates the Processor Priority Register (PPR).
914 *
915 * @param pVCpu The cross context virtual CPU structure.
916 */
917static void apicUpdatePpr(PVMCPU pVCpu)
918{
919 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
920
921 /* See Intel spec. 10.8.3.1 "Task and Processor Priorities". */
922 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
923 uint8_t const uIsrv = apicGetLastSetBit(&pXApicPage->isr, 0 /* rcNotFound */);
924 uint8_t uPpr;
925 if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
926 uPpr = pXApicPage->tpr.u8Tpr;
927 else
928 uPpr = XAPIC_PPR_GET_PP(uIsrv);
929 pXApicPage->ppr.u8Ppr = uPpr;
930}
931
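/*
 * Worked example, assuming XAPIC_TPR_GET_TP and XAPIC_PPR_GET_PP both extract
 * the priority class in bits 7:4: with TPR = 0x3a (class 0x30) and highest
 * in-service vector 0x41 (class 0x40), the TPR class is lower, so the PPR
 * becomes 0x40 (the in-service class with a zero sub-class). Were the TPR
 * 0x4a or higher, the PPR would be the full TPR value.
 */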
932
933/**
934 * Gets the Processor Priority Register (PPR).
935 *
936 * @returns The PPR value.
937 * @param pVCpu The cross context virtual CPU structure.
938 */
939static uint8_t apicGetPpr(PVMCPU pVCpu)
940{
941 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
942
943 /*
944 * With virtualized APIC registers or with TPR virtualization, the hardware may
945 * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
946 * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
947 */
948 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
949 if (pApic->fVirtApicRegsEnabled) /** @todo re-think this */
950 apicUpdatePpr(pVCpu);
951 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
952 return pXApicPage->ppr.u8Ppr;
953}
954
955
956/**
957 * Sets the Task Priority Register (TPR).
958 *
959 * @returns Strict VBox status code.
960 * @param pVCpu The cross context virtual CPU structure.
961 * @param uTpr The TPR value.
962 */
963static VBOXSTRICTRC apicSetTpr(PVMCPU pVCpu, uint32_t uTpr)
964{
965 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
966
967 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
968 && (uTpr & ~XAPIC_TPR))
969 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);
970
971 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
972 pXApicPage->tpr.u8Tpr = uTpr;
973 apicUpdatePpr(pVCpu);
974 apicSignalNextPendingIntr(pVCpu);
975 return VINF_SUCCESS;
976}
977
978
979/**
980 * Sets the End-Of-Interrupt (EOI) register.
981 *
982 * @returns Strict VBox status code.
983 * @param pVCpu The cross context virtual CPU structure.
984 * @param uEoi The EOI value.
985 */
986static VBOXSTRICTRC apicSetEoi(PVMCPU pVCpu, uint32_t uEoi)
987{
988 VMCPU_ASSERT_EMT(pVCpu);
989
990 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
991 && (uEoi & ~XAPIC_EOI_WO))
992 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);
993
994 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
995 int isrv = apicGetLastSetBit(&pXApicPage->isr, VERR_NOT_FOUND);
996 if (isrv >= 0)
997 {
998 /*
999 * Dispensing the spurious-interrupt vector does not affect the ISR.
1000 * See Intel spec. 10.9 "Spurious Interrupt".
1001 */
1002 uint8_t const uVector = isrv;
1003 if (uVector != pXApicPage->svr.u.u8SpuriousVector)
1004 {
1005 apicClearVectorInReg(&pXApicPage->isr, uVector);
1006 apicUpdatePpr(pVCpu);
1007 bool fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
1008 if (fLevelTriggered)
1009 {
1010 /** @todo We need to broadcast EOI to IO APICs here. */
1011 apicClearVectorInReg(&pXApicPage->tmr, uVector);
1012 }
1013
1014 apicSignalNextPendingIntr(pVCpu);
1015 }
1016 }
1017
1018 return VINF_SUCCESS;
1019}
1020
1021
1022/**
1023 * Sets the Logical Destination Register (LDR).
1024 *
1025 * @returns Strict VBox status code.
1026 * @param pVCpu The cross context virtual CPU structure.
1027 * @param uLdr The LDR value.
1028 *
1029 * @remarks LDR is read-only in x2APIC mode.
1030 */
1031static VBOXSTRICTRC apicSetLdr(PVMCPU pVCpu, uint32_t uLdr)
1032{
1033 VMCPU_ASSERT_EMT(pVCpu);
1034 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1035
1036 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1037 apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR);
1038 return VINF_SUCCESS;
1039}
1040
1041
1042/**
1043 * Sets the Destination Format Register (DFR).
1044 *
1045 * @returns Strict VBox status code.
1046 * @param pVCpu The cross context virtual CPU structure.
1047 * @param uDfr The DFR value.
1048 *
1049 * @remarks DFR is not available in x2APIC mode.
1050 */
1051static VBOXSTRICTRC apicSetDfr(PVMCPU pVCpu, uint32_t uDfr)
1052{
1053 VMCPU_ASSERT_EMT(pVCpu);
1054 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1055
1056 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1057 apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr & XAPIC_DFR);
1058 return VINF_SUCCESS;
1059}
1060
1061
1062/**
1063 * Sets the Timer Divide Configuration Register (DCR).
1064 *
1065 * @returns Strict VBox status code.
1066 * @param pVCpu The cross context virtual CPU structure.
1067 * @param uTimerDcr The timer DCR value.
1068 */
1069static VBOXSTRICTRC apicSetTimerDcr(PVMCPU pVCpu, uint32_t uTimerDcr)
1070{
1071 VMCPU_ASSERT_EMT(pVCpu);
1072 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1073 && (uTimerDcr & ~XAPIC_TIMER_DCR))
1074 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);
1075
1076 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1077 apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
1078 return VINF_SUCCESS;
1079}
1080
1081
1082/**
1083 * Gets the timer's Current Count Register (CCR).
1084 *
1085 * @returns VBox status code.
1086 * @param pVCpu The cross context virtual CPU structure.
1087 * @param rcBusy The busy return code for the timer critical section.
1088 * @param puValue Where to store the LVT timer CCR.
1089 */
1090static VBOXSTRICTRC apicGetTimerCcr(PVMCPU pVCpu, int rcBusy, uint32_t *puValue)
1091{
1092 VMCPU_ASSERT_EMT(pVCpu);
1093 Assert(puValue);
1094
1095 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1096 *puValue = 0;
1097
1098 /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1099 if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1100 return VINF_SUCCESS;
1101
1102 /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
1103 uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
1104 if (!uInitialCount)
1105 return VINF_SUCCESS;
1106
1107 /*
1108 * Reading the virtual-sync clock requires locking its timer because it's not
1109 * a simple atomic operation, see tmVirtualSyncGetEx().
1110 *
1111 * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
1112 */
1113 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1114 PTMTIMER pTimer = CTX_SUFF(pApicCpu->pTimer);
1115
1116 int rc = TMTimerLock(pTimer, rcBusy);
1117 if (rc == VINF_SUCCESS)
1118 {
1119 /* If the current-count register is 0, it implies the timer expired. */
1120 uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
1121 if (uCurrentCount)
1122 {
1123 uint64_t const cTicksElapsed = TMTimerGet(pApicCpu->CTX_SUFF(pTimer)) - pApicCpu->u64TimerInitial;
1124 TMTimerUnlock(pTimer);
1125 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
1126 uint64_t const uDelta = cTicksElapsed >> uTimerShift;
1127 if (uInitialCount > uDelta)
1128 *puValue = uInitialCount - uDelta;
1129 }
1130 else
1131 TMTimerUnlock(pTimer);
1132 }
1133 return rc;
1134}
1135
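/*
 * Worked example of the CCR derivation above, assuming apicGetTimerShift
 * returns log2 of the DCR divider: with an initial count of 1000000 and
 * divide-by-8 (shift 3), 4000000 elapsed virtual-sync ticks give a delta of
 * 500000 and hence a CCR of 500000; once the delta reaches the initial count,
 * the register reads 0.
 */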
1136
1137/**
1138 * Sets the timer's Initial-Count Register (ICR).
1139 *
1140 * @returns Strict VBox status code.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param rcBusy The busy return code for the timer critical section.
1143 * @param uInitialCount The timer ICR.
1144 */
1145static VBOXSTRICTRC apicSetTimerIcr(PVMCPU pVCpu, int rcBusy, uint32_t uInitialCount)
1146{
1147 VMCPU_ASSERT_EMT(pVCpu);
1148
1149 PAPIC pApic = VM_TO_APIC(CTX_SUFF(pVCpu->pVM));
1150 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1151 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1152 PTMTIMER pTimer = CTX_SUFF(pApicCpu->pTimer);
1153
1154 /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1155 if ( pApic->fSupportsTscDeadline
1156 && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1157 return VINF_SUCCESS;
1158
1159 /*
1160 * The timer CCR may be modified by apicR3TimerCallback() in parallel,
1161 * so obtain the lock -before- updating it here to be consistent with the
1162 * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
1163 */
1164 int rc = TMTimerLock(pTimer, rcBusy);
1165 if (rc == VINF_SUCCESS)
1166 {
1167 pXApicPage->timer_icr.u32InitialCount = uInitialCount;
1168 pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
1169 if (uInitialCount)
1170 APICStartTimer(pApicCpu, uInitialCount);
1171 else
1172 APICStopTimer(pApicCpu);
1173 TMTimerUnlock(pTimer);
1174 }
1175 return rc;
1176}
1177
1178
1179/**
1180 * Sets an LVT entry.
1181 *
1182 * @returns Strict VBox status code.
1183 * @param pVCpu The cross context virtual CPU structure.
1184 * @param offLvt The LVT entry offset in the xAPIC page.
1185 * @param uLvt The LVT value to set.
1186 */
1187static VBOXSTRICTRC apicSetLvtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1188{
1189#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1190 VMCPU_ASSERT_EMT(pVCpu);
1191 AssertMsg( offLvt == XAPIC_OFF_LVT_TIMER
1192 || offLvt == XAPIC_OFF_LVT_THERMAL
1193 || offLvt == XAPIC_OFF_LVT_PERF
1194 || offLvt == XAPIC_OFF_LVT_LINT0
1195 || offLvt == XAPIC_OFF_LVT_LINT1
1196 || offLvt == XAPIC_OFF_LVT_ERROR,
1197 ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#x, uLvt=%#x\n", pVCpu->idCpu, offLvt, uLvt));
1198
1199 /*
1200 * If TSC-deadline mode isn't supported, ignore the bit in xAPIC mode
1201 * and raise #GP(0) in x2APIC mode.
1202 */
1203 PCAPIC pApic = VM_TO_APIC(CTX_SUFF(pVCpu->pVM));
1204 if (offLvt == XAPIC_OFF_LVT_TIMER)
1205 {
1206 if ( !pApic->fSupportsTscDeadline
1207 && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
1208 {
1209 if (XAPIC_IN_X2APIC_MODE(pVCpu))
1210 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1211 uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
1212 /** @todo TSC-deadline timer mode transition */
1213 }
1214 }
1215
1216 /*
1217 * Validate rest of the LVT bits.
1218 */
1219 uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
1220 AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);
1221
1222 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1223 && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
1224 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1225
1226 uLvt &= g_au32LvtValidMasks[idxLvt];
1227
1228 /*
1229 * In the software-disabled state, LVT mask-bit must remain set and attempts to clear the mask
1230 * bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
1231 */
1232 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1233 AssertCompile(RT_OFFSETOF(XAPICPAGE, svr) == RT_OFFSETOF(X2APICPAGE, svr));
1234 if (!pXApicPage->svr.u.fApicSoftwareEnable)
1235 uLvt |= XAPIC_LVT_MASK;
1236
1237 /*
1238 * It is unclear whether we should signal a 'send illegal vector' error here and skip updating
1239 * the LVT entry when the delivery mode is 'fixed'[1], update it while also signaling the
1240 * error, or not signal the error at all. For now, we'll allow setting illegal vectors into the LVT
1241 * but set the 'send illegal vector' error here. The 'receive illegal vector' error will be set if
1242 * the interrupt for the vector happens to be generated, see APICPostInterrupt().
1243 *
1244 * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
1245 */
1246 if (RT_UNLIKELY( XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
1247 && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
1248 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
1249
1250 apicWriteRaw32(pXApicPage, offLvt, uLvt);
1251 return VINF_SUCCESS;
1252#else
1253# error "Implement Pentium and P6 family APIC architectures"
1254#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
1255}
1256
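/*
 * Note on the idxLvt computation above: the LVT registers sit at 16-byte
 * strides from XAPIC_OFF_LVT_START, so shifting the relative offset right by
 * 4 yields an index into g_au32LvtValidMasks, whose entries are ordered to
 * match (timer, thermal, perf, LINT0, LINT1, error).
 */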
1257
1258#if 0
1259/**
1260 * Sets an LVT entry in the extended LVT range.
1261 *
1262 * @returns VBox status code.
1263 * @param pVCpu The cross context virtual CPU structure.
1264 * @param offLvt The LVT entry offset in the xAPIC page.
1265 * @param uLvt The LVT value to set.
1266 */
1267static int apicSetLvtExtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1268{
1269 VMCPU_ASSERT_EMT(pVCpu);
1270 AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvtExtEntry: invalid offset %#x\n", pVCpu->idCpu, offLvt));
1271
1272 /** @todo support CMCI. */
1273 return VERR_NOT_IMPLEMENTED;
1274}
1275#endif
1276
1277
1278/**
1279 * Hints TM about the APIC timer frequency.
1280 *
1281 * @param pApicCpu The APIC CPU state.
1282 * @param uInitialCount The new initial count.
1283 * @param uTimerShift The new timer shift.
1284 * @thread Any.
1285 */
1286static void apicHintTimerFreq(PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
1287{
1288 Assert(pApicCpu);
1289 Assert(TMTimerIsLockOwner(CTX_SUFF(pApicCpu->pTimer)));
1290
1291 if ( pApicCpu->uHintedTimerInitialCount != uInitialCount
1292 || pApicCpu->uHintedTimerShift != uTimerShift)
1293 {
1294 uint32_t uHz;
1295 if (uInitialCount)
1296 {
1297 uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
1298 uHz = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer)) / cTicksPerPeriod;
1299 }
1300 else
1301 uHz = 0;
1302
1303 TMTimerSetFrequencyHint(pApicCpu->CTX_SUFF(pTimer), uHz);
1304 pApicCpu->uHintedTimerInitialCount = uInitialCount;
1305 pApicCpu->uHintedTimerShift = uTimerShift;
1306 }
1307}
1308
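/*
 * Example of the hint computation above: an initial count of 100000 with
 * timer shift 3 gives a period of 100000 << 3 = 800000 timer ticks; at a
 * (hypothetical) 1 GHz timer clock that hints 1250 Hz to TM.
 */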
1309
1310/**
1311 * Reads an APIC register.
1312 *
1313 * @returns VBox status code.
1314 * @param pApicDev The APIC device instance.
1315 * @param pVCpu The cross context virtual CPU structure.
1316 * @param offReg The offset of the register being read.
1317 * @param puValue Where to store the register value.
1318 */
1319static int apicReadRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t *puValue)
1320{
1321 VMCPU_ASSERT_EMT(pVCpu);
1322 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1323
1324 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1325 uint32_t uValue = 0;
1326 int rc = VINF_SUCCESS;
1327 switch (offReg)
1328 {
1329 case XAPIC_OFF_ID:
1330 case XAPIC_OFF_VERSION:
1331 case XAPIC_OFF_TPR:
1332 case XAPIC_OFF_EOI:
1333 case XAPIC_OFF_RRD:
1334 case XAPIC_OFF_LDR:
1335 case XAPIC_OFF_DFR:
1336 case XAPIC_OFF_SVR:
1337 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1338 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1339 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1340 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1341 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1342 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1343 case XAPIC_OFF_ESR:
1344 case XAPIC_OFF_ICR_LO:
1345 case XAPIC_OFF_ICR_HI:
1346 case XAPIC_OFF_LVT_TIMER:
1347#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1348 case XAPIC_OFF_LVT_THERMAL:
1349#endif
1350 case XAPIC_OFF_LVT_PERF:
1351 case XAPIC_OFF_LVT_LINT0:
1352 case XAPIC_OFF_LVT_LINT1:
1353 case XAPIC_OFF_LVT_ERROR:
1354 case XAPIC_OFF_TIMER_ICR:
1355 case XAPIC_OFF_TIMER_DCR:
1356 {
1357 Assert( !XAPIC_IN_X2APIC_MODE(pVCpu)
1358 || ( offReg != XAPIC_OFF_DFR
1359 && offReg != XAPIC_OFF_ICR_HI
1360 && offReg != XAPIC_OFF_EOI));
1361 uValue = apicReadRaw32(pXApicPage, offReg);
1362 break;
1363 }
1364
1365 case XAPIC_OFF_PPR:
1366 {
1367 uValue = apicGetPpr(pVCpu);
1368 break;
1369 }
1370
1371 case XAPIC_OFF_TIMER_CCR:
1372 {
1373 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1374 rc = VBOXSTRICTRC_VAL(apicGetTimerCcr(pVCpu, VINF_IOM_R3_MMIO_READ, &uValue));
1375 break;
1376 }
1377
1378 case XAPIC_OFF_APR:
1379 {
1380#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1381 /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
1382 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1383#else
1384# error "Implement Pentium and P6 family APIC architectures"
1385#endif
1386 break;
1387 }
1388
1389 default:
1390 {
1391 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1392 rc = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "offReg=%#x Id=%u\n", offReg, pVCpu->idCpu);
1393 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1394 break;
1395 }
1396 }
1397
1398 *puValue = uValue;
1399 return rc;
1400}
1401
1402
1403/**
1404 * Writes an APIC register.
1405 *
1406 * @returns Strict VBox status code.
1407 * @param pApicDev The APIC device instance.
1408 * @param pVCpu The cross context virtual CPU structure.
1409 * @param offReg The offset of the register being written.
1410 * @param uValue The register value.
1411 */
1412static VBOXSTRICTRC apicWriteRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t uValue)
1413{
1414 VMCPU_ASSERT_EMT(pVCpu);
1415 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1416 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1417
1418 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1419 switch (offReg)
1420 {
1421 case XAPIC_OFF_TPR:
1422 {
1423 rcStrict = apicSetTpr(pVCpu, uValue);
1424 break;
1425 }
1426
1427 case XAPIC_OFF_LVT_TIMER:
1428#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1429 case XAPIC_OFF_LVT_THERMAL:
1430#endif
1431 case XAPIC_OFF_LVT_PERF:
1432 case XAPIC_OFF_LVT_LINT0:
1433 case XAPIC_OFF_LVT_LINT1:
1434 case XAPIC_OFF_LVT_ERROR:
1435 {
1436 rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
1437 break;
1438 }
1439
1440 case XAPIC_OFF_TIMER_ICR:
1441 {
1442 rcStrict = apicSetTimerIcr(pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
1443 break;
1444 }
1445
1446 case XAPIC_OFF_EOI:
1447 {
1448 rcStrict = apicSetEoi(pVCpu, uValue);
1449 break;
1450 }
1451
1452 case XAPIC_OFF_LDR:
1453 {
1454 rcStrict = apicSetLdr(pVCpu, uValue);
1455 break;
1456 }
1457
1458 case XAPIC_OFF_DFR:
1459 {
1460 rcStrict = apicSetDfr(pVCpu, uValue);
1461 break;
1462 }
1463
1464 case XAPIC_OFF_SVR:
1465 {
1466 rcStrict = apicSetSvr(pVCpu, uValue);
1467 break;
1468 }
1469
1470 case XAPIC_OFF_ICR_LO:
1471 {
1472 rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE);
1473 break;
1474 }
1475
1476 case XAPIC_OFF_ICR_HI:
1477 {
1478 rcStrict = apicSetIcrHi(pVCpu, uValue);
1479 break;
1480 }
1481
1482 case XAPIC_OFF_TIMER_DCR:
1483 {
1484 rcStrict = apicSetTimerDcr(pVCpu, uValue);
1485 break;
1486 }
1487
1488 case XAPIC_OFF_ESR:
1489 {
1490 rcStrict = apicSetEsr(pVCpu, uValue);
1491 break;
1492 }
1493
1494 case XAPIC_OFF_APR:
1495 case XAPIC_OFF_RRD:
1496 {
1497#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1498 /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
1499#else
1500# error "Implement Pentium and P6 family APIC architectures"
1501#endif
1502 break;
1503 }
1504
1505 /* Unavailable/reserved in xAPIC mode: */
1506 case X2APIC_OFF_SELF_IPI:
1507 /* Read-only registers: */
1508 case XAPIC_OFF_ID:
1509 case XAPIC_OFF_VERSION:
1510 case XAPIC_OFF_PPR:
1511 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1512 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1513 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1514 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1515 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1516 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1517 case XAPIC_OFF_TIMER_CCR:
1518 default:
1519 {
1520 rcStrict = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "APIC%u: offReg=%#x\n", pVCpu->idCpu, offReg);
1521 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1522 break;
1523 }
1524 }
1525
1526 return rcStrict;
1527}
1528
1529
1530/**
1531 * @interface_method_impl{PDMAPICREG,pfnReadMsrR3}
1532 */
1533VMMDECL(VBOXSTRICTRC) APICReadMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1534{
1535 /*
1536 * Validate.
1537 */
1538 VMCPU_ASSERT_EMT(pVCpu);
1539 Assert(u32Reg >= MSR_IA32_X2APIC_START && u32Reg <= MSR_IA32_X2APIC_END);
1540 Assert(pu64Value);
1541
1542 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1543 if (pApic->fRZEnabled)
1544 { /* likely */}
1545 else
1546 return VINF_CPUM_R3_MSR_READ;
1547
1548 STAM_COUNTER_INC(&VMCPU_TO_APICCPU(pVCpu)->StatMsrRead); /* this is the MSR-read path */
1549
1550 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1551 if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
1552 {
1553 switch (u32Reg)
1554 {
1555 /* Special handling for x2APIC: */
1556 case MSR_IA32_X2APIC_ICR:
1557 {
1558 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
1559 uint64_t const uHi = pX2ApicPage->icr_hi.u32IcrHi;
1560 uint64_t const uLo = pX2ApicPage->icr_lo.all.u32IcrLo;
1561 *pu64Value = RT_MAKE_U64(uLo, uHi);
1562 break;
1563 }
1564
1565 /* Special handling, compatible with xAPIC: */
1566 case MSR_IA32_X2APIC_TIMER_CCR:
1567 {
1568 uint32_t uValue;
1569 rcStrict = apicGetTimerCcr(pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1570 *pu64Value = uValue;
1571 break;
1572 }
1573
1574 /* Special handling, compatible with xAPIC: */
1575 case MSR_IA32_X2APIC_PPR:
1576 {
1577 *pu64Value = apicGetPpr(pVCpu);
1578 break;
1579 }
1580
1581 /* Raw read, compatible with xAPIC: */
1582 case MSR_IA32_X2APIC_ID:
1583 case MSR_IA32_X2APIC_VERSION:
1584 case MSR_IA32_X2APIC_TPR:
1585 case MSR_IA32_X2APIC_LDR:
1586 case MSR_IA32_X2APIC_SVR:
1587 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1588 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1589 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1590 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1591 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1592 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1593 case MSR_IA32_X2APIC_ESR:
1594 case MSR_IA32_X2APIC_LVT_TIMER:
1595 case MSR_IA32_X2APIC_LVT_THERMAL:
1596 case MSR_IA32_X2APIC_LVT_PERF:
1597 case MSR_IA32_X2APIC_LVT_LINT0:
1598 case MSR_IA32_X2APIC_LVT_LINT1:
1599 case MSR_IA32_X2APIC_LVT_ERROR:
1600 case MSR_IA32_X2APIC_TIMER_ICR:
1601 case MSR_IA32_X2APIC_TIMER_DCR:
1602 {
1603 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1604 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1605 *pu64Value = apicReadRaw32(pXApicPage, offReg);
1606 break;
1607 }
1608
1609 /* Write-only MSRs: */
1610 case MSR_IA32_X2APIC_SELF_IPI:
1611 case MSR_IA32_X2APIC_EOI:
1612 {
1613 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1614 break;
1615 }
1616
1617 /* Reserved MSRs: */
1618 case MSR_IA32_X2APIC_LVT_CMCI:
1619 default:
1620 {
1621 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1622 break;
1623 }
1624 }
1625 }
1626 else
1627 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
1628
1629 return rcStrict;
1630}
1631
1632
1633/**
 * @interface_method_impl{PDMAPICREG,pfnWriteMsrR3}
 */
VMMDECL(VBOXSTRICTRC) APICWriteMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t u64Value)
{
    /*
     * Validate.
     */
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(u32Reg >= MSR_IA32_X2APIC_START && u32Reg <= MSR_IA32_X2APIC_END);

    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    if (pApic->fRZEnabled)
    { /* likely */ }
    else
        return VINF_CPUM_R3_MSR_WRITE;

    STAM_COUNTER_INC(&VMCPU_TO_APICCPU(pVCpu)->StatMsrWrite);

    /*
     * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
     * accesses where they are ignored. Hence, we need to validate each register before
     * invoking the generic/xAPIC write functions.
     *
     * Bits 63:32 of all registers except the ICR are reserved; we'll handle this common
     * case first and validate the remaining bits on a per-register basis.
     * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
     */
    if (   u32Reg != MSR_IA32_X2APIC_ICR
        && RT_HI_U32(u64Value))
        return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);

    uint32_t     u32Value = RT_LO_U32(u64Value);
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
    {
        switch (u32Reg)
        {
            case MSR_IA32_X2APIC_TPR:
            {
                rcStrict = apicSetTpr(pVCpu, u32Value);
                break;
            }

            case MSR_IA32_X2APIC_ICR:
            {
                rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
                break;
            }

            case MSR_IA32_X2APIC_SVR:
            {
                rcStrict = apicSetSvr(pVCpu, u32Value);
                break;
            }

            case MSR_IA32_X2APIC_ESR:
            {
                rcStrict = apicSetEsr(pVCpu, u32Value);
                break;
            }

            case MSR_IA32_X2APIC_TIMER_DCR:
            {
                rcStrict = apicSetTimerDcr(pVCpu, u32Value);
                break;
            }

            case MSR_IA32_X2APIC_LVT_TIMER:
            case MSR_IA32_X2APIC_LVT_THERMAL:
            case MSR_IA32_X2APIC_LVT_PERF:
            case MSR_IA32_X2APIC_LVT_LINT0:
            case MSR_IA32_X2APIC_LVT_LINT1:
            case MSR_IA32_X2APIC_LVT_ERROR:
            {
                rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
                break;
            }

            case MSR_IA32_X2APIC_TIMER_ICR:
            {
                rcStrict = apicSetTimerIcr(pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
                break;
            }

            /* Write-only MSRs: */
            case MSR_IA32_X2APIC_SELF_IPI:
            {
                uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
                APICPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE);
                rcStrict = VINF_SUCCESS;
                break;
            }

            case MSR_IA32_X2APIC_EOI:
            {
                rcStrict = apicSetEoi(pVCpu, u32Value);
                break;
            }

            /* Read-only MSRs: */
            case MSR_IA32_X2APIC_ID:
            case MSR_IA32_X2APIC_VERSION:
            case MSR_IA32_X2APIC_PPR:
            case MSR_IA32_X2APIC_LDR:
            case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
            case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
            case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
            case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
            case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
            case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
            case MSR_IA32_X2APIC_TIMER_CCR:
            {
                rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
                break;
            }

            /* Reserved MSRs: */
            case MSR_IA32_X2APIC_LVT_CMCI:
            default:
            {
                rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
                break;
            }
        }
    }
    else
        rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);

    return rcStrict;
}


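/*
 * Editor's sketch (not part of the device code): the reserved-bit rule the
 * function above applies to x2APIC WRMSRs. Only the ICR is a genuine 64-bit
 * register; for every other MSR in the range, any set bit in 63:32 must raise
 * #GP(0). The helper name is hypothetical.
 */
#if 0
static bool apicSketchX2MsrWriteRaisesGp(uint32_t u32Reg, uint64_t u64Value)
{
    if (u32Reg == MSR_IA32_X2APIC_ICR)
        return false;                   /* The ICR takes a full 64-bit value. */
    return RT_HI_U32(u64Value) != 0;    /* Reserved bits 63:32 set -> #GP(0). */
}
#endif

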
/**
 * @interface_method_impl{PDMAPICREG,pfnSetBaseMsrR3}
 */
VMMDECL(VBOXSTRICTRC) APICSetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint64_t u64BaseMsr)
{
    Assert(pVCpu);
    PAPICCPU pApicCpu   = VMCPU_TO_APICCPU(pVCpu);
    PAPIC    pApic      = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
    APICMODE enmNewMode = apicGetMode(u64BaseMsr);
    uint64_t uBaseMsr   = pApicCpu->uApicBaseMsr;

    /** @todo probably go back to ring-3 for all cases regardless of
     *        fRZEnabled. Writing this MSR is not something guests
     *        typically do often, and therefore is not performance
     *        critical. We'll have better diagnostics in ring-3. */
    if (!pApic->fRZEnabled)
        return VINF_CPUM_R3_MSR_WRITE;

    /*
     * We do not support re-mapping the APIC base address because:
     *    - We'll have to manage all the mappings ourselves in the APIC (reference counting based unmapping etc.)
     *      i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
     *    - It's unclear how/if IOM can fall back to handling regions as regular memory (if the MMIO
     *      region remains mapped but doesn't belong to the calling VCPU's APIC).
     */
    /** @todo Handle per-VCPU APIC base relocation. */
    if (MSR_APICBASE_GET_PHYSADDR(uBaseMsr) != XAPIC_APICBASE_PHYSADDR)
    {
#ifdef IN_RING3
        LogRelMax(5, ("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
                      MSR_APICBASE_GET_PHYSADDR(uBaseMsr)));
        return VERR_CPUM_RAISE_GP_0;
#else
        return VINF_CPUM_R3_MSR_WRITE;
#endif
    }

    /*
     * Act on state transition.
     */
    /** @todo We need to update the CPUID according to the state, which we
     *        currently don't do as CPUMSetGuestCpuIdFeature() is setting
     *        per-VM CPUID bits while we need per-VCPU specific bits. */
    if (enmNewMode != enmOldMode)
    {
        switch (enmNewMode)
        {
            case APICMODE_DISABLED:
            {
#ifdef IN_RING3
                /*
                 * The APIC state needs to be reset (especially the APIC ID, as the x2APIC ID bit layout
                 * is different). We can start with a clean slate identical to the state after a power-up/reset.
                 *
                 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
                 */
                APICR3Reset(pVCpu);
                uBaseMsr &= ~(MSR_APICBASE_XAPIC_ENABLE_BIT | MSR_APICBASE_X2APIC_ENABLE_BIT);
#else
                return VINF_CPUM_R3_MSR_WRITE;
#endif
                break;
            }

            case APICMODE_XAPIC:
            {
                if (enmOldMode != APICMODE_DISABLED)
                {
                    Log(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
                    return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
                }
                uBaseMsr |= MSR_APICBASE_XAPIC_ENABLE_BIT;
                break;
            }

            case APICMODE_X2APIC:
            {
                uBaseMsr |= MSR_APICBASE_X2APIC_ENABLE_BIT;

                /*
                 * The APIC ID needs updating when entering x2APIC mode.
                 * Any APIC ID written by software in xAPIC mode isn't preserved.
                 * The APIC ID becomes read-only to software in x2APIC mode.
                 *
                 * See Intel spec. 10.12.5.1 "x2APIC States".
                 */
                PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
                ASMMemZero32(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
                pX2ApicPage->id.u32ApicId = pVCpu->idCpu;

                /*
                 * LDR initialization occurs when entering x2APIC mode.
                 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
                 */
                pX2ApicPage->ldr.u32LogicalApicId = (((pX2ApicPage->id.u32ApicId >> 4) & UINT32_C(0xffff)) << 16)
                                                  | (UINT32_C(1) << (pX2ApicPage->id.u32ApicId & UINT32_C(0xf)));
                break;
            }

            case APICMODE_INVALID:
            default:
            {
                Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
                return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
            }
        }
    }

    ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
    return VINF_SUCCESS;
}


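/*
 * Editor's sketch: the logical x2APIC ID derivation used above, written out
 * per Intel spec. 10.12.10.2 with a worked example. For APIC ID 0x21 the
 * cluster is 0x2 and the in-cluster bit is 1 << 1, giving LDR 0x00020002.
 * The helper name is hypothetical.
 */
#if 0
static uint32_t apicSketchDeriveX2ApicLdr(uint32_t uApicId)
{
    uint32_t const uClusterId = (uApicId >> 4) & UINT32_C(0xffff);          /* x2APIC ID bits 19:4. */
    uint32_t const fInCluster = UINT32_C(1) << (uApicId & UINT32_C(0xf));   /* x2APIC ID bits 3:0.  */
    return (uClusterId << 16) | fInCluster;
}
#endif

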
/**
 * @interface_method_impl{PDMAPICREG,pfnGetBaseMsrR3}
 */
VMMDECL(uint64_t) APICGetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);

    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    return pApicCpu->uApicBaseMsr;
}


/**
 * @interface_method_impl{PDMAPICREG,pfnSetTprR3}
 */
VMMDECL(void) APICSetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Tpr)
{
    apicSetTpr(pVCpu, u8Tpr);
}


/**
 * @interface_method_impl{PDMAPICREG,pfnGetTprR3}
 */
VMMDECL(uint8_t) APICGetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    return pXApicPage->tpr.u8Tpr;
}


/**
 * @interface_method_impl{PDMAPICREG,pfnGetTimerFreqR3}
 */
VMMDECL(uint64_t) APICGetTimerFreq(PPDMDEVINS pDevIns)
{
    PVM      pVM      = PDMDevHlpGetVM(pDevIns);
    PVMCPU   pVCpu    = &pVM->aCpus[0];
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    uint64_t uTimer   = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer));
    return uTimer;
}


/**
 * @interface_method_impl{PDMAPICREG,pfnBusDeliverR3}
 * @remarks This is a private interface between the IOAPIC and the APIC.
 */
VMMDECL(int) APICBusDeliver(PPDMDEVINS pDevIns, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
                            uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uTagSrc)
{
    NOREF(uPolarity);
    NOREF(uTagSrc);
    PVM pVM = PDMDevHlpGetVM(pDevIns);

    /*
     * The destination field (mask) in the IO APIC redirection table entry is 8-bits.
     * Hence, the broadcast mask is 0xff.
     * See IO APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirection Table Registers".
     */
    XAPICTRIGGERMODE  enmTriggerMode  = (XAPICTRIGGERMODE)uTriggerMode;
    XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
    XAPICDESTMODE     enmDestMode     = (XAPICDESTMODE)uDestMode;
    uint32_t          fDestMask       = uDest;
    uint32_t          fBroadcastMask  = UINT32_C(0xff);

    VMCPUSET DestCpuSet;
    apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
    VBOXSTRICTRC rcStrict = apicSendIntr(NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet, VINF_SUCCESS);
    return VBOXSTRICTRC_VAL(rcStrict);
}


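/*
 * Editor's sketch: how the parameters of APICBusDeliver() map onto a 64-bit
 * I/O APIC redirection-table entry (field layout per the 82093AA I/O APIC
 * datasheet). The helper is illustrative; the real IOAPIC code lives in a
 * different device.
 */
#if 0
static int ioapicSketchDeliverRte(PPDMDEVINS pDevIns, uint64_t u64Rte, uint32_t uTagSrc)
{
    uint8_t const uVector       = (uint8_t)( u64Rte        & 0xff);   /* Bits  7:0  */
    uint8_t const uDeliveryMode = (uint8_t)((u64Rte >>  8) & 0x7);    /* Bits 10:8  */
    uint8_t const uDestMode     = (uint8_t)((u64Rte >> 11) & 0x1);    /* Bit  11    */
    uint8_t const uPolarity     = (uint8_t)((u64Rte >> 13) & 0x1);    /* Bit  13    */
    uint8_t const uTriggerMode  = (uint8_t)((u64Rte >> 15) & 0x1);    /* Bit  15    */
    uint8_t const uDest         = (uint8_t)((u64Rte >> 56) & 0xff);   /* Bits 63:56 */
    return APICBusDeliver(pDevIns, uDest, uDestMode, uDeliveryMode, uVector, uPolarity, uTriggerMode, uTagSrc);
}
#endif

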
/**
 * @interface_method_impl{PDMAPICREG,pfnLocalInterruptR3}
 * @remarks This is a private interface between the PIC and the APIC.
 */
VMMDECL(VBOXSTRICTRC) APICLocalInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
{
    NOREF(pDevIns);
    AssertReturn(u8Pin <= 1,   VERR_INVALID_PARAMETER);
    AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);

    PCXAPICPAGE  pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    VBOXSTRICTRC rcStrict   = VINF_SUCCESS;

    /* If the APIC is enabled, the interrupt is subject to LVT programming. */
    if (apicIsEnabled(pVCpu))
    {
        /* Pick the LVT entry corresponding to the interrupt pin. */
        static const uint16_t s_au16LvtOffsets[] =
        {
            XAPIC_OFF_LVT_LINT0,
            XAPIC_OFF_LVT_LINT1
        };
        Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
        uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
        uint32_t const uLvt   = apicReadRaw32(pXApicPage, offLvt);

        /* If software hasn't masked the interrupt in the LVT entry, proceed with interrupt processing. */
        if (!XAPIC_LVT_IS_MASKED(uLvt))
        {
            XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
            XAPICTRIGGERMODE        enmTriggerMode  = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);

            switch (enmDeliveryMode)
            {
                case XAPICDELIVERYMODE_FIXED:
                {
                    /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
                    if (offLvt == XAPIC_OFF_LVT_LINT1)
                        enmTriggerMode = XAPICTRIGGERMODE_EDGE;
                    /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
                              delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
                              use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
                              means. */
                    /* fallthru */
                }
                case XAPICDELIVERYMODE_SMI:
                case XAPICDELIVERYMODE_NMI:
                case XAPICDELIVERYMODE_INIT:    /** @todo won't work in R0/RC because callers don't care about rcRZ. */
                case XAPICDELIVERYMODE_EXTINT:
                {
                    VMCPUSET DestCpuSet;
                    VMCPUSET_EMPTY(&DestCpuSet);
                    VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
                    uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
                    rcStrict = apicSendIntr(pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet, rcRZ);
                    break;
                }

                /* Reserved/unknown delivery modes: */
                case XAPICDELIVERYMODE_LOWEST_PRIO:
                case XAPICDELIVERYMODE_STARTUP:
                default:
                {
                    rcStrict = VERR_INTERNAL_ERROR_3;
                    AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x on LINT%d\n", pVCpu->idCpu,
                                     enmDeliveryMode, u8Pin));
                    break;
                }
            }
        }
    }
    else
    {
        /* The APIC is disabled; pass the interrupt along to the CPU directly. */
        if (u8Level)
            APICSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
        else
            APICClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
    }

    return rcStrict;
}


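/*
 * Editor's sketch: the LVT LINT0 value a typical guest programs for 8259 PIC
 * pass-through, which the FIXED/EXTINT handling above then acts on. LVT
 * layout per Intel spec. 10.5.1: vector in bits 7:0, delivery mode in bits
 * 10:8 (ExtINT = 111b), mask in bit 16. The helper name is hypothetical.
 */
#if 0
static uint32_t apicSketchLvtLint0ForPicPassthru(void)
{
    uint32_t const uDeliveryModeExtInt = UINT32_C(7) << 8;    /* Bits 10:8 = 111b (ExtINT). */
    uint32_t const fMaskBit            = UINT32_C(0) << 16;   /* Bit 16 clear = unmasked.   */
    /* The vector field is left 0; the 8259 supplies the vector during the INTA cycle. */
    return uDeliveryModeExtInt | fMaskBit;                    /* = 0x00000700. */
}
#endif

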
/**
 * @interface_method_impl{PDMAPICREG,pfnHasPendingIrqR3}
 */
VMMDECL(bool) APICHasPendingIrq(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t *pu8PendingIrq)
{
    return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIrq);
}


/**
 * @interface_method_impl{PDMAPICREG,pfnGetInterruptR3}
 */
VMMDECL(int) APICGetInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t *puTagSrc)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    if (   apicIsEnabled(pVCpu)
        && pXApicPage->svr.u.fApicSoftwareEnable)
    {
        APICUpdatePendingInterrupts(pVCpu);
        int const irrv = apicGetLastSetBit(&pXApicPage->irr, -1);
        if (irrv >= 0)
        {
            Assert(irrv <= (int)UINT8_MAX);
            uint8_t const uVector = irrv;

            /** @todo this cannot possibly happen for anything other than ExtINT
             *        interrupts right? */
            uint8_t const uTpr = pXApicPage->tpr.u8Tpr;
            if (uTpr > 0 && uVector <= uTpr)
                return pXApicPage->svr.u.u8SpuriousVector;

            apicClearVectorInReg(&pXApicPage->irr, uVector);
            apicSetVectorInReg(&pXApicPage->isr, uVector);
            apicUpdatePpr(pVCpu);
            apicSignalNextPendingIntr(pVCpu);
            return uVector;
        }
    }

    return -1;
}


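/*
 * Editor's sketch: the scan apicGetLastSetBit() performs above, assuming
 * (hypothetically) that the eight 32-bit IRR fragments were packed into a
 * plain array; fragment 7 covers vectors 224-255, so scanning downwards
 * yields the highest-priority pending vector. ASMBitLastSetU32() returns a
 * 1-based bit index, or 0 when no bit is set.
 */
#if 0
static int apicSketchHighestSetVector(uint32_t const au32Fragments[8])
{
    for (int i = 7; i >= 0; i--)
        if (au32Fragments[i])
            return i * 32 + ASMBitLastSetU32(au32Fragments[i]) - 1;
    return -1;  /* No vector pending. */
}
#endif

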
/**
 * @callback_method_impl{FNIOMMMIOREAD}
 */
VMMDECL(int) APICReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
{
    NOREF(pvUser);
    Assert(!(GCPhysAddr & 0xf));
    Assert(cb == 4);

    PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
    PVMCPU   pVCpu    = PDMDevHlpGetVMCPU(pDevIns);
    uint16_t offReg   = (GCPhysAddr & 0xff0);
    uint32_t uValue   = 0;
#ifdef VBOX_WITH_STATISTICS
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    STAM_COUNTER_INC(&CTXSUFF(pApicCpu->StatMmioRead));
#endif
    int rc = apicReadRegister(pApicDev, pVCpu, offReg, &uValue);
    *(uint32_t *)pv = uValue;
    return rc;
}


/**
 * @callback_method_impl{FNIOMMMIOWRITE}
 */
VMMDECL(int) APICWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
{
    NOREF(pvUser);
    Assert(!(GCPhysAddr & 0xf));
    Assert(cb == 4);

    PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
    PVMCPU   pVCpu    = PDMDevHlpGetVMCPU(pDevIns);
    uint16_t offReg   = (GCPhysAddr & 0xff0);
    uint32_t uValue   = *(uint32_t *)pv;
#ifdef VBOX_WITH_STATISTICS
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    STAM_COUNTER_INC(&CTXSUFF(pApicCpu->StatMmioWrite));
#endif
    int rc = VBOXSTRICTRC_VAL(apicWriteRegister(pApicDev, pVCpu, offReg, uValue));
    return rc;
}


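/*
 * Editor's sketch: xAPIC registers are 32-bit values spaced 16 bytes apart
 * within the 4K APIC page, which is why both MMIO handlers above recover the
 * register offset with (GCPhysAddr & 0xff0). For instance, a 4-byte write to
 * physical 0xfee000b0 decodes to offset 0xb0, the EOI register. The helper
 * name is hypothetical.
 */
#if 0
static uint16_t apicSketchDecodeMmioOffset(RTGCPHYS GCPhysAddr)
{
    return (uint16_t)(GCPhysAddr & 0xff0);  /* 16-byte aligned register offset within the page. */
}
#endif

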
/**
 * Sets the interrupt pending force-flag and pokes the EMT if required.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   enmType     The IRQ type.
 */
VMMDECL(void) APICSetInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
{
    PVM      pVM      = CTX_SUFF(pVCpu->pVM);
    PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
    CTX_SUFF(pApicDev->pApicHlp)->pfnSetInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
}


/**
 * Clears the interrupt pending force-flag.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   enmType     The IRQ type.
 */
VMMDECL(void) APICClearInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
{
    PVM      pVM      = CTX_SUFF(pVCpu->pVM);
    PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
    CTX_SUFF(pApicDev->pApicHlp)->pfnClearInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
}


/**
 * Posts an interrupt to a target APIC.
 *
 * This function handles interrupts received from the system bus or
 * interrupts generated locally from the LVT or via a self IPI.
 *
 * Don't use this function to try and deliver ExtINT style interrupts.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uVector         The vector of the interrupt to be posted.
 * @param   enmTriggerMode  The trigger mode of the interrupt.
 *
 * @thread  Any.
 */
VMM_INT_DECL(void) APICPostInterrupt(PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode)
{
    Assert(pVCpu);

    PCAPIC   pApic    = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    /* Validate the vector. See Intel spec. 10.5.2 "Valid Interrupt Vectors". */
    if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
    {
        if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
        {
            apicSetVectorInPib(CTX_SUFF(pApicCpu->pvApicPib), uVector);
            bool const fAlreadySet = apicSetNotificationBitInPib(CTX_SUFF(pApicCpu->pvApicPib));
            if (fAlreadySet)
                return;

            if (pApic->fPostedIntrsEnabled)
            { /** @todo posted-interrupt call to hardware */ }
            else
                APICSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
        }
        else
        {
            /*
             * Level-triggered interrupts require updating the TMR and thus cannot be
             * delivered asynchronously.
             */
            apicSetVectorInPib(&pApicCpu->ApicPibLevel.aVectorBitmap[0], uVector);
            bool const fAlreadySet = apicSetNotificationBitInPib(&pApicCpu->ApicPibLevel.aVectorBitmap[0]);
            if (fAlreadySet)
                return;

            APICSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
        }
    }
    else
        apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);
}


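/*
 * Editor's sketch of the producer half of the pending-interrupt bitmap (PIB)
 * handshake used above. Publishing the vector before the notification bit
 * guarantees that a consumer which clears the notification bit will observe
 * the vector on its next scan. Field names mirror APICPIB, but the helper
 * itself and the exact field types are assumptions.
 */
#if 0
static bool apicSketchPostToPib(PAPICPIB pPib, uint8_t uVector)
{
    ASMAtomicBitSet(&pPib->aVectorBitmap[0], uVector);                  /* 1. Publish the vector. */
    return ASMAtomicXchgU32(&pPib->fOutstandingNotification, 1) == 1;   /* 2. True if the EMT was already poked. */
}
#endif

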
/**
 * Starts the APIC timer.
 *
 * @param   pApicCpu        The APIC CPU state.
 * @param   uInitialCount   The timer's Initial-Count Register (ICR), must be > 0.
 * @thread  Any.
 */
VMM_INT_DECL(void) APICStartTimer(PAPICCPU pApicCpu, uint32_t uInitialCount)
{
    Assert(pApicCpu);
    Assert(TMTimerIsLockOwner(CTX_SUFF(pApicCpu->pTimer)));
    Assert(uInitialCount > 0);

    PCXAPICPAGE    pXApicPage   = APICCPU_TO_CXAPICPAGE(pApicCpu);
    uint8_t const  uTimerShift  = apicGetTimerShift(pXApicPage);
    uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;

    /*
     * The assumption here is that the timer doesn't tick during this call
     * and thus setting a relative time to fire next is accurate. The advantage,
     * however, is that u64TimerInitial is updated 'atomically' while setting
     * the next tick.
     */
    PTMTIMER pTimer = CTX_SUFF(pApicCpu->pTimer);
    TMTimerSetRelative(pTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
    apicHintTimerFreq(pApicCpu, uInitialCount, uTimerShift);
}


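/*
 * Editor's sketch: how the divide configuration feeds the arming above. The
 * DCR's three divide bits (bits 0, 1 and 3) encode divide-by-2^(n+1), with
 * 111b wrapping to divide-by-1; see Intel spec. 10.5.4 "APIC Timer". So a
 * divide-by-8 setup (010b) yields shift 3, and an initial count of 100000
 * arms the timer 100000 << 3 = 800000 source-clock ticks ahead. The helper
 * name is hypothetical.
 */
#if 0
static uint8_t apicSketchGetTimerShift(uint32_t uDcr)
{
    uint8_t const uDivideValue = (uint8_t)(((uDcr & UINT32_C(8)) >> 1) | (uDcr & UINT32_C(3)));
    return (uDivideValue + 1) & 7;  /* 7 (111b) wraps to shift 0, i.e. divide by 1. */
}
#endif

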
/**
 * Stops the APIC timer.
 *
 * @param   pApicCpu    The APIC CPU state.
 * @thread  Any.
 */
VMM_INT_DECL(void) APICStopTimer(PAPICCPU pApicCpu)
{
    Assert(pApicCpu);
    Assert(TMTimerIsLockOwner(CTX_SUFF(pApicCpu->pTimer)));

    PTMTIMER pTimer = CTX_SUFF(pApicCpu->pTimer);
    TMTimerStop(pTimer);    /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
    pApicCpu->uHintedTimerInitialCount = 0;
    pApicCpu->uHintedTimerShift        = 0;
}


/**
 * Updates the CPUID bits necessary for the given APIC mode.
 *
 * @param   pVM         The cross context VM structure.
 * @param   enmMode     The APIC mode.
 */
VMM_INT_DECL(void) APICUpdateCpuIdForMode(PVM pVM, APICMODE enmMode)
{
    /* Exactly which CPUID bits should reflect the current APIC state is a bit vague. See @bugref{8245#c32}. */
    /** @todo This needs to be done on a per-VCPU basis! */
    switch (enmMode)
    {
        case APICMODE_DISABLED:
            CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
            break;

        case APICMODE_XAPIC:
        case APICMODE_X2APIC:
            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_X2APIC);
            break;

        default:
            AssertMsgFailed(("Invalid APIC mode: %d\n", (int)enmMode));
            break;
    }
}


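/*
 * Editor's sketch: the guest-visible effect of the toggling above. Per the
 * Intel spec, CPUID.01H:EDX bit 9 reports the on-chip APIC and CPUID.01H:ECX
 * bit 21 reports x2APIC support. The helper names are hypothetical.
 */
#if 0
static bool apicSketchCpuidHasApic(uint32_t uEdx)   { return RT_BOOL(uEdx & RT_BIT_32(9));  }
static bool apicSketchCpuidHasX2Apic(uint32_t uEcx) { return RT_BOOL(uEcx & RT_BIT_32(21)); }
#endif

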
/**
 * Queues a pending interrupt as in-service.
 *
 * This function should only be needed without virtualized APIC
 * registers. With virtualized APIC registers, it's sufficient to keep
 * the interrupts pending in the IRR as the hardware takes care of
 * virtual interrupt delivery.
 *
 * @returns true if the interrupt was queued to in-service interrupts,
 *          false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   u8PendingIntr   The pending interrupt to queue as
 *                          in-service.
 *
 * @remarks This assumes the caller has done the necessary checks and
 *          is actually ready to service the interrupt (TPR,
 *          interrupt shadow etc.)
 */
VMMDECL(bool) APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PAPIC pApic = VM_TO_APIC(CTX_SUFF(pVCpu->pVM));
    Assert(!pApic->fVirtApicRegsEnabled);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
    if (fIsPending)
    {
        apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
        apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
        apicUpdatePpr(pVCpu);
        return true;
    }
    return false;
}


/**
 * Dequeues a pending interrupt from in-service.
 *
 * This undoes APICQueueInterruptToService() for premature VM-exits before event
 * injection.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   u8PendingIntr   The pending interrupt to dequeue from
 *                          in-service.
 */
VMMDECL(void) APICDequeueInterruptFromService(PVMCPU pVCpu, uint8_t u8PendingIntr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PAPIC pApic = VM_TO_APIC(CTX_SUFF(pVCpu->pVM));
    Assert(!pApic->fVirtApicRegsEnabled);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
    if (fInService)
    {
        apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
        apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
        apicUpdatePpr(pVCpu);
    }
}


/**
 * Updates pending interrupts from the pending interrupt bitmap to the IRR.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) APICUpdatePendingInterrupts(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PAPICCPU   pApicCpu   = VMCPU_TO_APICCPU(pVCpu);
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    for (;;)
    {
        bool const fAlreadySet = apicClearNotificationBitInPib(CTX_SUFF(pApicCpu->pvApicPib));
        if (!fAlreadySet)
            break;

        PAPICPIB pPib = (PAPICPIB)CTX_SUFF(pApicCpu->pvApicPib);
        for (size_t i = 0; i < RT_ELEMENTS(pPib->aVectorBitmap); i++)
        {
            uint32_t const uFragment = ASMAtomicXchgU32(&pPib->aVectorBitmap[i], 0);
            if (uFragment)
                apicOrVectorsToReg(&pXApicPage->irr, i, uFragment);
        }
    }
}


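/*
 * Editor's sketch of the consumer half matching the producer sketch after
 * APICPostInterrupt(): clear the notification bit first, then drain the
 * vector fragments, and rescan until the bit stays clear so a vector posted
 * between the two steps is never lost. Field types are assumptions.
 */
#if 0
static void apicSketchDrainPib(PAPICPIB pPib, PXAPICPAGE pXApicPage)
{
    while (ASMAtomicXchgU32(&pPib->fOutstandingNotification, 0) != 0)
        for (size_t i = 0; i < RT_ELEMENTS(pPib->aVectorBitmap); i++)
        {
            uint32_t const uFragment = ASMAtomicXchgU32(&pPib->aVectorBitmap[i], 0);
            if (uFragment)
                apicOrVectorsToReg(&pXApicPage->irr, i, uFragment);
        }
}
#endif

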
/**
 * Gets the highest priority pending interrupt.
 *
 * @returns true if any interrupt is pending, false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pu8PendingIntr  Where to store the interrupt vector if the
 *                          interrupt is pending.
 */
VMMDECL(bool) APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
}