VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/APICAll.cpp @ 60307

Last change on this file since 60307 was 60307, checked in by vboxsync, 9 years ago

VMM: APIC rewrite. Initial commit, work in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 80.2 KB
1/* $Id: APICAll.cpp 60307 2016-04-04 15:23:11Z vboxsync $ */
2/** @file
3 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_APIC
23#include "APICInternal.h"
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/vmcpuset.h>
27
28/*********************************************************************************************************************************
29* Global Variables *
30*********************************************************************************************************************************/
31#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
32/** An ordered array of valid LVT masks. */
33static const uint32_t g_au32LvtValidMasks[] =
34{
35 XAPIC_LVT_TIMER,
36 XAPIC_LVT_THERMAL,
37 XAPIC_LVT_PERF,
38 XAPIC_LVT_LINT0,
39 XAPIC_LVT_LINT1,
40 XAPIC_LVT_ERROR
41};
42#endif
43
44#if 0
45/** @todo CMCI */
46static const uint32_t g_au32LvtExtValidMask[] =
47{
48 XAPIC_LVT_CMCI
49};
50#endif
51
52
53/**
54 * Checks if a vector is set in an APIC 256-bit sparse register.
55 *
56 * @returns true if the specified vector is set, false otherwise.
57 * @param pApicReg The APIC 256-bit sparse register.
58 * @param uVector The vector to check if set.
59 */
60DECLINLINE(bool) apicTestVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
61{
62 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
63 return ASMBitTest(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
64}
65
66
67/**
68 * Sets the vector in an APIC 256-bit sparse register.
69 *
70 * @param pApicReg The APIC 256-bit sparse register.
71 * @param uVector The vector to set.
72 */
73DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
74{
75 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
76 ASMAtomicBitSet(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
77}
78
79
80/**
81 * Clears the vector in an APIC 256-bit sparse register.
82 *
83 * @param pApicReg The APIC 256-bit sparse register.
84 * @param uVector The vector to clear.
85 */
86DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
87{
88 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
89 ASMAtomicBitClear(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
90}
91
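/*
 * Illustration (not part of the original file): how a vector presumably maps
 * into a 256-bit sparse register. The 256 bits live in eight 32-bit fragments
 * spaced 16 bytes apart in the xAPIC page, so the high three bits of the
 * vector select the fragment and the low five bits select the bit within it.
 * The real definitions are in APICInternal.h; the expansions below are an
 * assumption, shown only to make the OFF/BIT arithmetic concrete.
 */
#if 0
# define XAPIC_REG256_VECTOR_OFF(a_Vector)  (((a_Vector) & UINT32_C(0xe0)) >> 1)  /* (vector / 32) * 16 byte stride */
# define XAPIC_REG256_VECTOR_BIT(a_Vector)  ((a_Vector) & UINT32_C(0x1f))         /* bit index within the fragment */
/* E.g. vector 0x95 -> byte offset 0x40 (fragment 4), bit 0x15. */
#endif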
92
93/**
94 * Checks if a vector is set in an APIC Pending Interrupt Bitmap (PIB).
95 *
96 * @returns true if the specified vector is set, false otherwise.
97 * @param pvPib Opaque pointer to the PIB.
98 * @param uVector The vector to check if set.
99 */
100DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
101{
102 return ASMBitTest(pvPib, uVector);
103}
104
105
106/**
107 * Atomically tests and sets the PIB notification bit.
108 *
109 * @returns true if the bit was already set, false otherwise.
110 * @param pvPib Opaque pointer to the PIB.
111 */
112DECLINLINE(bool) apicSetNotificationBitInPib(volatile void *pvPib)
113{
114 return ASMAtomicBitTestAndSet(pvPib, XAPIC_PIB_NOTIFICATION_BIT);
115}
116
117
118/**
119 * Atomically tests and clears the PIB notification bit.
120 *
121 * @returns true if the bit was already set, false otherwise.
122 * @param pvPib Opaque pointer to the PIB.
 */
123DECLINLINE(bool) apicClearNotificationBitInPib(volatile void *pvPib)
124{
125 return ASMAtomicBitTestAndClear(pvPib, XAPIC_PIB_NOTIFICATION_BIT);
126}
127
128
129/**
130 * Sets the vector in an APIC Pending Interrupt Bitmap (PIB).
131 *
132 * @param pvPib Opaque pointer to the PIB.
133 * @param uVector The vector to set.
134 */
135DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
136{
137 ASMAtomicBitSet(pvPib, uVector);
138}
139
140
141/**
142 * Clears the vector in an APIC Pending Interrupt Bitmap (PIB).
143 *
144 * @param pvPib Opaque pointer to the PIB.
145 * @param uVector The vector to clear.
146 */
147DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
148{
149 ASMAtomicBitClear(pvPib, uVector);
150}
151
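/*
 * Illustration (not part of the original file): a sketch of how the PIB
 * helpers above are presumably combined by an interrupt producer. The vector
 * bit is published first, then the notification bit; only the first notifier
 * (test-and-set returning false) needs to poke the target VCPU. This is an
 * assumption for illustration, not the actual delivery path.
 */
#if 0
static void apicExamplePostToPib(volatile void *pvPib, uint8_t uVector)
{
    apicSetVectorInPib(pvPib, uVector);          /* Publish the pending vector. */
    if (!apicSetNotificationBitInPib(pvPib))     /* Were we the first to notify? */
    {
        /* Signal the target VCPU so it eventually drains the PIB into the IRR. */
    }
}
#endif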
152
153/**
154 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
155 * register.
156 *
157 * @param pApicReg The APIC 256-bit sparse register.
158 * @param idxFragment The index of the 32-bit fragment in @a
159 * pApicReg.
160 * @param u32Fragment The 32-bit vector fragment.
161 */
162DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
163{
164 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
165 ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
166}
167
168
169/**
170 * Reports and returns appropriate error code for invalid MSR accesses.
171 *
172 * @returns Strict VBox status code.
173 * @retval VINF_CPUM_R3_MSR_WRITE if the MSR write could not be serviced in the
174 * current context (raw-mode or ring-0).
175 * @retval VINF_CPUM_R3_MSR_READ if the MSR read could not be serviced in the
176 * current context (raw-mode or ring-0).
177 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
178 * appropriate actions.
179 *
180 * @param pVCpu The cross context virtual CPU structure.
181 * @param u32Reg The MSR being accessed.
182 * @param enmAccess The invalid-access type.
183 */
184static VBOXSTRICTRC apicMsrAccessError(PVMCPU pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
185{
186 static struct
187 {
188 const char *pszBefore; /* The error message before printing the MSR index */
189 const char *pszAfter; /* The error message after printing the MSR index */
190 int rcR0; /* The ring-0 error code */
191 } const s_aAccess[] =
192 {
193 { "read MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_READ },
194 { "write MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_WRITE },
195 { "read reserved/unknown MSR", "", VINF_CPUM_R3_MSR_READ },
196 { "write reserved/unknown MSR", "", VINF_CPUM_R3_MSR_WRITE },
197 { "read write-only MSR", "", VINF_CPUM_R3_MSR_READ },
198 { "write read-only MSR", "", VINF_CPUM_R3_MSR_WRITE },
199 { "read reserved bits of MSR", "", VINF_CPUM_R3_MSR_READ },
200 { "write reserved bits of MSR", "", VINF_CPUM_R3_MSR_WRITE },
201 { "write an invalid value to MSR", "", VINF_CPUM_R3_MSR_WRITE }
202 };
203 AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);
204
205 size_t const i = enmAccess;
206 Assert(i < RT_ELEMENTS(s_aAccess));
207#ifdef IN_RING3
208 LogRelMax(5, ("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg,
209 s_aAccess[i].pszAfter));
210 return VERR_CPUM_RAISE_GP_0;
211#else
212 return s_aAccess[i].rcR0;
213#endif
214}
215
216
217/**
218 * Gets the current APIC mode.
219 *
220 * @returns The mode.
221 * @param uApicBaseMsr The APIC Base MSR value.
222 */
223static APICMODE apicGetMode(uint64_t uApicBaseMsr)
224{
225 uint32_t const uMode = ((uint32_t)uApicBaseMsr >> MSR_APICBASE_MODE_SHIFT) & UINT32_C(0x3);
226 APICMODE const enmMode = (APICMODE)uMode;
227#ifdef VBOX_STRICT
228 /* Paranoia. */
229 switch (uMode)
230 {
231 case APICMODE_DISABLED:
232 case APICMODE_INVALID:
233 case APICMODE_XAPIC:
234 case APICMODE_X2APIC:
235 break;
236 default:
237 AssertMsgFailed(("Invalid mode"));
238 }
239#endif
240 return enmMode;
241}
242
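/*
 * Illustration (not part of the original file): the two-bit mode is presumed
 * to be the EXTD (bit 10) and EN (bit 11) pair of IA32_APIC_BASE read as one
 * field, giving 00 = disabled, 01 = invalid (EXTD without EN), 10 = xAPIC and
 * 11 = x2APIC; the shift and mask above mirror that assumption.
 */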
243
244/**
245 * Returns whether the APIC is hardware enabled or not.
246 *
247 * @returns true if enabled, false otherwise.
248 * @param pVCpu The cross context virtual CPU structure.
 */
249DECLINLINE(bool) apicIsEnabled(PVMCPU pVCpu)
250{
251 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
252 return MSR_APICBASE_IS_ENABLED(pApicCpu->uApicBaseMsr);
253}
254
255
256/**
257 * Finds the most significant set bit in an APIC 256-bit sparse register.
258 *
259 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
260 * @param pReg The APIC 256-bit sparse register.
261 * @param rcNotFound What to return when no bit is set.
262 */
263static int apicGetLastSetBit(volatile const XAPIC256BITREG *pReg, int rcNotFound)
264{
265 unsigned const cBitsPerFragment = sizeof(pReg->u[0].u32Reg) * 8;
266 ssize_t const cFragments = RT_ELEMENTS(pReg->u);
267 for (ssize_t i = cFragments - 1; i >= 0; i--)
268 {
269 uint32_t const uFragment = pReg->u[i].u32Reg;
270 if (uFragment)
271 {
272 unsigned idxSetBit = ASMBitLastSetU32(uFragment);
273 --idxSetBit;
274 idxSetBit += (i * cBitsPerFragment);
275 return idxSetBit;
276 }
277 }
278 return rcNotFound;
279}
280
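/*
 * Worked example (illustrative): if only vector 0x95 is set, fragment 4 holds
 * bit 21 (0x95 & 0x1f). ASMBitLastSetU32 returns the 1-based index 22, so
 * after the decrement and adding the fragment offset (4 * 32 bits) the
 * function returns 21 + 128 = 149 = 0x95, i.e. the vector itself.
 */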
281
282/**
283 * Gets the highest priority pending interrupt.
284 *
285 * @returns true if any interrupt is pending, false otherwise.
286 * @param pVCpu The cross context virtual CPU structure.
287 * @param pu8PendingIntr Where to store the interrupt vector if the
288 * interrupt is pending.
289 */
290static bool apicGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
291{
292 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
293 int const irrv = apicGetLastSetBit(&pXApicPage->irr, -1);
294 if (irrv >= 0)
295 {
296 Assert(irrv <= (int)UINT8_MAX);
297 *pu8PendingIntr = (uint8_t)irrv;
298 return true;
299 }
300 return false;
301}
302
303
304/**
305 * Reads a 32-bit register at a specified offset.
306 *
307 * @returns The value at the specified offset.
308 * @param pXApicPage The xAPIC page.
309 * @param offReg The offset of the register being read.
310 */
311DECLINLINE(uint32_t) apicReadRaw32(PCXAPICPAGE pXApicPage, uint16_t offReg)
312{
313 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
314 uint8_t const *pbXApic = (const uint8_t *)pXApicPage;
315 uint32_t const uValue = *(const uint32_t *)(pbXApic + offReg);
316 return uValue;
317}
318
319
320/**
321 * Writes a 32-bit register at a specified offset.
322 *
323 * @param pXApicPage The xAPIC page.
324 * @param offReg The offset of the register being written.
325 * @param uReg The value of the register.
326 */
327DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
328{
329 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
330 uint8_t *pbXApic = (uint8_t *)pXApicPage;
331 *(uint32_t *)(pbXApic + offReg) = uReg;
332}
333
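/*
 * Illustration (not part of the original file): the raw accessors treat the
 * xAPIC page as a flat byte array, so any architecturally defined register
 * offset can be used directly. A minimal sketch (pXApicPage as above):
 */
#if 0
uint32_t const uTpr = apicReadRaw32(pXApicPage, XAPIC_OFF_TPR);   /* TPR lives at offset 0x80 */
apicWriteRaw32(pXApicPage, XAPIC_OFF_TPR, uTpr & UINT32_C(0xff));
#endif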
334
335/**
336 * Sets an error in the internal ESR of the specified APIC.
337 *
338 * @param pVCpu The cross context virtual CPU structure.
339 * @thread Any.
340 */
341DECLINLINE(void) apicSetError(PVMCPU pVCpu, uint32_t uError)
342{
343 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
344 ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
345}
346
347
348/**
349 * Clears all errors in the internal ESR.
350 *
351 * @returns The value of the internal ESR before clearing.
352 * @param pVCpu The cross context virtual CPU structure.
353 */
354DECLINLINE(uint32_t) apicClearAllErrors(PVMCPU pVCpu)
355{
356 VMCPU_ASSERT_EMT(pVCpu);
357 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
358 return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
359}
360
361
362/**
363 * Signals the guest if a pending interrupt is ready to be serviced.
364 *
365 * @param pVCpu The cross context virtual CPU structure.
366 */
367static void apicSignalNextPendingIntr(PVMCPU pVCpu)
368{
369 VMCPU_ASSERT_EMT(pVCpu);
370
371 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
372 if (pXApicPage->svr.u.fApicSoftwareEnable)
373 {
374 int const irrv = apicGetLastSetBit(&pXApicPage->irr, VERR_NOT_FOUND);
375 if (irrv >= 0)
376 {
377 Assert(irrv <= (int)UINT8_MAX);
378 uint8_t const uVector = irrv;
379 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
380 if ( !uPpr
381 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
382 {
383 APICSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
384 }
385 }
386 }
387 else
388 APICClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
389}
390
391
392/**
393 * Sets the Spurious-Interrupt Vector Register (SVR).
394 *
395 * @returns Strict VBox status code.
396 * @param pVCpu The cross context virtual CPU structure.
397 * @param uSvr The SVR value.
398 */
399static VBOXSTRICTRC apicSetSvr(PVMCPU pVCpu, uint32_t uSvr)
400{
401 VMCPU_ASSERT_EMT(pVCpu);
402
403 uint32_t uValidMask = XAPIC_SVR;
404 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
405 if (pXApicPage->version.u.fEoiBroadcastSupression)
406 uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;
407
408 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
409 && (uSvr & ~uValidMask))
410 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);
411
412 apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
413 if (!pXApicPage->svr.u.fApicSoftwareEnable)
414 {
415 /** @todo CMCI. */
416 pXApicPage->lvt_timer.u.u1Mask = 1;
417#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
418 pXApicPage->lvt_thermal.u.u1Mask = 1;
419#endif
420 pXApicPage->lvt_perf.u.u1Mask = 1;
421 pXApicPage->lvt_lint0.u.u1Mask = 1;
422 pXApicPage->lvt_lint1.u.u1Mask = 1;
423 pXApicPage->lvt_error.u.u1Mask = 1;
424 }
425 return VINF_SUCCESS;
426}
427
428
429/**
430 * Sends an interrupt to one or more APICs.
431 *
432 * @returns Strict VBox status code.
433 * @param pVCpu The cross context virtual CPU structure.
434 * @param uVector The interrupt vector.
435 * @param enmTriggerMode The trigger mode.
436 * @param enmDeliveryMode The delivery mode.
437 * @param pDestCpuSet The destination CPU set.
438 * @param rcRZ The return code if the operation cannot be
439 * performed in the current context.
440 */
441static VBOXSTRICTRC apicSendIntr(PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
442 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, int rcRZ)
443{
444 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
445 PVM pVM = pVCpu->CTX_SUFF(pVM);
446 VMCPUID const cCpus = pVM->cCpus;
447 switch (enmDeliveryMode)
448 {
449 case XAPICDELIVERYMODE_FIXED:
450 case XAPICDELIVERYMODE_LOWEST_PRIO:
451 {
452 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
453 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
454 APICPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
455 break;
456 }
457
458 case XAPICDELIVERYMODE_SMI:
459 {
460 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
461 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
462 APICSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_SMI);
463 break;
464 }
465
466 case XAPICDELIVERYMODE_NMI:
467 {
468 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
469 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
470 APICSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_NMI);
471 break;
472 }
473
474 case XAPICDELIVERYMODE_INIT:
475 {
476#ifdef IN_RING3
477 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
478 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
479 VMMR3SendInitIpi(pVM, idCpu);
480#else
481 /* We need to return to ring-3 to deliver the INIT. */
482 rcStrict = rcRZ;
483#endif
484 break;
485 }
486
487 case XAPICDELIVERYMODE_STARTUP:
488 {
489#ifdef IN_RING3
490 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
491 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
492 VMMR3SendStartupIpi(pVM, idCpu, uVector);
493#else
494 /* We need to return to ring-3 to deliver the SIPI. */
495 rcStrict = rcRZ;
496#endif
497 break;
498 }
499
500 case XAPICDELIVERYMODE_EXTINT:
501 {
502 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
503 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
504 APICSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_EXTINT);
505 break;
506 }
507
508 default:
509 {
510 AssertMsgFailed(("APIC: apicSendIntr: Unknown delivery mode %#x\n", enmDeliveryMode));
511 break;
512 }
513 }
514
515 /*
516 * If an illegal vector is programmed, set the 'send illegal vector' error here if the
517 * interrupt is being sent by an APIC.
518 *
519 * The 'receive illegal vector' will be set on the target APIC when the interrupt
520 * gets generated, see APICPostInterrupt().
521 *
522 * See Intel spec. 10.5.3 "Error Handling".
523 */
524 if ( rcStrict != rcRZ
525 && pVCpu)
526 {
527 if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
528 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
529 }
530 return rcStrict;
531}
532
533
534/**
535 * Checks if this APIC belongs to a logical destination.
536 *
537 * @returns true if the APIC belongs to the logical
538 * destination, false otherwise.
539 * @param pVCpu The cross context virtual CPU structure.
540 * @param fDest The destination mask.
541 *
542 * @thread Any.
543 */
544static bool apicIsLogicalDest(PVMCPU pVCpu, uint32_t fDest)
545{
546 if (XAPIC_IN_X2APIC_MODE(pVCpu))
547 {
548 /*
549 * Flat logical mode is not supported in x2APIC mode.
550 * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
551 * - High 16 bits is the cluster ID.
552 * - Low 16 bits: each bit represents a unique APIC within the cluster.
553 */
554 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
555 uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
556 if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
557 return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
558 return false;
559 }
560
561#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
562 /*
563 * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
564 * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
565 */
566 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
567 if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
568 return true;
569
570 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
571 XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
572 if (enmDestFormat == XAPICDESTFORMAT_FLAT)
573 {
574 /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
575 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
576 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
577 }
578 else
579 {
580 /*
581 * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
582 * - High 4 bits is the cluster ID.
583 * - Low 4 bits: each bit represents a unique APIC within the cluster.
584 */
585 Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
586 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
587 if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
588 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
589 return false;
590 }
591#else
592# error "Implement Pentium and P6 family APIC architectures"
593#endif
594}
595
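/*
 * Worked example (illustrative, xAPIC clustered mode, assuming the cluster ID
 * is the high nibble and the logical ID the low nibble): with LDR logical ID
 * 0x52 (cluster 5, member bit 1) a destination of 0x56 matches -- the cluster
 * nibbles agree and 0x2 & 0x6 is non-zero -- while 0x64 does not, since the
 * cluster nibbles differ.
 */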
596
597/**
598 * Figures out the set of destination CPUs for a given destination mode, format
599 * and delivery mode setting.
600 *
601 * @param pVM The cross context VM structure.
602 * @param fDestMask The destination mask.
603 * @param fBroadcastMask The broadcast mask.
604 * @param enmDestMode The destination mode.
605 * @param enmDeliveryMode The delivery mode.
606 * @param pDestCpuSet The destination CPU set to update.
607 */
608static void apicGetDestCpuSet(PVM pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
609 XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
610{
611 VMCPUSET_EMPTY(pDestCpuSet);
612
613 /*
614 * Physical destination mode only supports either a broadcast or a single target.
615 * - Broadcast with lowest-priority delivery mode is not supported[1]; we deliver it
616 * as a regular broadcast like in fixed delivery mode.
617 * - For a single target, lowest-priority delivery mode makes no sense. We deliver
618 * to the target like in fixed delivery mode.
619 *
620 * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
621 */
622 if ( enmDestMode == XAPICDESTMODE_PHYSICAL
623 && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
624 {
625 AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
626 enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
627 }
628
629 uint32_t const cCpus = pVM->cCpus;
630 if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
631 {
632 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
633#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
634 VMCPUID idCpuLowestTpr = NIL_VMCPUID;
635 uint8_t u8LowestTpr = UINT8_C(0xff);
636 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
637 {
638 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
639 if ( apicIsEnabled(pVCpuDest) /* PAV */
640 && apicIsLogicalDest(pVCpuDest, fDestMask))
641 {
642 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
643 uint8_t const u8Tpr = pXApicPage->tpr.u8Tpr; /* PAV */
644
645 /* If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
646 See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration". */
647 if (u8Tpr <= u8LowestTpr)
648 {
649 u8LowestTpr = u8Tpr;
650 idCpuLowestTpr = idCpu;
651 }
652 }
653 }
654 if (idCpuLowestTpr != NIL_VMCPUID)
655 VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
656#else
657# error "Implement Pentium and P6 family APIC architectures"
658#endif
659 return;
660 }
661
662 /*
663 * x2APIC:
664 * - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
665 * xAPIC:
666 * - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
667 * - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
668 *
669 * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
670 * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
671 * [3] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
672 */
673 if ((fDestMask & fBroadcastMask) == fBroadcastMask)
674 {
675 VMCPUSET_FILL(pDestCpuSet);
676 return;
677 }
678
679 if (enmDestMode == XAPICDESTMODE_PHYSICAL)
680 {
681 /* The destination mask is interpreted as the physical APIC ID of a single target. */
682#if 1
683 /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
684 if (RT_LIKELY(fDestMask < cCpus))
685 VMCPUSET_ADD(pDestCpuSet, fDestMask);
686#else
687 /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
688 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
689 {
690 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
691 if (XAPIC_IN_X2APIC_MODE(pVCpuDest))
692 {
693 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDest);
694 if (pX2ApicPage->id.u32ApicId == fDestMask)
695 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
696 }
697 else
698 {
699 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
700 if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
701 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
702 }
703 }
704#endif
705 }
706 else
707 {
708 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
709
710 /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
711 if (RT_UNLIKELY(!fDestMask))
712 return;
713
714 /* The destination mask is interpreted as a bitmap of software-programmable logical APIC ID of the target APICs. */
715 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
716 {
717 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
718 if (apicIsLogicalDest(pVCpuDest, fDestMask))
719 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
720 }
721 }
722}
723
724
725/**
726 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
727 * Command Register (ICR).
728 *
729 * @returns VBox status code.
730 * @param pVCpu The cross context virtual CPU structure.
731 * @param rcRZ The return code if the operation cannot be
732 * performed in the current context.
733 */
734static VBOXSTRICTRC apicSendIpi(PVMCPU pVCpu, int rcRZ)
735{
736 VMCPU_ASSERT_EMT(pVCpu);
737
738 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
739 XAPICDELIVERYMODE const enmDeliveryMode = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
740 XAPICDESTMODE const enmDestMode = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
741 XAPICINITLEVEL enmInitLevel = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
742 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
743 XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
744 uint8_t const uVector = pXApicPage->icr_lo.u.u8Vector;
745
746 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
747 uint32_t const fDest = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;
748
749#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
750 /*
751 * INIT Level De-assert is not supported on Pentium 4 and Xeon processors.
752 */
753 if (RT_UNLIKELY( enmDeliveryMode == XAPICDELIVERYMODE_INIT_LEVEL_DEASSERT
754 && enmInitLevel == XAPICINITLEVEL_DEASSERT
755 && enmTriggerMode == XAPICTRIGGERMODE_LEVEL))
756 {
757 return VINF_SUCCESS;
758 }
759
760 enmInitLevel = XAPICINITLEVEL_ASSERT;
761 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
762#else
763# error "Implement Pentium and P6 family APIC architectures"
764#endif
765
766 /*
767 * The destination and delivery modes are ignored/by-passed when a destination shorthand is specified.
768 * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
769 */
770 VMCPUSET DestCpuSet;
771 switch (enmDestShorthand)
772 {
773 case XAPICDESTSHORTHAND_NONE:
774 {
775 PVM pVM = pVCpu->CTX_SUFF(pVM);
776 uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
777 apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
778 break;
779 }
780
781 case XAPICDESTSHORTHAND_SELF:
782 {
783 VMCPUSET_EMPTY(&DestCpuSet);
784 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
785 break;
786 }
787
788 case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
789 {
790 VMCPUSET_FILL(&DestCpuSet);
791 break;
792 }
793
794 case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
795 {
796 VMCPUSET_FILL(&DestCpuSet);
797 VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
798 break;
799 }
800 }
801
802 return apicSendIntr(pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet, rcRZ);
803}
804
805
806/**
807 * Sets the Interrupt Command Register (ICR) high dword.
808 *
809 * @returns Strict VBox status code.
810 * @param pVCpu The cross context virtual CPU structure.
811 * @param uIcrHi The ICR high dword.
812 */
813static VBOXSTRICTRC apicSetIcrHi(PVMCPU pVCpu, uint32_t uIcrHi)
814{
815 VMCPU_ASSERT_EMT(pVCpu);
816 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
817
818 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
819 pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
820 return VINF_SUCCESS;
821}
822
823
824/**
825 * Sets the Interrupt Command Register (ICR) low dword.
826 *
827 * @returns Strict VBox status code.
828 * @param pVCpu The cross context virtual CPU structure.
829 * @param uIcrLo The ICR low dword.
830 * @param rcRZ The return code if the operation cannot be performed
831 * in the current context.
832 */
833static VBOXSTRICTRC apicSetIcrLo(PVMCPU pVCpu, uint32_t uIcrLo, int rcRZ)
834{
835 VMCPU_ASSERT_EMT(pVCpu);
836
837 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
838 pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR;
839
840 return apicSendIpi(pVCpu, rcRZ);
842}
843
844
845/**
846 * Sets the Interrupt Command Register (ICR).
847 *
848 * @returns Strict VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param uIcr The ICR value.
851 * @param rcRZ The return code if the operation cannot be performed
852 * in the current context.
853 */
854static VBOXSTRICTRC apicSetIcr(PVMCPU pVCpu, uint64_t uIcr, int rcRZ)
855{
856 VMCPU_ASSERT_EMT(pVCpu);
857 Assert(XAPIC_IN_X2APIC_MODE(pVCpu));
858
859 /* Validate. */
860 uint32_t const uLo = RT_LO_U32(uIcr);
861 if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR)))
862 {
863 /* Update high dword first, then update the low dword which sends the IPI. */
864 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
865 pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(uIcr);
866 return apicSetIcrLo(pVCpu, uLo, rcRZ);
867 }
868 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
869}
870
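/*
 * Illustration (not part of the original file): in x2APIC mode the ICR is a
 * single 64-bit MSR with the destination ID in the high dword. A hypothetical
 * fixed-mode, edge-triggered IPI of vector 0x40 to the APIC with ID 2 could
 * be composed along these lines:
 */
#if 0
uint64_t const uIcrExample = RT_MAKE_U64(UINT32_C(0x00000040) /* vector 0x40, fixed, physical, edge */,
                                         UINT32_C(2)          /* destination APIC ID */);
apicSetIcr(pVCpu, uIcrExample, VINF_CPUM_R3_MSR_WRITE);
#endif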
871
872/**
873 * Sets the Error Status Register (ESR).
874 *
875 * @returns Strict VBox status code.
876 * @param pVCpu The cross context virtual CPU structure.
877 * @param uValue The ESR value.
878 */
879static VBOXSTRICTRC apicSetEsr(PVMCPU pVCpu, uint32_t uValue)
880{
881 VMCPU_ASSERT_EMT(pVCpu);
882 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
883 && (uValue & ~XAPIC_ESR_WO))
884 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);
885
886 /*
887 * A write to the ESR loads the internal error state into the register and
888 * clears the internal state. See AMD spec. 16.4.6 "APIC Error Interrupts".
889 */
890 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
891 pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
892 return VINF_SUCCESS;
893}
894
895
896/**
897 * Updates the Processor Priority Register (PPR).
898 *
899 * @param pVCpu The cross context virtual CPU structure.
900 */
901static void apicUpdatePpr(PVMCPU pVCpu)
902{
903 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
904
905 /* See Intel spec. 10.8.3.1 "Task and Processor Priorities". */
906 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
907 uint8_t const uIsrv = apicGetLastSetBit(&pXApicPage->isr, 0 /* rcNotFound */);
908 uint8_t uPpr;
909 if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
910 uPpr = pXApicPage->tpr.u8Tpr;
911 else
912 uPpr = XAPIC_PPR_GET_PP(uIsrv);
913 pXApicPage->ppr.u8Ppr = uPpr;
914}
915
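/*
 * Worked example (illustrative, taking the high nibble as the priority
 * class): with TPR = 0x45 and highest in-service vector 0x62, the task
 * priority class (0x40) is below the in-service class (0x60), so the PPR
 * becomes 0x60. With TPR = 0x75 the TPR class dominates and the PPR is the
 * full TPR value 0x75, sub-class included.
 */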
916
917/**
918 * Gets the Processor Priority Register (PPR).
919 *
920 * @returns The PPR value.
921 * @param pVCpu The cross context virtual CPU structure.
922 */
923static uint8_t apicGetPpr(PVMCPU pVCpu)
924{
925 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
926
927 /*
928 * With virtualized APIC registers or with TPR virtualization, the hardware may
929 * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
930 * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
931 */
932 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
933 if (pApic->fVirtApicRegsEnabled) /** @todo re-think this */
934 apicUpdatePpr(pVCpu);
935 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
936 return pXApicPage->ppr.u8Ppr;
937}
938
939
940/**
941 * Sets the Task Priority Register (TPR).
942 *
943 * @returns Strict VBox status code.
944 * @param pVCpu The cross context virtual CPU structure.
945 * @param uTpr The TPR value.
946 */
947static VBOXSTRICTRC apicSetTpr(PVMCPU pVCpu, uint32_t uTpr)
948{
949 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
950
951 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
952 && (uTpr & ~XAPIC_TPR))
953 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);
954
955 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
956 pXApicPage->tpr.u8Tpr = uTpr;
957 apicUpdatePpr(pVCpu);
958 apicSignalNextPendingIntr(pVCpu);
959 return VINF_SUCCESS;
960}
961
962
963/**
964 * Sets the End-Of-Interrupt (EOI) register.
965 *
966 * @returns Strict VBox status code.
967 * @param pVCpu The cross context virtual CPU structure.
968 * @param uEoi The EOI value.
969 */
970static VBOXSTRICTRC apicSetEoi(PVMCPU pVCpu, uint32_t uEoi)
971{
972 VMCPU_ASSERT_EMT(pVCpu);
973
974 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
975 && (uEoi & ~XAPIC_EOI_WO))
976 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);
977
978 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
979 int isrv = apicGetLastSetBit(&pXApicPage->isr, VERR_NOT_FOUND);
980 if (isrv >= 0)
981 {
982 /*
983 * Dispensing the spurious-interrupt vector does not affect the ISR.
984 * See Intel spec. 10.9 "Spurious Interrupt".
985 */
986 uint8_t const uVector = isrv;
987 if (uVector != pXApicPage->svr.u.u8SpuriousVector)
988 {
989 apicClearVectorInReg(&pXApicPage->isr, uVector);
990 apicUpdatePpr(pVCpu);
991 bool fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
992 if (fLevelTriggered)
993 {
994 /** @todo We need to broadcast EOI to IO APICs here. */
995 apicClearVectorInReg(&pXApicPage->tmr, uVector);
996 }
997
998 /** @todo Signal next interrupt? Most likely not as
999 * APICUpdatePendingInterrupts() will be called before next VM-entry. */
1000 apicSignalNextPendingIntr(pVCpu);
1001 }
1002 }
1003
1004 return VINF_SUCCESS;
1005}
1006
1007
1008/**
1009 * Sets the Logical Destination Register (LDR).
1010 *
1011 * @returns Strict VBox status code.
1012 * @param pVCpu The cross context virtual CPU structure.
1013 * @param uLdr The LDR value.
1014 *
1015 * @remarks LDR is read-only in x2APIC mode.
1016 */
1017static VBOXSTRICTRC apicSetLdr(PVMCPU pVCpu, uint32_t uLdr)
1018{
1019 VMCPU_ASSERT_EMT(pVCpu);
1020 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1021
1022 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1023 apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR);
1024 return VINF_SUCCESS;
1025}
1026
1027
1028/**
1029 * Sets the Destination Format Register (DFR).
1030 *
1031 * @returns Strict VBox status code.
1032 * @param pVCpu The cross context virtual CPU structure.
1033 * @param uDfr The DFR value.
1034 *
1035 * @remarks DFR is not available in x2APIC mode.
1036 */
1037static VBOXSTRICTRC apicSetDfr(PVMCPU pVCpu, uint32_t uDfr)
1038{
1039 VMCPU_ASSERT_EMT(pVCpu);
1040 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1041
1042 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1043 apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr & XAPIC_DFR);
1044 return VINF_SUCCESS;
1045}
1046
1047
1048/**
1049 * Sets the Timer Divide Configuration Register (DCR).
1050 *
1051 * @returns Strict VBox status code.
1052 * @param pVCpu The cross context virtual CPU structure.
1053 * @param uTimerDcr The timer DCR value.
1054 */
1055static VBOXSTRICTRC apicSetTimerDcr(PVMCPU pVCpu, uint32_t uTimerDcr)
1056{
1057 VMCPU_ASSERT_EMT(pVCpu);
1058 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1059 && (uTimerDcr & ~XAPIC_TIMER_DCR))
1060 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);
1061
1062 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1063 apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
1064 return VINF_SUCCESS;
1065}
1066
1067
1068/**
1069 * Gets the timer's Current Count Register (CCR).
1070 *
1071 * @returns VBox status code.
1072 * @param pVCpu The cross context virtual CPU structure.
1073 * @param rcBusy The busy return code for the timer critical section.
1074 * @param puValue Where to store the LVT timer CCR.
1075 */
1076static VBOXSTRICTRC apicGetTimerCcr(PVMCPU pVCpu, int rcBusy, uint32_t *puValue)
1077{
1078 VMCPU_ASSERT_EMT(pVCpu);
1079 Assert(puValue);
1080
1081 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1082 *puValue = 0;
1083
1084 /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1085 if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1086 return VINF_SUCCESS;
1087
1088 /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
1089 uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
1090 if (!uInitialCount)
1091 return VINF_SUCCESS;
1092
1093 /*
1094 * Reading the virtual-sync clock requires locking its timer because it's not
1095 * a simple atomic operation, see tmVirtualSyncGetEx().
1096 *
1097 * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
1098 */
1099 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1100 PTMTIMER pTimer = CTX_SUFF(pApicCpu->pTimer);
1101
1102 int rc = TMTimerLock(pTimer, rcBusy);
1103 if (rc == VINF_SUCCESS)
1104 {
1105 /* If the current-count register is 0, it implies the timer expired. */
1106 uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
1107 if (uCurrentCount)
1108 {
1109 uint64_t const cTicksElapsed = TMTimerGet(pApicCpu->CTX_SUFF(pTimer)) - pApicCpu->u64TimerInitial;
1110 TMTimerUnlock(pTimer);
1111 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
1112 uint64_t const uDelta = cTicksElapsed >> uTimerShift;
1113 if (uInitialCount > uDelta)
1114 *puValue = uInitialCount - uDelta;
1115 }
1116 else
1117 TMTimerUnlock(pTimer);
1118 }
1119 return rc;
1120}
1121
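/*
 * Worked example (illustrative): with an initial count of 100000, a divider
 * shift of 3 (divide-by-8) and 400000 elapsed virtual-sync ticks, the delta
 * is 400000 >> 3 = 50000 and the CCR reads back 100000 - 50000 = 50000. Once
 * the delta reaches the initial count, the CCR reads 0.
 */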
1122
1123/**
1124 * Sets the timer's Initial-Count Register (ICR).
1125 *
1126 * @returns Strict VBox status code.
1127 * @param pVCpu The cross context virtual CPU structure.
1128 * @param rcBusy The busy return code for the timer critical section.
1129 * @param uInitialCount The timer ICR.
1130 */
1131static VBOXSTRICTRC apicSetTimerIcr(PVMCPU pVCpu, int rcBusy, uint32_t uInitialCount)
1132{
1133 VMCPU_ASSERT_EMT(pVCpu);
1134
1135 PAPIC pApic = VM_TO_APIC(CTX_SUFF(pVCpu->pVM));
1136 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1137 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1138 PTMTIMER pTimer = CTX_SUFF(pApicCpu->pTimer);
1139
1140 /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1141 if ( pApic->fSupportsTscDeadline
1142 && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1143 return VINF_SUCCESS;
1144
1145 /*
1146 * The timer CCR may be modified by apicR3TimerCallback() in parallel,
1147 * so obtain the lock -before- updating it here to be consistent with the
1148 * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
1149 */
1150 int rc = TMTimerLock(pTimer, rcBusy);
1151 if (rc == VINF_SUCCESS)
1152 {
1153 pXApicPage->timer_icr.u32InitialCount = uInitialCount;
1154 pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
1155 if (uInitialCount)
1156 APICStartTimer(pApicCpu, uInitialCount);
1157 else
1158 APICStopTimer(pApicCpu);
1159 TMTimerUnlock(pTimer);
1160 }
1161 return rc;
1162}
1163
1164
1165/**
1166 * Sets an LVT entry.
1167 *
1168 * @returns Strict VBox status code.
1169 * @param pVCpu The cross context virtual CPU structure.
1170 * @param offLvt The LVT entry offset in the xAPIC page.
1171 * @param uLvt The LVT value to set.
1172 */
1173static VBOXSTRICTRC apicSetLvtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1174{
1175#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1176 VMCPU_ASSERT_EMT(pVCpu);
1177 AssertMsg( offLvt == XAPIC_OFF_LVT_TIMER
1178 || offLvt == XAPIC_OFF_LVT_THERMAL
1179 || offLvt == XAPIC_OFF_LVT_PERF
1180 || offLvt == XAPIC_OFF_LVT_LINT0
1181 || offLvt == XAPIC_OFF_LVT_LINT1
1182 || offLvt == XAPIC_OFF_LVT_ERROR,
1183 ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#x, uLvt=%#x\n", pVCpu->idCpu, offLvt, uLvt));
1184
1185 /*
1186 * If TSC-deadline mode isn't supported, ignore the bit in xAPIC mode
1187 * and raise #GP(0) in x2APIC mode.
1188 */
1189 PCAPIC pApic = VM_TO_APIC(CTX_SUFF(pVCpu->pVM));
1190 if (offLvt == XAPIC_OFF_LVT_TIMER)
1191 {
1192 if ( !pApic->fSupportsTscDeadline
1193 && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
1194 {
1195 if (XAPIC_IN_X2APIC_MODE(pVCpu))
1196 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1197 uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
1198 /** @todo TSC-deadline timer mode transition */
1199 }
1200 }
1201
1202 /*
1203 * Validate rest of the LVT bits.
1204 */
1205 uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
1206 AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);
1207
1208 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1209 && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
1210 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1211
1212 uLvt &= g_au32LvtValidMasks[idxLvt];
1213
1214 /*
1215 * In the software-disabled state, LVT mask-bit must remain set and attempts to clear the mask
1216 * bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
1217 */
1218 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1219 AssertCompile(RT_OFFSETOF(XAPICPAGE, svr) == RT_OFFSETOF(X2APICPAGE, svr));
1220 if (!pXApicPage->svr.u.fApicSoftwareEnable)
1221 uLvt |= XAPIC_LVT_MASK;
1222
1223 /*
1224 * It is unclear whether we should signal a 'send illegal vector' error here and ignore updating
1225 * the LVT entry when the delivery mode is 'fixed'[1] or update it in addition to signaling the
1226 * error or not signal the error at all. For now, we'll allow setting illegal vectors into the LVT
1227 * but set the 'send illegal vector' error here. The 'receive illegal vector' error will be set if
1228 * the interrupt for the vector happens to be generated, see APICPostInterrupt().
1229 *
1230 * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
1231 */
1232 if (RT_UNLIKELY( XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
1233 && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
1234 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
1235
1236 apicWriteRaw32(pXApicPage, offLvt, uLvt);
1237 return VINF_SUCCESS;
1238#else
1239# error "Implement Pentium and P6 family APIC architectures"
1240#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
1241}
1242
1243
1244#if 0
1245/**
1246 * Sets an LVT entry in the extended LVT range.
1247 *
1248 * @returns VBox status code.
1249 * @param pVCpu The cross context virtual CPU structure.
1250 * @param offLvt The LVT entry offset in the xAPIC page.
1251 * @param uValue The LVT value to set.
1252 */
1253static int apicSetLvtExtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1254{
1255 VMCPU_ASSERT_EMT(pVCpu);
1256 AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvtExtEntry: invalid offset %#x\n", pVCpu->idCpu, offLvt));
1257
1258 /** @todo support CMCI. */
1259 return VERR_NOT_IMPLEMENTED;
1260}
1261#endif
1262
1263
1264/**
1265 * Hints TM about the APIC timer frequency.
1266 *
1267 * @param pApicCpu The APIC CPU state.
 * @param uInitialCount The timer initial count (ICR).
 * @param uTimerShift The timer divide shift (derived from the DCR).
1268 * @thread Any.
1269 */
1270static void apicHintTimerFreq(PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
1271{
1272 Assert(pApicCpu);
1273 Assert(TMTimerIsLockOwner(CTX_SUFF(pApicCpu->pTimer)));
1274
1275 if ( pApicCpu->uHintedTimerInitialCount != uInitialCount
1276 || pApicCpu->uHintedTimerShift != uTimerShift)
1277 {
1278 uint32_t uHz;
1279 if (uInitialCount)
1280 {
1281 uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
1282 uHz = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer)) / cTicksPerPeriod;
1283 }
1284 else
1285 uHz = 0;
1286
1287 TMTimerSetFrequencyHint(pApicCpu->CTX_SUFF(pTimer), uHz);
1288 pApicCpu->uHintedTimerInitialCount = uInitialCount;
1289 pApicCpu->uHintedTimerShift = uTimerShift;
1290 }
1291}
1292
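/*
 * Worked example (illustrative): with a 1 GHz timer clock from
 * TMTimerGetFreq, an initial count of 100000 and a divider shift of 3, one
 * period spans 100000 << 3 = 800000 ticks, so the hinted frequency is
 * 10^9 / 800000 = 1250 Hz.
 */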
1293
1294/**
1295 * Reads an APIC register.
1296 *
1297 * @returns VBox status code.
1298 * @param pApicDev The APIC device instance.
1299 * @param pVCpu The cross context virtual CPU structure.
1300 * @param offReg The offset of the register being read.
1301 * @param puValue Where to store the register value.
1302 */
1303static int apicReadRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t *puValue)
1304{
1305 VMCPU_ASSERT_EMT(pVCpu);
1306 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1307
1308 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1309 uint32_t uValue = 0;
1310 int rc = VINF_SUCCESS;
1311 switch (offReg)
1312 {
1313 case XAPIC_OFF_ID:
1314 case XAPIC_OFF_VERSION:
1315 case XAPIC_OFF_TPR:
1316 case XAPIC_OFF_EOI:
1317 case XAPIC_OFF_RRD:
1318 case XAPIC_OFF_LDR:
1319 case XAPIC_OFF_DFR:
1320 case XAPIC_OFF_SVR:
1321 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1322 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1323 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1324 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1325 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1326 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1327 case XAPIC_OFF_ESR:
1328 case XAPIC_OFF_ICR_LO:
1329 case XAPIC_OFF_ICR_HI:
1330 case XAPIC_OFF_LVT_TIMER:
1331#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1332 case XAPIC_OFF_LVT_THERMAL:
1333#endif
1334 case XAPIC_OFF_LVT_PERF:
1335 case XAPIC_OFF_LVT_LINT0:
1336 case XAPIC_OFF_LVT_LINT1:
1337 case XAPIC_OFF_LVT_ERROR:
1338 case XAPIC_OFF_TIMER_ICR:
1339 case XAPIC_OFF_TIMER_DCR:
1340 {
1341 Assert( !XAPIC_IN_X2APIC_MODE(pVCpu)
1342 || ( offReg != XAPIC_OFF_DFR
1343 && offReg != XAPIC_OFF_ICR_HI
1344 && offReg != XAPIC_OFF_EOI));
1345 uValue = apicReadRaw32(pXApicPage, offReg);
1346 break;
1347 }
1348
1349 case XAPIC_OFF_PPR:
1350 {
1351 uValue = apicGetPpr(pVCpu);
1352 break;
1353 }
1354
1355 case XAPIC_OFF_TIMER_CCR:
1356 {
1357 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1358 rc = VBOXSTRICTRC_VAL(apicGetTimerCcr(pVCpu, VINF_IOM_R3_MMIO_READ, &uValue));
1359 break;
1360 }
1361
1362 case XAPIC_OFF_APR:
1363 {
1364#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1365 /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
1366 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1367#else
1368# error "Implement Pentium and P6 family APIC architectures"
1369#endif
1370 break;
1371 }
1372
1373 default:
1374 {
1375 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1376 rc = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "offReg=%#x Id=%u\n", offReg, pVCpu->idCpu);
1377 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1378 break;
1379 }
1380 }
1381
1382 *puValue = uValue;
1383 return rc;
1384}
1385
1386
1387/**
1388 * Writes an APIC register.
1389 *
1390 * @returns Strict VBox status code.
1391 * @param pApicDev The APIC device instance.
1392 * @param pVCpu The cross context virtual CPU structure.
1393 * @param offReg The offset of the register being written.
1394 * @param uValue The register value.
1395 */
1396static VBOXSTRICTRC apicWriteRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t uValue)
1397{
1398 VMCPU_ASSERT_EMT(pVCpu);
1399 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1400 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1401
1402 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1403 switch (offReg)
1404 {
1405 case XAPIC_OFF_TPR:
1406 {
1407 rcStrict = apicSetTpr(pVCpu, uValue);
1408 break;
1409 }
1410
1411 case XAPIC_OFF_LVT_TIMER:
1412#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1413 case XAPIC_OFF_LVT_THERMAL:
1414#endif
1415 case XAPIC_OFF_LVT_PERF:
1416 case XAPIC_OFF_LVT_LINT0:
1417 case XAPIC_OFF_LVT_LINT1:
1418 case XAPIC_OFF_LVT_ERROR:
1419 {
1420 rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
1421 break;
1422 }
1423
1424 case XAPIC_OFF_TIMER_ICR:
1425 {
1426 rcStrict = apicSetTimerIcr(pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
1427 break;
1428 }
1429
1430 case XAPIC_OFF_EOI:
1431 {
1432 rcStrict = apicSetEoi(pVCpu, uValue);
1433 break;
1434 }
1435
1436 case XAPIC_OFF_LDR:
1437 {
1438 rcStrict = apicSetLdr(pVCpu, uValue);
1439 break;
1440 }
1441
1442 case XAPIC_OFF_DFR:
1443 {
1444 rcStrict = apicSetDfr(pVCpu, uValue);
1445 break;
1446 }
1447
1448 case XAPIC_OFF_SVR:
1449 {
1450 rcStrict = apicSetSvr(pVCpu, uValue);
1451 break;
1452 }
1453
1454 case XAPIC_OFF_ICR_LO:
1455 {
1456 rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE);
1457 break;
1458 }
1459
1460 case XAPIC_OFF_ICR_HI:
1461 {
1462 rcStrict = apicSetIcrHi(pVCpu, uValue);
1463 break;
1464 }
1465
1466 case XAPIC_OFF_TIMER_DCR:
1467 {
1468 rcStrict = apicSetTimerDcr(pVCpu, uValue);
1469 break;
1470 }
1471
1472 case XAPIC_OFF_ESR:
1473 {
1474 rcStrict = apicSetEsr(pVCpu, uValue);
1475 break;
1476 }
1477
1478 case XAPIC_OFF_APR:
1479 case XAPIC_OFF_RRD:
1480 {
1481#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1482 /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
1483#else
1484# error "Implement Pentium and P6 family APIC architectures"
1485#endif
1486 break;
1487 }
1488
1489 /* Unavailable/reserved in xAPIC mode: */
1490 case X2APIC_OFF_SELF_IPI:
1491 /* Read-only registers: */
1492 case XAPIC_OFF_ID:
1493 case XAPIC_OFF_VERSION:
1494 case XAPIC_OFF_PPR:
1495 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1496 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1497 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1498 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1499 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1500 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1501 case XAPIC_OFF_TIMER_CCR:
1502 default:
1503 {
1504 rcStrict = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "APIC%u: offReg=%#x\n", pVCpu->idCpu, offReg);
1505 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1506 break;
1507 }
1508 }
1509
1510 return rcStrict;
1511}
1512
1513
1514/**
1515 * @interface_method_impl{PDMAPICREG,pfnReadMsrR3}
1516 */
1517VMMDECL(VBOXSTRICTRC) APICReadMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1518{
1519 /*
1520 * Validate.
1521 */
1522 VMCPU_ASSERT_EMT(pVCpu);
1523 Assert(u32Reg >= MSR_IA32_X2APIC_START && u32Reg <= MSR_IA32_X2APIC_END);
1524 Assert(pu64Value);
1525
1526 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1527 if (pApic->fRZEnabled)
1528 { /* likely */}
1529 else
1530 return VINF_CPUM_R3_MSR_READ;
1531
1532 STAM_COUNTER_INC(&VMCPU_TO_APICCPU(pVCpu)->StatMsrRead);
1533
1534 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1535 if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
1536 {
1537 switch (u32Reg)
1538 {
1539 /* Special handling for x2APIC: */
1540 case MSR_IA32_X2APIC_ICR:
1541 {
1542 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
1543 uint64_t const uHi = pX2ApicPage->icr_hi.u32IcrHi;
1544 uint64_t const uLo = pX2ApicPage->icr_lo.all.u32IcrLo;
1545 *pu64Value = RT_MAKE_U64(uLo, uHi);
1546 break;
1547 }
1548
1549 /* Special handling, compatible with xAPIC: */
1550 case MSR_IA32_X2APIC_TIMER_CCR:
1551 {
1552 uint32_t uValue;
1553 rcStrict = apicGetTimerCcr(pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1554 *pu64Value = uValue;
1555 break;
1556 }
1557
1558 /* Special handling, compatible with xAPIC: */
1559 case MSR_IA32_X2APIC_PPR:
1560 {
1561 *pu64Value = apicGetPpr(pVCpu);
1562 break;
1563 }
1564
1565 /* Raw read, compatible with xAPIC: */
1566 case MSR_IA32_X2APIC_ID:
1567 case MSR_IA32_X2APIC_VERSION:
1568 case MSR_IA32_X2APIC_TPR:
1569 case MSR_IA32_X2APIC_LDR:
1570 case MSR_IA32_X2APIC_SVR:
1571 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1572 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1573 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1574 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1575 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1576 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1577 case MSR_IA32_X2APIC_ESR:
1578 case MSR_IA32_X2APIC_LVT_TIMER:
1579 case MSR_IA32_X2APIC_LVT_THERMAL:
1580 case MSR_IA32_X2APIC_LVT_PERF:
1581 case MSR_IA32_X2APIC_LVT_LINT0:
1582 case MSR_IA32_X2APIC_LVT_LINT1:
1583 case MSR_IA32_X2APIC_LVT_ERROR:
1584 case MSR_IA32_X2APIC_TIMER_ICR:
1585 case MSR_IA32_X2APIC_TIMER_DCR:
1586 {
1587 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1588 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1589 *pu64Value = apicReadRaw32(pXApicPage, offReg);
1590 break;
1591 }
1592
1593 /* Write-only MSRs: */
1594 case MSR_IA32_X2APIC_SELF_IPI:
1595 case MSR_IA32_X2APIC_EOI:
1596 {
1597 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1598 break;
1599 }
1600
1601 /* Reserved MSRs: */
1602 case MSR_IA32_X2APIC_LVT_CMCI:
1603 default:
1604 {
1605 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1606 break;
1607 }
1608 }
1609 }
1610 else
1611 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
1612
1613 return rcStrict;
1614}
1615
1616
1617/**
1618 * @interface_method_impl{PDMAPICREG,pfnWriteMsrR3}
1619 */
1620VMMDECL(VBOXSTRICTRC) APICWriteMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t u64Value)
1621{
1622 /*
1623 * Validate.
1624 */
1625 VMCPU_ASSERT_EMT(pVCpu);
1626 Assert(u32Reg >= MSR_IA32_X2APIC_START && u32Reg <= MSR_IA32_X2APIC_END);
1627
1628 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1629 if (pApic->fRZEnabled)
1630 { /* likely */ }
1631 else
1632 return VINF_CPUM_R3_MSR_WRITE;
1633
1634 STAM_COUNTER_INC(&VMCPU_TO_APICCPU(pVCpu)->StatMsrWrite);
1635
1636 /*
1637 * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
1638 * accesses where they are ignored. Hence, we need to validate each register before
1639 * invoking the generic/xAPIC write functions.
1640 *
1641 * Bits 63:32 of all registers except the ICR are reserved; we'll handle this common
1642 * case first and handle validating the remaining bits on a per-register basis.
1643 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
1644 */
1645 if ( u32Reg != MSR_IA32_X2APIC_ICR
1646 && RT_HI_U32(u64Value))
1647 return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);
1648
1649 uint32_t u32Value = RT_LO_U32(u64Value);
1650 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1651 if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
1652 {
1653 switch (u32Reg)
1654 {
1655 case MSR_IA32_X2APIC_TPR:
1656 {
1657 rcStrict = apicSetTpr(pVCpu, u32Value);
1658 break;
1659 }
1660
1661 case MSR_IA32_X2APIC_ICR:
1662 {
1663 rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
1664 break;
1665 }
1666
1667 case MSR_IA32_X2APIC_SVR:
1668 {
1669 rcStrict = apicSetSvr(pVCpu, u32Value);
1670 break;
1671 }
1672
1673 case MSR_IA32_X2APIC_ESR:
1674 {
1675 rcStrict = apicSetEsr(pVCpu, u32Value);
1676 break;
1677 }
1678
1679 case MSR_IA32_X2APIC_TIMER_DCR:
1680 {
1681 rcStrict = apicSetTimerDcr(pVCpu, u32Value);
1682 break;
1683 }
1684
1685 case MSR_IA32_X2APIC_LVT_TIMER:
1686 case MSR_IA32_X2APIC_LVT_THERMAL:
1687 case MSR_IA32_X2APIC_LVT_PERF:
1688 case MSR_IA32_X2APIC_LVT_LINT0:
1689 case MSR_IA32_X2APIC_LVT_LINT1:
1690 case MSR_IA32_X2APIC_LVT_ERROR:
1691 {
1692 rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
1693 break;
1694 }
1695
1696 case MSR_IA32_X2APIC_TIMER_ICR:
1697 {
1698 rcStrict = apicSetTimerIcr(pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
1699 break;
1700 }
1701
1702 /* Write-only MSRs: */
1703 case MSR_IA32_X2APIC_SELF_IPI:
1704 {
1705 uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
1706 APICPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE);
1707 rcStrict = VINF_SUCCESS;
1708 break;
1709 }
1710
1711 case MSR_IA32_X2APIC_EOI:
1712 {
1713 rcStrict = apicSetEoi(pVCpu, u32Value);
1714 break;
1715 }
1716
1717 /* Read-only MSRs: */
1718 case MSR_IA32_X2APIC_ID:
1719 case MSR_IA32_X2APIC_VERSION:
1720 case MSR_IA32_X2APIC_PPR:
1721 case MSR_IA32_X2APIC_LDR:
1722 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1723 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1724 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1725 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1726 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1727 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1728 case MSR_IA32_X2APIC_TIMER_CCR:
1729 {
1730 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
1731 break;
1732 }
1733
1734 /* Reserved MSRs: */
1735 case MSR_IA32_X2APIC_LVT_CMCI:
1736 default:
1737 {
1738 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
1739 break;
1740 }
1741 }
1742 }
1743 else
1744 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);
1745
1746 return rcStrict;
1747}
1748
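
/*
 * Editor's illustrative sketch (not part of the original source): the reserved-bit
 * rule enforced above, in isolation. Only the ICR is a 64-bit register in x2APIC
 * mode; a write such as WRMSR(MSR_IA32_X2APIC_TPR, UINT64_C(0x100000000)) must
 * raise #GP(0). The helper name is hypothetical; RT_HI_U32 and the MSR constants
 * are the ones used above.
 */
#if 0 /* example only */
static bool apicExampleIsX2ApicMsrWriteValid(uint32_t u32Reg, uint64_t u64Value)
{
    /* Bits 63:32 are reserved for everything except the 64-bit ICR. */
    if (   u32Reg != MSR_IA32_X2APIC_ICR
        && RT_HI_U32(u64Value) != 0)
        return false;   /* -> #GP(0) via apicMsrAccessError(). */
    return true;        /* Remaining bits are validated per-register. */
}
#endif
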

/**
 * @interface_method_impl{PDMAPICREG,pfnSetBaseMsrR3}
 */
VMMDECL(VBOXSTRICTRC) APICSetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint64_t uBase)
{
    Assert(pVCpu);
    PAPICCPU pApicCpu   = VMCPU_TO_APICCPU(pVCpu);
    PAPIC    pApic      = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
    APICMODE enmNewMode = apicGetMode(uBase);
    uint64_t uBaseMsr   = pApicCpu->uApicBaseMsr;

    /** @todo probably go back to ring-3 for all cases regardless of
     *        fRZEnabled. Writing this MSR is not something guests
     *        typically do often, and therefore is not performance
     *        critical. We'll have better diagnostics in ring-3. */
    if (!pApic->fRZEnabled)
        return VINF_CPUM_R3_MSR_WRITE;

    /*
     * We do not support re-mapping the APIC base address because:
     *    - We'll have to manage all the mappings ourselves in the APIC (reference counting based unmapping etc.),
     *      i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
     *    - It's unclear how/if IOM can fall back to handling regions as regular memory (if the MMIO
     *      region remains mapped but doesn't belong to the calling VCPU's APIC).
     */
    /** @todo Handle per-VCPU APIC base relocation. */
    if (MSR_APICBASE_PHYSADDR(uBase) != XAPIC_APICBASE_PHYSADDR)
    {
#ifdef IN_RING3
        LogRelMax(5, ("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
                      MSR_APICBASE_PHYSADDR(uBase)));
        return VERR_CPUM_RAISE_GP_0;
#else
        return VINF_CPUM_R3_MSR_WRITE;
#endif
    }

    /*
     * Act on state transition.
     */
    /** @todo We need to update the CPUID according to the state, which we
     *        currently don't do as CPUMSetGuestCpuIdFeature() is setting
     *        per-VM CPUID bits while we need per-VCPU specific bits. */
    if (enmNewMode != enmOldMode)
    {
        switch (enmNewMode)
        {
            case APICMODE_DISABLED:
            {
#ifdef IN_RING3
                /*
                 * The APIC state needs to be reset (especially the APIC ID, as the x2APIC APIC ID bit layout
                 * is different). We can start with a clean slate identical to the state after a power-up/reset.
                 *
                 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
                 */
                APICR3Reset(pVCpu);
                uBaseMsr &= ~(MSR_APICBASE_XAPIC_ENABLE_BIT | MSR_APICBASE_X2APIC_ENABLE_BIT);
#else
                return VINF_CPUM_R3_MSR_WRITE;
#endif
                break;
            }

            case APICMODE_XAPIC:
            {
                if (enmOldMode != APICMODE_DISABLED)
                {
                    Log(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
                    return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
                }
                uBaseMsr |= MSR_APICBASE_XAPIC_ENABLE_BIT;
                break;
            }

            case APICMODE_X2APIC:
            {
                uBaseMsr |= MSR_APICBASE_X2APIC_ENABLE_BIT;

                /*
                 * The APIC ID needs updating when entering x2APIC mode.
                 * A software-written APIC ID in xAPIC mode isn't preserved.
                 * The APIC ID becomes read-only to software in x2APIC mode.
                 *
                 * See Intel spec. 10.12.5.1 "x2APIC States".
                 */
                PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
                ASMMemZero32(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
                pX2ApicPage->id.u32ApicId = pVCpu->idCpu;

                /*
                 * LDR initialization occurs when entering x2APIC mode.
                 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
                 */
                pX2ApicPage->ldr.u32LogicalApicId = ((pX2ApicPage->id.u32ApicId >> 4) << 16)
                                                  | (UINT32_C(1) << (pX2ApicPage->id.u32ApicId & UINT32_C(0xf)));
                break;
            }

            case APICMODE_INVALID:
            default:
            {
                Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
                return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
            }
        }
    }

    ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
    return VINF_SUCCESS;
}

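
/*
 * Editor's illustrative sketch (not part of the original source): the logical
 * x2APIC ID derivation used above, per Intel spec. 10.12.10.2. Bits 31:16 hold
 * the cluster ID (APIC ID >> 4), bits 15:0 a one-hot position within the
 * 16-CPU cluster. The helper name and example value are hypothetical.
 */
#if 0 /* example only */
static uint32_t apicExampleDeriveLogicalX2ApicId(uint32_t uApicId)
{
    return ((uApicId >> 4) << 16)                       /* cluster ID */
         | (UINT32_C(1) << (uApicId & UINT32_C(0xf)));  /* one-hot logical ID */
}
/* E.g. APIC ID 0x21 -> cluster 2, bit 1 -> logical ID 0x00020002. */
#endif
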

/**
 * @interface_method_impl{PDMAPICREG,pfnGetBaseMsrR3}
 */
VMMDECL(uint64_t) APICGetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);

    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    return pApicCpu->uApicBaseMsr;
}


/**
 * @interface_method_impl{PDMAPICREG,pfnSetTprR3}
 */
VMMDECL(void) APICSetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Tpr)
{
    apicSetTpr(pVCpu, u8Tpr);
}


/**
 * @interface_method_impl{PDMAPICREG,pfnGetTprR3}
 */
VMMDECL(uint8_t) APICGetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    return pXApicPage->tpr.u8Tpr;
}


/**
 * @interface_method_impl{PDMAPICREG,pfnGetTimerFreqR3}
 */
VMMDECL(uint64_t) APICGetTimerFreq(PPDMDEVINS pDevIns)
{
    PVM      pVM      = PDMDevHlpGetVM(pDevIns);
    PVMCPU   pVCpu    = &pVM->aCpus[0];
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    uint64_t uTimer   = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer));
    return uTimer;
}


/**
 * @interface_method_impl{PDMAPICREG,pfnBusDeliverR3}
 * @remarks This is a private interface between the IOAPIC and the APIC.
 */
VMMDECL(int) APICBusDeliver(PPDMDEVINS pDevIns, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
                            uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uTagSrc)
{
    NOREF(uPolarity);
    PVM pVM = PDMDevHlpGetVM(pDevIns);

    /*
     * The destination field (mask) in the I/O APIC redirection table entry is 8 bits wide.
     * Hence, the broadcast mask is 0xff.
     * See I/O APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirection Table Registers".
     */
    XAPICTRIGGERMODE  enmTriggerMode  = (XAPICTRIGGERMODE)uTriggerMode;
    XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
    XAPICDESTMODE     enmDestMode     = (XAPICDESTMODE)uDestMode;
    uint32_t          fDestMask       = uDest;
    uint32_t          fBroadcastMask  = UINT32_C(0xff);

    VMCPUSET DestCpuSet;
    apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
    VBOXSTRICTRC rcStrict = apicSendIntr(NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet, VINF_SUCCESS);
    return VBOXSTRICTRC_VAL(rcStrict);
}

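
/*
 * Editor's illustrative sketch (not part of the original source): how an 8-bit
 * physical destination from the I/O APIC is matched, under the broadcast rule
 * noted above. apicGetDestCpuSet() implements the full physical/logical logic;
 * this helper and its exact semantics are assumptions for illustration.
 */
#if 0 /* example only */
static bool apicExampleMatchesPhysicalDest(uint8_t uApicId, uint32_t fDestMask, uint32_t fBroadcastMask)
{
    if (fDestMask == fBroadcastMask)    /* 0xff in xAPIC mode: deliver to all CPUs. */
        return true;
    return uApicId == fDestMask;        /* Otherwise an exact APIC ID match. */
}
#endif
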

/**
 * @interface_method_impl{PDMAPICREG,pfnLocalInterruptR3}
 * @remarks This is a private interface between the PIC and the APIC.
 */
VMMDECL(VBOXSTRICTRC) APICLocalInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
{
    NOREF(pDevIns);
    AssertReturn(u8Pin <= 1, VERR_INVALID_PARAMETER);
    AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);

    PCXAPICPAGE  pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    VBOXSTRICTRC rcStrict   = VINF_SUCCESS;

    /* If the APIC is enabled, the interrupt is subject to LVT programming. */
    if (   apicIsEnabled(pVCpu)
        && pXApicPage->svr.u.fApicSoftwareEnable)
    {
        /* Pick the LVT entry corresponding to the interrupt pin. */
        static const uint16_t s_au16LvtOffsets[] =
        {
            XAPIC_OFF_LVT_LINT0,
            XAPIC_OFF_LVT_LINT1
        };
        Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
        uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
        uint32_t const uLvt   = apicReadRaw32(pXApicPage, offLvt);

        /* If software hasn't masked the interrupt in the LVT entry, proceed with interrupt processing. */
        if (!XAPIC_LVT_IS_MASKED(uLvt))
        {
            XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
            XAPICTRIGGERMODE        enmTriggerMode  = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);

            switch (enmDeliveryMode)
            {
                case XAPICDELIVERYMODE_FIXED:
                {
                    /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
                    if (offLvt == XAPIC_OFF_LVT_LINT1)
                        enmTriggerMode = XAPICTRIGGERMODE_EDGE;
                    /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
                              delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
                              use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
                              means. */
                    /* fallthru */
                }
                case XAPICDELIVERYMODE_SMI:
                case XAPICDELIVERYMODE_NMI:
                case XAPICDELIVERYMODE_INIT:    /** @todo won't work in R0/RC because callers don't care about rcRZ. */
                case XAPICDELIVERYMODE_EXTINT:
                {
                    VMCPUSET DestCpuSet;
                    VMCPUSET_EMPTY(&DestCpuSet);
                    VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
                    uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
                    rcStrict = apicSendIntr(pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet, rcRZ);
                    break;
                }

                /* Reserved/unknown delivery modes: */
                case XAPICDELIVERYMODE_LOWEST_PRIO:
                case XAPICDELIVERYMODE_STARTUP:
                default:
                {
                    rcStrict = VERR_INTERNAL_ERROR_3;
                    AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x on LINT%d\n", pVCpu->idCpu,
                                     enmDeliveryMode, u8Pin));
                    break;
                }
            }
        }
    }
    else
    {
        /* If the APIC is disabled, pass the interrupt straight through to the CPU. */
        if (u8Level)
            APICSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
        else
            APICClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
    }

    return rcStrict;
}

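
/*
 * Editor's illustrative sketch (not part of the original source): the LVT fields
 * consulted above, assuming the architectural xAPIC LVT layout (vector in bits
 * 7:0, delivery mode in bits 10:8, trigger mode in bit 15, mask in bit 16).
 * The XAPIC_LVT_* macros used above encapsulate exactly this kind of decoding.
 */
#if 0 /* example only */
static void apicExampleDecodeLvt(uint32_t uLvt)
{
    uint8_t const uVector       = uLvt & 0xff;                /* bits 7:0 */
    uint8_t const uDeliveryMode = (uLvt >> 8) & 0x7;          /* bits 10:8 */
    bool const    fLevelTrigger = RT_BOOL(uLvt & RT_BIT(15)); /* trigger mode */
    bool const    fMasked       = RT_BOOL(uLvt & RT_BIT(16)); /* masked LVTs are skipped above */
    NOREF(uVector); NOREF(uDeliveryMode); NOREF(fLevelTrigger); NOREF(fMasked);
}
#endif
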

/**
 * @interface_method_impl{PDMAPICREG,pfnHasPendingIrqR3}
 */
VMMDECL(bool) APICHasPendingIrq(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t *pu8PendingIrq)
{
    return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIrq);
}


/**
 * @interface_method_impl{PDMAPICREG,pfnGetInterruptR3}
 */
VMMDECL(int) APICGetInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t *puTagSrc)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    if (   apicIsEnabled(pVCpu)
        && pXApicPage->svr.u.fApicSoftwareEnable)
    {
        int const irrv = apicGetLastSetBit(&pXApicPage->irr, -1);
        if (irrv >= 0)
        {
            Assert(irrv <= (int)UINT8_MAX);
            uint8_t const uVector = irrv;

            /** @todo this cannot possibly happen for anything other than ExtINT
             *        interrupts right? */
            uint8_t uTpr = pXApicPage->tpr.u8Tpr;
            if (uTpr > 0 && uVector <= uTpr)
            {
                *puTagSrc = 0;
                return pXApicPage->svr.u.u8SpuriousVector;
            }

            apicClearVectorInReg(&pXApicPage->irr, uVector);
            apicSetVectorInReg(&pXApicPage->isr, uVector);
            apicUpdatePpr(pVCpu);

            /** @todo Signal next interrupt? Most likely not as
             *        APICUpdatePendingInterrupts() will be called before next VM-entry. */
            return uVector;
        }
    }
    /** @todo */

    return -1;
}

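
/*
 * Editor's note with an illustrative sketch (not part of the original source):
 * architecturally, TPR masking compares priority classes (bits 7:4 of the vector
 * and of the TPR) rather than raw vector values as the code above does for the
 * case it carries a @todo for. A class-based check would look like this
 * (hypothetical helper, see Intel spec. 10.8.3.1 "Task and Processor Priorities"):
 */
#if 0 /* example only */
static bool apicExampleIsMaskedByTpr(uint8_t uVector, uint8_t uTpr)
{
    /* Masked when the vector's priority class does not exceed the TPR's class. */
    return (uVector >> 4) <= (uTpr >> 4);
}
#endif
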

/**
 * @callback_method_impl{FNIOMMMIOREAD}
 */
VMMDECL(int) APICReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
{
    Assert(!(GCPhysAddr & 0xf));
    Assert(cb == 4);

    PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
    PVMCPU   pVCpu    = PDMDevHlpGetVMCPU(pDevIns);
    uint16_t offReg   = (GCPhysAddr & 0xff0);
    uint32_t uValue   = 0;
#ifdef VBOX_WITH_STATISTICS
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    STAM_COUNTER_INC(&CTXSUFF(pApicCpu->StatMmioRead));
#endif
    int rc = apicReadRegister(pApicDev, pVCpu, offReg, &uValue);
    *(uint32_t *)pv = uValue;
    return rc;
}


/**
 * @callback_method_impl{FNIOMMMIOWRITE}
 */
VMMDECL(int) APICWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
{
    Assert(!(GCPhysAddr & 0xf));
    Assert(cb == 4);

    PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
    PVMCPU   pVCpu    = PDMDevHlpGetVMCPU(pDevIns);
    uint16_t offReg   = (GCPhysAddr & 0xff0);
    uint32_t uValue   = *(uint32_t *)pv;
#ifdef VBOX_WITH_STATISTICS
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    STAM_COUNTER_INC(&CTXSUFF(pApicCpu->StatMmioWrite));
#endif
    int rc = VBOXSTRICTRC_VAL(apicWriteRegister(pApicDev, pVCpu, offReg, uValue));
    return rc;
}

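
/*
 * Editor's illustrative sketch (not part of the original source): the offset
 * decoding shared by the two MMIO handlers above. xAPIC registers live on
 * 16-byte boundaries within the 4 KB APIC page, so bits 11:4 of the physical
 * address select the register. The helper name is hypothetical.
 */
#if 0 /* example only */
static uint16_t apicExampleMmioOffset(RTGCPHYS GCPhysAddr)
{
    return (uint16_t)(GCPhysAddr & 0xff0);  /* e.g. base+0x80 -> TPR, base+0xb0 -> EOI */
}
#endif
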

/**
 * Sets the interrupt pending force-flag and pokes the EMT if required.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   enmType     The IRQ type.
 */
VMMDECL(void) APICSetInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
{
    PVM      pVM      = CTX_SUFF(pVCpu->pVM);
    PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
    CTX_SUFF(pApicDev->pApicHlp)->pfnSetInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
}


/**
 * Clears the interrupt pending force-flag.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   enmType     The IRQ type.
 */
VMMDECL(void) APICClearInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
{
    PVM      pVM      = CTX_SUFF(pVCpu->pVM);
    PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
    CTX_SUFF(pApicDev->pApicHlp)->pfnClearInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
}


/**
 * Posts an interrupt to a target APIC.
 *
 * This function handles interrupts received from the system bus or
 * interrupts generated locally from the LVT or via a self IPI.
 *
 * Don't use this function to try and deliver ExtINT-style interrupts.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uVector         The vector of the interrupt to be posted.
 * @param   enmTriggerMode  The trigger mode of the interrupt.
 *
 * @thread  Any.
 */
VMM_INT_DECL(void) APICPostInterrupt(PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode)
{
    Assert(pVCpu);

    PCAPIC   pApic    = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    if (apicIsEnabled(pVCpu)) /* PAV */
    {
        /* Validate the vector. See Intel spec. 10.5.2 "Valid Interrupt Vectors". */
        if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
        {
            if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
            {
                apicSetVectorInPib(CTX_SUFF(pApicCpu->pvApicPib), uVector);
                bool const fAlreadySet = apicSetNotificationBitInPib(CTX_SUFF(pApicCpu->pvApicPib));
                if (fAlreadySet)
                    return;

                if (pApic->fPostedIntrsEnabled)
                { /** @todo posted-interrupt call to hardware */ }
                else
                    APICSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
            }
            else
            {
                /*
                 * Level-triggered interrupts require updating the TMR and thus cannot be
                 * delivered asynchronously.
                 */
                apicSetVectorInPib(&pApicCpu->ApicPibLevel.aVectorBitmap[0], uVector);
                bool const fAlreadySet = apicSetNotificationBitInPib(&pApicCpu->ApicPibLevel.aVectorBitmap[0]);
                if (fAlreadySet)
                    return;

                APICSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
            }
        }
        else
            apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);
    }
}

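
/*
 * Editor's illustrative sketch (not part of the original source): the presumed
 * shape of apicSetVectorInPib() as used above -- one bit per vector across eight
 * 32-bit fragments, set atomically so any context may post. The layout is an
 * assumption based on aVectorBitmap[] and the 256 possible vectors.
 */
#if 0 /* example only */
static void apicExampleSetVectorInPib(volatile uint32_t *pau32Pib, uint8_t uVector)
{
    ASMAtomicOrU32(&pau32Pib[uVector / 32], UINT32_C(1) << (uVector % 32));
}
#endif
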

/**
 * Starts the APIC timer.
 *
 * @param   pApicCpu        The APIC CPU state.
 * @param   uInitialCount   The timer's Initial-Count Register (ICR), must be > 0.
 * @thread  Any.
 */
VMM_INT_DECL(void) APICStartTimer(PAPICCPU pApicCpu, uint32_t uInitialCount)
{
    Assert(pApicCpu);
    Assert(TMTimerIsLockOwner(CTX_SUFF(pApicCpu->pTimer)));
    Assert(uInitialCount > 0);

    PCXAPICPAGE    pXApicPage   = APICCPU_TO_CXAPICPAGE(pApicCpu);
    uint8_t const  uTimerShift  = apicGetTimerShift(pXApicPage);
    uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;

    /*
     * The assumption here is that the timer doesn't tick during this call
     * and thus setting a relative time to fire next is accurate. The advantage,
     * however, is that u64TimerInitial is updated 'atomically' while setting
     * the next tick.
     */
    PTMTIMER pTimer = CTX_SUFF(pApicCpu->pTimer);
    TMTimerSetRelative(pTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
    apicHintTimerFreq(pApicCpu, uInitialCount, uTimerShift);
}

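
/*
 * Editor's worked example (not part of the original source) for the arithmetic
 * above: the DCR's divide value becomes a shift, so with divide-by-8
 * (uTimerShift == 3) an initial count of 1000 arms the timer 1000 << 3 = 8000
 * timer-clock ticks into the future. The helper name is hypothetical.
 */
#if 0 /* example only */
static uint64_t apicExampleTicksToNext(uint32_t uInitialCount, uint8_t uTimerShift)
{
    return (uint64_t)uInitialCount << uTimerShift;  /* 1000 << 3 == 8000 */
}
#endif
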

/**
 * Stops the APIC timer.
 *
 * @param   pApicCpu    The APIC CPU state.
 * @thread  Any.
 */
VMM_INT_DECL(void) APICStopTimer(PAPICCPU pApicCpu)
{
    Assert(pApicCpu);
    Assert(TMTimerIsLockOwner(CTX_SUFF(pApicCpu->pTimer)));

    PTMTIMER pTimer = CTX_SUFF(pApicCpu->pTimer);
    TMTimerStop(pTimer);    /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
    pApicCpu->uHintedTimerInitialCount = 0;
    pApicCpu->uHintedTimerShift = 0;
}


/**
 * Updates the CPUID bits necessary for the given APIC mode.
 *
 * @param   pVM         The cross context VM structure.
 * @param   enmMode     The APIC mode.
 */
VMM_INT_DECL(void) APICUpdateCpuIdForMode(PVM pVM, APICMODE enmMode)
{
    /* Exactly which CPUID bits should be updated to reflect the current state is a bit vague. See @bugref{8245#c32}. */
    /** @todo This needs to be done on a per-VCPU basis! */
    switch (enmMode)
    {
        case APICMODE_DISABLED:
            CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
            break;

        case APICMODE_XAPIC:
        case APICMODE_X2APIC:
            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_APIC);
            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_X2APIC);
            break;

        default:
            AssertMsgFailed(("Invalid APIC mode: %d\n", (int)enmMode));
            break;
    }
}


/**
 * Queues a pending interrupt as in-service.
 *
 * This function should only be needed without virtualized APIC
 * registers. With virtualized APIC registers, it's sufficient to keep
 * the interrupts pending in the IRR as the hardware takes care of
 * virtual interrupt delivery.
 *
 * @returns true if the interrupt was queued to in-service interrupts,
 *          false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   u8PendingIntr   The pending interrupt to queue as in-service.
 */
VMMDECL(bool) APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PAPIC pApic = VM_TO_APIC(CTX_SUFF(pVCpu->pVM));
    Assert(!pApic->fVirtApicRegsEnabled);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
    if (fIsPending)
    {
        apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
        apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
        apicUpdatePpr(pVCpu);
        return true;
    }
    return false;
}

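
/*
 * Editor's illustrative sketch (not part of the original source): "queueing to
 * in-service" is the same IRR -> ISR transition a physical local APIC performs
 * on interrupt acknowledgement, using the helpers defined earlier in this file.
 */
#if 0 /* example only */
static void apicExampleAcknowledgeIntr(PVMCPU pVCpu, uint8_t uVector)
{
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicClearVectorInReg(&pXApicPage->irr, uVector);    /* no longer merely pending */
    apicSetVectorInReg(&pXApicPage->isr, uVector);      /* now being serviced */
    apicUpdatePpr(pVCpu);                               /* PPR tracks the highest in-service class */
}
#endif
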

/**
 * Dequeues a pending interrupt from in-service.
 *
 * This undoes APICQueueInterruptToService() for premature VM-exits before event
 * injection.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   u8PendingIntr   The pending interrupt to dequeue from in-service.
 */
VMMDECL(void) APICDequeueInterruptFromService(PVMCPU pVCpu, uint8_t u8PendingIntr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PAPIC pApic = VM_TO_APIC(CTX_SUFF(pVCpu->pVM));
    Assert(!pApic->fVirtApicRegsEnabled);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
    if (fInService)
    {
        apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
        apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
        apicUpdatePpr(pVCpu);
    }
}


/**
 * Updates pending interrupts from the pending interrupt bitmap to the IRR.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) APICUpdatePendingInterrupts(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PAPICCPU   pApicCpu   = VMCPU_TO_APICCPU(pVCpu);
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    for (;;)
    {
        bool const fAlreadySet = apicClearNotificationBitInPib(CTX_SUFF(pApicCpu->pvApicPib));
        if (!fAlreadySet)
            break;

        PAPICPIB pPib = (PAPICPIB)CTX_SUFF(pApicCpu->pvApicPib);
        for (size_t i = 0; i < RT_ELEMENTS(pPib->aVectorBitmap); i++)
        {
            uint32_t const uFragment = ASMAtomicXchgU32(&pPib->aVectorBitmap[i], 0);
            if (uFragment)
                apicOrVectorsToReg(&pXApicPage->irr, i, uFragment);
        }
    }
}

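
/*
 * Editor's illustrative sketch (not part of the original source): the merge step
 * in the loop above, spelled out. Fragment i of the PIB covers vectors
 * [32*i .. 32*i + 31]; exchanging it with zero claims the pending bits, which
 * are then OR-ed into the matching IRR fragment (assumed apicOrVectorsToReg()
 * semantics, mirroring the byte-pointer casts used at the top of this file).
 */
#if 0 /* example only */
static void apicExampleMergePibFragment(volatile XAPIC256BITREG *pIrr, size_t idxFragment, uint32_t uFragment)
{
    volatile uint32_t *pau32Irr = (volatile uint32_t *)&pIrr->u[0];
    ASMAtomicOrU32(&pau32Irr[idxFragment], uFragment);
}
#endif
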

/**
 * Gets the highest priority pending interrupt.
 *
 * @returns true if any interrupt is pending, false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pu8PendingIntr  Where to store the interrupt vector if the
 *                          interrupt is pending.
 */
VMMDECL(bool) APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
}
