
source: vbox/trunk/src/VBox/VMM/VMMAll/GICAll.cpp@ 109021

Last change on this file: r109021, checked in by vboxsync:

VMM/GIC: bugref:10877 GIC ITS, work-in-progress.

1/* $Id: GICAll.cpp 109021 2025-04-18 08:56:15Z vboxsync $ */
2/** @file
3 * GIC - Generic Interrupt Controller Architecture (GIC) - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_gic GIC - Generic Interrupt Controller
29 *
30 * The GIC is an interrupt controller device that lives in VMM but also registers
31 * itself with PDM, much like the APIC. The reason for this is that it needs to
32 * access per-VCPU data and is an integral part of any ARMv8 VM.
33 *
34 * The GIC is made up of 3 main components:
35 * - Distributor
36 * - Redistributor
37 * - Interrupt Translation Service (ITS)
38 *
39 * The distributor is per-VM while the redistributors are per-VCPU. PEs (Processing
40 * Elements) and CIs (CPU Interfaces) correspond to VCPUs. The distributor and
41 * redistributor each have their memory mapped I/O regions. The redistributor is
42 * accessible via CPU system registers as well. The distributor and redistributor
43 * code lives in GICAll.cpp and GICR3.cpp.
44 *
45 * The ITS is the interrupt translation service component of the GIC and its
46 * presence is optional. It provides MSI support along with routing interrupt
47 * sources to specific PEs. The ITS is only accessible via its memory mapped I/O
48 * region. When the MMIO handle for the ITS region is NIL_IOMMMIOHANDLE, it's
49 * considered to be disabled for the VM. Most of the ITS code lives in GITSAll.cpp.
50 *
51 * This implementation targets GICv3 only. It supports neither dual security
52 * states nor the higher exception levels (EL2, EL3). Earlier GIC versions are
53 * considered legacy and not important enough to be emulated.
54 * GICv4 primarily adds support for virtualizing the GIC and its necessity will be
55 * evaluated in the future if/when there is support for nested virtualization on
56 * ARMv8 hosts.
57 */
58
59
60/*********************************************************************************************************************************
61* Header Files *
62*********************************************************************************************************************************/
63#define LOG_GROUP LOG_GROUP_DEV_GIC
64#include "GICInternal.h"
65#include <VBox/vmm/pdmgic.h>
66#include <VBox/vmm/pdmdev.h>
67#include <VBox/vmm/pdmapi.h>
68#include <VBox/vmm/vmcc.h>
69#include <VBox/vmm/vmm.h>
70#include <VBox/vmm/vmcpuset.h>
71
72
73/*********************************************************************************************************************************
74* Defined Constants And Macros *
75*********************************************************************************************************************************/
76#define GIC_IDLE_PRIORITY 0xff
77#define GIC_IS_INTR_SGI(a_uIntId) ((uint32_t)(a_uIntId) - GIC_INTID_RANGE_SGI_START < GIC_INTID_SGI_RANGE_SIZE)
78#define GIC_IS_INTR_PPI(a_uIntId) ((uint32_t)(a_uIntId) - GIC_INTID_RANGE_PPI_START < GIC_INTID_PPI_RANGE_SIZE)
79#define GIC_IS_INTR_SGI_OR_PPI(a_uIntId) ((uint32_t)(a_uIntId) - GIC_INTID_RANGE_SGI_START < GIC_INTID_PPI_RANGE_SIZE)
80#define GIC_IS_INTR_SPI(a_uIntId) ((uint32_t)(a_uIntId) - GIC_INTID_RANGE_SPI_START < GIC_INTID_SPI_RANGE_SIZE)
81#define GIC_IS_INTR_SPECIAL(a_uIntId) ((uint32_t)(a_uIntId) - GIC_INTID_RANGE_SPECIAL_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
82#define GIC_IS_INTR_EXT_PPI(a_uIntId) ((uint32_t)(a_uIntId) - GIC_INTID_RANGE_EXT_PPI_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
83#define GIC_IS_INTR_EXT_SPI(a_uIntId) ((uint32_t)(a_uIntId) - GIC_INTID_RANGE_EXT_SPI_START < GIC_INTID_EXT_SPI_RANGE_SIZE)
84#define GIC_IS_REG_IN_RANGE(a_offReg, a_offFirst, a_cbRegion) ((uint32_t)(a_offReg) - (a_offFirst) < (a_cbRegion))
85
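/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * GIC_IS_INTR_* and GIC_IS_REG_IN_RANGE checks above rely on the unsigned
 * wrap-around idiom: the single compare (x - first) < size is true exactly when
 * first <= x < first + size, because a value below 'first' wraps around to a
 * very large unsigned number. A minimal standalone equivalent, assuming 32-bit
 * unsigned arithmetic:
 */
static inline bool gicExampleIsInRange(uint32_t uValue, uint32_t uFirst, uint32_t cValues)
{
    /* Equivalent to (uFirst <= uValue && uValue < uFirst + cValues) without overflow pitfalls. */
    return uValue - uFirst < cValues;
}
/* E.g. gicExampleIsInRange(17, 16, 16) is true (a PPI), while gicExampleIsInRange(5, 16, 16) is false. */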
86
87#ifdef LOG_ENABLED
88/**
89 * Gets the description of a CPU interface register.
90 *
91 * @returns The description.
92 * @param u32Reg The CPU interface system register ID.
93 */
94static const char *gicIccGetRegDescription(uint32_t u32Reg)
95{
96 switch (u32Reg)
97 {
98#define GIC_ICC_REG_CASE(a_Reg) case ARMV8_AARCH64_SYSREG_ ## a_Reg: return #a_Reg
99 GIC_ICC_REG_CASE(ICC_PMR_EL1);
100 GIC_ICC_REG_CASE(ICC_IAR0_EL1);
101 GIC_ICC_REG_CASE(ICC_EOIR0_EL1);
102 GIC_ICC_REG_CASE(ICC_HPPIR0_EL1);
103 GIC_ICC_REG_CASE(ICC_BPR0_EL1);
104 GIC_ICC_REG_CASE(ICC_AP0R0_EL1);
105 GIC_ICC_REG_CASE(ICC_AP0R1_EL1);
106 GIC_ICC_REG_CASE(ICC_AP0R2_EL1);
107 GIC_ICC_REG_CASE(ICC_AP0R3_EL1);
108 GIC_ICC_REG_CASE(ICC_AP1R0_EL1);
109 GIC_ICC_REG_CASE(ICC_AP1R1_EL1);
110 GIC_ICC_REG_CASE(ICC_AP1R2_EL1);
111 GIC_ICC_REG_CASE(ICC_AP1R3_EL1);
112 GIC_ICC_REG_CASE(ICC_DIR_EL1);
113 GIC_ICC_REG_CASE(ICC_RPR_EL1);
114 GIC_ICC_REG_CASE(ICC_SGI1R_EL1);
115 GIC_ICC_REG_CASE(ICC_ASGI1R_EL1);
116 GIC_ICC_REG_CASE(ICC_SGI0R_EL1);
117 GIC_ICC_REG_CASE(ICC_IAR1_EL1);
118 GIC_ICC_REG_CASE(ICC_EOIR1_EL1);
119 GIC_ICC_REG_CASE(ICC_HPPIR1_EL1);
120 GIC_ICC_REG_CASE(ICC_BPR1_EL1);
121 GIC_ICC_REG_CASE(ICC_CTLR_EL1);
122 GIC_ICC_REG_CASE(ICC_SRE_EL1);
123 GIC_ICC_REG_CASE(ICC_IGRPEN0_EL1);
124 GIC_ICC_REG_CASE(ICC_IGRPEN1_EL1);
125#undef GIC_ICC_REG_CASE
126 default:
127 return "<UNKNOWN>";
128 }
129}
130
131
132/**
133 * Gets the description of a distributor register given its register offset.
134 *
135 * @returns The register description.
136 * @param offReg The distributor register offset.
137 */
138static const char *gicDistGetRegDescription(uint16_t offReg)
139{
140 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE)) return "GICD_IGROUPRn";
141 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE)) return "GICD_IGROUPRnE";
142 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE)) return "GICD_IROUTERn";
143 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE)) return "GICD_IROUTERnE";
144 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE)) return "GICD_ISENABLERn";
145 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE)) return "GICD_ISENABLERnE";
146 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE)) return "GICD_ICENABLERn";
147 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE)) return "GICD_ICENABLERnE";
148 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE)) return "GICD_ISACTIVERn";
149 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE)) return "GICD_ISACTIVERnE";
150 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE)) return "GICD_ICACTIVERn";
151 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE)) return "GICD_ICACTIVERnE";
152 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE)) return "GICD_IPRIORITYRn";
153 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE)) return "GICD_IPRIORITYRnE";
154 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE)) return "GICD_ISPENDRn";
155 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE)) return "GICD_ISPENDRnE";
156 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE)) return "GICD_ICPENDRn";
157 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE)) return "GICD_ICPENDRnE";
158 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE)) return "GICD_ICFGRn";
159 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE)) return "GICD_ICFGRnE";
160 switch (offReg)
161 {
162 case GIC_DIST_REG_CTLR_OFF: return "GICD_CTLR";
163 case GIC_DIST_REG_TYPER_OFF: return "GICD_TYPER";
164 case GIC_DIST_REG_STATUSR_OFF: return "GICD_STATUSR";
165 case GIC_DIST_REG_ITARGETSRn_OFF_START: return "GICD_ITARGETSRn";
166 case GIC_DIST_REG_IGRPMODRn_OFF_START: return "GICD_IGRPMODRn";
167 case GIC_DIST_REG_NSACRn_OFF_START: return "GICD_NSACRn";
168 case GIC_DIST_REG_SGIR_OFF: return "GICD_SGIR";
169 case GIC_DIST_REG_CPENDSGIRn_OFF_START: return "GICD_CPENDSGIRn";
170 case GIC_DIST_REG_SPENDSGIRn_OFF_START: return "GICD_SPENDSGIRn";
171 case GIC_DIST_REG_INMIn_OFF_START: return "GICD_INMIn";
172 case GIC_DIST_REG_PIDR2_OFF: return "GICD_PIDR2";
173 case GIC_DIST_REG_IIDR_OFF: return "GICD_IIDR";
174 case GIC_DIST_REG_TYPER2_OFF: return "GICD_TYPER2";
175 default:
176 return "<UNKNOWN>";
177 }
178}
179#endif /* LOG_ENABLED */
180
181
182/**
183 * Gets the description of a redistributor register given its register offset.
184 *
185 * @returns The register description.
186 * @param offReg The redistributor register offset.
187 */
188static const char *gicReDistGetRegDescription(uint16_t offReg)
189{
190 switch (offReg)
191 {
192 case GIC_REDIST_REG_CTLR_OFF: return "GICR_CTLR";
193 case GIC_REDIST_REG_IIDR_OFF: return "GICR_IIDR";
194 case GIC_REDIST_REG_TYPER_OFF: return "GICR_TYPER";
195 case GIC_REDIST_REG_TYPER_AFFINITY_OFF: return "GICR_TYPER_AFF";
196 case GIC_REDIST_REG_STATUSR_OFF: return "GICR_STATUSR";
197 case GIC_REDIST_REG_WAKER_OFF: return "GICR_WAKER";
198 case GIC_REDIST_REG_MPAMIDR_OFF: return "GICR_MPAMIDR";
199 case GIC_REDIST_REG_PARTIDR_OFF: return "GICR_PARTIDR";
200 case GIC_REDIST_REG_SETLPIR_OFF: return "GICR_SETLPIR";
201 case GIC_REDIST_REG_CLRLPIR_OFF: return "GICR_CLRLPIR";
202 case GIC_REDIST_REG_PROPBASER_OFF: return "GICR_PROPBASER";
203 case GIC_REDIST_REG_PENDBASER_OFF: return "GICR_PENDBASER";
204 case GIC_REDIST_REG_INVLPIR_OFF: return "GICR_INVLPIR";
205 case GIC_REDIST_REG_INVALLR_OFF: return "GICR_INVALLR";
206 case GIC_REDIST_REG_SYNCR_OFF: return "GICR_SYNCR";
207 case GIC_REDIST_REG_PIDR2_OFF: return "GICR_PIDR2";
208 default:
209 return "<UNKNOWN>";
210 }
211}
212
213
214/**
215 * Gets the description of an SGI/PPI redistributor register given its register
216 * offset.
217 *
218 * @returns The register description.
219 * @param offReg The redistributor register offset.
220 */
221static const char *gicReDistGetSgiPpiRegDescription(uint16_t offReg)
222{
223 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE)) return "GICR_IGROUPRn";
224 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE)) return "GICR_ISENABLERn";
225 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE)) return "GICR_ICENABLERn";
226 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE)) return "GICR_ISACTIVERn";
227 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE)) return "GICR_ICACTIVERn";
228 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE)) return "GICR_ISPENDRn";
229 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE)) return "GICR_ICPENDRn";
230 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE)) return "GICR_IPRIORITYRn";
231 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE)) return "GICR_ICFGRn";
232 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_INMIR0_OFF, GIC_REDIST_SGI_PPI_REG_INMIRnE_RANGE_SIZE)) return "GICR_INMIRn";
233 switch (offReg)
234 {
235 case GIC_REDIST_SGI_PPI_REG_NSACR_OFF: return "GICR_NSACR";
236 case GIC_REDIST_SGI_PPI_REG_IGRPMODR0_OFF: return "GICR_IGRPMODR0";
237 case GIC_REDIST_SGI_PPI_REG_IGRPMODR1E_OFF: return "GICR_IGRPMODR1E";
238 case GIC_REDIST_SGI_PPI_REG_IGRPMODR2E_OFF: return "GICR_IGRPMODR2E";
239 default:
240 return "<UNKNOWN>";
241 }
242}
243
244
245/**
246 * Gets the interrupt ID given a distributor interrupt index.
247 *
248 * @returns The interrupt ID.
249 * @param idxIntr The distributor interrupt index.
250 * @remarks A distributor interrupt is an interrupt type that belongs to the
251 * distributor (e.g. SPIs, extended SPIs).
252 */
253DECLHIDDEN(uint16_t) gicDistGetIntIdFromIndex(uint16_t idxIntr)
254{
255 /*
256 * Distributor interrupts bits to interrupt ID mapping:
257 * +--------------------------------------------------------+
258 * | Range (incl) | SGI | PPI | SPI | Ext SPI |
259 * |--------------+--------+--------+----------+------------|
260 * | Bit | 0..15 | 16..31 | 32..1023 | 1024..2047 |
261 * | Int Id | 0..15 | 16..31 | 32..1023 | 4096..5119 |
262 * +--------------------------------------------------------+
263 */
264 uint16_t uIntId;
265 /* SGIs, PPIs, SPIs and specials. */
266 if (idxIntr < 1024)
267 uIntId = idxIntr;
268 /* Extended SPIs. */
269 else if (idxIntr < 2048)
270 uIntId = GIC_INTID_RANGE_EXT_SPI_START + idxIntr - 1024;
271 else
272 {
273 uIntId = 0;
274 AssertReleaseMsgFailed(("idxIntr=%u\n", idxIntr));
275 }
276 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
277 || GIC_IS_INTR_SPI(uIntId)
278 || GIC_IS_INTR_SPECIAL(uIntId)
279 || GIC_IS_INTR_EXT_SPI(uIntId));
280 return uIntId;
281}
282
283
284/**
285 * Gets the distributor interrupt index given an interrupt ID.
286 *
287 * @returns The distributor interrupt index.
288 * @param uIntId The interrupt ID.
289 * @remarks A distributor interrupt is an interrupt type that belongs to the
290 * distributor (e.g. SPIs, extended SPIs).
291 */
292static uint16_t gicDistGetIndexFromIntId(uint16_t uIntId)
293{
294 uint16_t idxIntr;
295 /* SGIs, PPIs, SPIs and specials. */
296 if (uIntId <= GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
297 idxIntr = uIntId;
298 /* Extended SPIs. */
299 else if (uIntId - GIC_INTID_RANGE_EXT_SPI_START < GIC_INTID_EXT_SPI_RANGE_SIZE)
300 idxIntr = 1024 + uIntId - GIC_INTID_RANGE_EXT_SPI_START;
301 else
302 {
303 idxIntr = 0;
304 AssertReleaseMsgFailed(("uIntId=%u\n", uIntId));
305 }
306 Assert(idxIntr < sizeof(GICDEV::bmIntrPending) * 8);
307 return idxIntr;
308}
309
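/*
 * Editor's sketch (hypothetical helpers, not part of the original source): the two
 * conversions above are inverses of each other. Bitmap bits 0..1023 map 1:1 to
 * INTIDs 0..1023 (SGIs, PPIs, SPIs, specials) and bits 1024..2047 map to extended
 * SPIs 4096..5119, matching the table in gicDistGetIntIdFromIndex.
 */
static inline uint16_t gicExampleDistIdxToIntId(uint16_t idxIntr)
{
    return idxIntr < 1024 ? idxIntr : (uint16_t)(4096 + idxIntr - 1024);
}
static inline uint16_t gicExampleDistIntIdToIdx(uint16_t uIntId)
{
    return uIntId < 1024 ? uIntId : (uint16_t)(1024 + uIntId - 4096);
}
/* E.g. gicExampleDistIdxToIntId(1024) == 4096 and gicExampleDistIntIdToIdx(4096) == 1024. */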
310
311/**
312 * Gets the interrupt ID given a redistributor interrupt index.
313 *
314 * @returns The interrupt ID.
315 * @param idxIntr The redistributor interrupt index.
316 * @remarks A redistributor interrupt is an interrupt type that belongs to the
317 * redistributor (e.g. SGIs, PPIs, extended PPIs).
318 */
319DECLHIDDEN(uint16_t) gicReDistGetIntIdFromIndex(uint16_t idxIntr)
320{
321 /*
322 * Redistributor interrupts bits to interrupt ID mapping:
323 * +---------------------------------------------+
324 * | Range (incl) | SGI | PPI | Ext PPI |
325 * +---------------------------------------------+
326 * | Bit | 0..15 | 16..31 | 32..95 |
327 * | Int Id | 0..15 | 16..31 | 1056..1119 |
328 * +---------------------------------------------+
329 */
330 uint16_t uIntId;
331 /* SGIs and PPIs. */
332 if (idxIntr < 32)
333 uIntId = idxIntr;
334 /* Extended PPIs. */
335 else if (idxIntr < 96)
336 uIntId = GIC_INTID_RANGE_EXT_PPI_START + idxIntr - 32;
337 else
338 {
339 uIntId = 0;
340 AssertReleaseMsgFailed(("idxIntr=%u\n", idxIntr));
341 }
342 Assert(GIC_IS_INTR_SGI_OR_PPI(uIntId) || GIC_IS_INTR_EXT_PPI(uIntId));
343 return uIntId;
344}
345
346
347/**
348 * Gets the redistributor interrupt index given an interrupt ID.
349 *
350 * @returns The redistributor interrupt index.
351 * @param uIntId The interrupt ID.
352 * @remarks A redistributor interrupt is an interrupt type that belongs to the
353 * redistributor (e.g. SGIs, PPIs, extended PPIs).
354 */
355static uint16_t gicReDistGetIndexFromIntId(uint16_t uIntId)
356{
357 /* SGIs and PPIs. */
358 uint16_t idxIntr;
359 if (uIntId <= GIC_INTID_RANGE_PPI_LAST)
360 idxIntr = uIntId;
361 /* Extended PPIs. */
362 else if (uIntId - GIC_INTID_RANGE_EXT_PPI_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
363 idxIntr = 32 + uIntId - GIC_INTID_RANGE_EXT_PPI_START;
364 else
365 {
366 idxIntr = 0;
367 AssertReleaseMsgFailed(("uIntId=%u\n", uIntId));
368 }
369 Assert(idxIntr < sizeof(GICCPU::bmIntrPending) * 8);
370 return idxIntr;
371}
372
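/*
 * Editor's sketch (hypothetical helper, not part of the original source): the
 * redistributor bitmap layout follows the table in gicReDistGetIntIdFromIndex --
 * bits 0..31 are SGIs/PPIs (INTIDs 0..31) and bits 32..95 are extended PPIs
 * (INTIDs 1056..1119).
 */
static inline uint16_t gicExampleReDistIdxToIntId(uint16_t idxIntr)
{
    return idxIntr < 32 ? idxIntr : (uint16_t)(1056 + idxIntr - 32);
}
/* E.g. gicExampleReDistIdxToIntId(32) == 1056 and gicExampleReDistIdxToIntId(95) == 1119. */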
373
374/**
375 * Sets the interrupt pending force-flag and pokes the EMT if required.
376 *
377 * @param pVCpu The cross context virtual CPU structure.
378 * @param fIrq Flag whether to assert the IRQ line or leave it alone.
379 * @param fFiq Flag whether to assert the FIQ line or leave it alone.
380 */
381static void gicSetInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
382{
383 Assert(fIrq || fFiq);
384 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
385
386#ifdef IN_RING3
387 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
388 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
389#endif
390
391 if (fIrq)
392 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
393 if (fFiq)
394 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
395
396 /*
397 * We need to wake up the target CPU if we're not on EMT.
398 */
399 /** @todo We could just use RTThreadNativeSelf() here, couldn't we? */
400#if defined(IN_RING0)
401# error "Implement me!"
402#elif defined(IN_RING3)
403 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
404 VMCPUID idCpu = pVCpu->idCpu;
405 if (VMMGetCpuId(pVM) != idCpu)
406 {
407 Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
408 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
409 }
410#endif
411}
412
413
414/**
415 * Clears the interrupt pending force-flag.
416 *
417 * @param pVCpu The cross context virtual CPU structure.
418 * @param fIrq Flag whether to clear the IRQ flag.
419 * @param fFiq Flag whether to clear the FIQ flag.
420 */
421DECLINLINE(void) gicClearInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
422{
423 Assert(fIrq || fFiq);
424 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
425
426#ifdef IN_RING3
427 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
428 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
429#endif
430
431 if (fIrq)
432 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
433 if (fFiq)
434 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
435}
436
437
438/**
439 * Updates the interrupt force-flag.
440 *
441 * @param pVCpu The cross context virtual CPU structure.
442 * @param fIrq Whether the IRQ force-flag should be set (true) or cleared (false).
443 * @param fFiq Whether the FIQ force-flag should be set (true) or cleared (false).
444 */
445DECLINLINE(void) gicUpdateInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
446{
447 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
448
449 if (fIrq || fFiq)
450 gicSetInterruptFF(pVCpu, fIrq, fFiq);
451
452 if (!fIrq || !fFiq)
453 gicClearInterruptFF(pVCpu, !fIrq, !fFiq);
454}
455
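/*
 * Editor's note (illustrative sketch, not part of the original source): the update
 * above simply forces each line to the requested state -- the force-flags whose
 * argument is true are set, the others are cleared. Expressed on plain booleans:
 */
static inline void gicExampleUpdateLines(bool fIrq, bool fFiq, bool *pfIrqLine, bool *pfFiqLine)
{
    *pfIrqLine = fIrq; /* VMCPU_FF_INTERRUPT_IRQ ends up set exactly when fIrq is true. */
    *pfFiqLine = fFiq; /* VMCPU_FF_INTERRUPT_FIQ ends up set exactly when fFiq is true. */
}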
456
457/**
458 * Gets whether the redistributor has pending interrupts with sufficient priority to
459 * be signalled to the PE.
460 *
461 * @param pGicCpu The GIC redistributor and CPU interface state.
462 * @param pfIrq Where to store whether IRQs can be signalled.
463 * @param pfFiq Where to store whether FIQs can be signalled.
464 */
465static void gicReDistHasIrqPending(PCGICCPU pGicCpu, bool *pfIrq, bool *pfFiq)
466{
467 bool const fIsGroup1Enabled = pGicCpu->fIntrGroup1Enabled;
468 bool const fIsGroup0Enabled = pGicCpu->fIntrGroup0Enabled;
469 LogFlowFunc(("fIsGroup0Enabled=%RTbool fIsGroup1Enabled=%RTbool\n", fIsGroup0Enabled, fIsGroup1Enabled));
470
471# if 1
472 uint32_t bmIntrs[3];
473 for (uint8_t i = 0; i < RT_ELEMENTS(bmIntrs); i++)
474 {
475 /* Collect interrupts that are pending, enabled and inactive. */
476 bmIntrs[i] = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
477
478 /* Discard interrupts if the group they belong to is disabled. */
479 if (!fIsGroup1Enabled)
480 bmIntrs[i] &= ~pGicCpu->bmIntrGroup[i];
481 if (!fIsGroup0Enabled)
482 bmIntrs[i] &= pGicCpu->bmIntrGroup[i];
483 }
484
485 uint32_t const cIntrs = sizeof(bmIntrs) * 8;
486 int32_t idxIntr = ASMBitFirstSet(&bmIntrs[0], cIntrs);
487 AssertCompile(!(cIntrs % 32));
488 if (idxIntr >= 0)
489 {
490 /* Only allow interrupts with higher priority than the current configured and running one. */
491 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
492 do
493 {
494 Assert((uint32_t)idxIntr < RT_ELEMENTS(pGicCpu->abIntrPriority));
495 if (pGicCpu->abIntrPriority[idxIntr] < bPriority)
496 {
497 bool const fInGroup1 = ASMBitTest(&pGicCpu->bmIntrGroup[0], idxIntr);
498 bool const fInGroup0 = !fInGroup1;
499 *pfIrq = fInGroup1 && fIsGroup1Enabled;
500 *pfFiq = fInGroup0 && fIsGroup0Enabled;
501 return;
502 }
503 idxIntr = ASMBitNextSet(&bmIntrs[0], cIntrs, idxIntr);
504 } while (idxIntr != -1);
505 }
506#else /** @todo Measure and pick the faster version. */
507 /* Only allow interrupts with higher priority than the current configured and running one. */
508 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
509
510 for (uint8_t i = 0; i < RT_ELEMENTS(pGicCpu->bmIntrPending); i++)
511 {
512 /* Collect interrupts that are pending, enabled and inactive. */
513 uint32_t bmIntr = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
514
515 /* Discard interrupts if the group they belong to is disabled. */
516 if (!fIsGroup1Enabled)
517 bmIntr &= ~pGicCpu->bmIntrGroup[i];
518 if (!fIsGroup0Enabled)
519 bmIntr &= pGicCpu->bmIntrGroup[i];
520
521 /* If the interrupt is higher priority than the running interrupt, return whether to signal an IRQ, FIQ or neither. */
522 uint16_t const idxPending = ASMBitFirstSetU32(bmIntr);
523 if (idxPending > 0)
524 {
525 uint16_t const idxIntr = 32 * i + idxPending - 1;
526 AssertRelease(idxIntr < RT_ELEMENTS(pGicCpu->abIntrPriority));
527 if (pGicCpu->abIntrPriority[idxIntr] < bPriority)
528 {
529 AssertRelease(idxIntr < sizeof(pGicCpu->bmIntrGroup) * 8);
530 bool const fInGroup1 = ASMBitTest(&pGicCpu->bmIntrGroup[0], idxIntr);
531 bool const fInGroup0 = !fInGroup1;
532 *pfIrq = fInGroup1 && fIsGroup1Enabled;
533 *pfFiq = fInGroup0 && fIsGroup0Enabled;
534 return;
535 }
536 }
537 }
538#endif
539 *pfIrq = false;
540 *pfFiq = false;
541}
542
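/*
 * Editor's sketch (illustrative only, not part of the original source): per 32-bit
 * bitmap word the candidate set above is "pending AND enabled AND NOT active",
 * further masked by the per-group enables (a set group bit means group 1 -> IRQ,
 * a clear bit means group 0 -> FIQ). Assuming one word and its group bitmap:
 */
static inline uint32_t gicExampleCandidateIntrs(uint32_t bmPending, uint32_t bmEnabled, uint32_t bmActive,
                                                uint32_t bmGroup1, bool fGroup0Enabled, bool fGroup1Enabled)
{
    uint32_t bmIntrs = (bmPending & bmEnabled) & ~bmActive;
    if (!fGroup1Enabled)
        bmIntrs &= ~bmGroup1;   /* Drop group-1 interrupts when group 1 is disabled. */
    if (!fGroup0Enabled)
        bmIntrs &= bmGroup1;    /* Drop group-0 interrupts when group 0 is disabled. */
    return bmIntrs;
}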
543
544/**
545 * Gets whether the distributor has pending interrupts with sufficient priority to
546 * be signalled to the PE.
547 *
548 * @param pGicDev The GIC distributor state.
549 * @param pVCpu The cross context virtual CPU structure.
550 * @param idCpu The ID of the virtual CPU.
551 * @param pfIrq Where to store whether IRQs can be signalled.
552 * @param pfFiq Where to store whether FIQs can be signalled.
553 */
554static void gicDistHasIrqPendingForVCpu(PCGICDEV pGicDev, PCVMCPUCC pVCpu, VMCPUID idCpu, bool *pfIrq, bool *pfFiq)
555{
556 bool const fIsGroup1Enabled = pGicDev->fIntrGroup1Enabled;
557 bool const fIsGroup0Enabled = pGicDev->fIntrGroup0Enabled;
558 LogFlowFunc(("fIsGroup1Enabled=%RTbool fIsGroup0Enabled=%RTbool\n", fIsGroup1Enabled, fIsGroup0Enabled));
559
560#if 1
561 uint32_t bmIntrs[64];
562 for (uint8_t i = 0; i < RT_ELEMENTS(bmIntrs); i++)
563 {
564 /* Collect interrupts that are pending, enabled and inactive. */
565 bmIntrs[i] = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
566
567 /* Discard interrupts if the group they belong to is disabled. */
568 if (!fIsGroup1Enabled)
569 bmIntrs[i] &= ~pGicDev->bmIntrGroup[i];
570 if (!fIsGroup0Enabled)
571 bmIntrs[i] &= pGicDev->bmIntrGroup[i];
572 }
573
574 /*
575 * The distributor's interrupt pending/enabled/active bitmaps have 2048 bits which map
576 * SGIs (16), PPIs (16), SPIs (988), reserved SPIs (4) and extended SPIs (1024).
577 * Of these, the first 32 bits corresponding to SGIs and PPIs are RAZ/WI when affinity
578 * routing is enabled (which is currently always the case in our implementation).
579 */
580 Assert(pGicDev->fAffRoutingEnabled);
581 Assert(bmIntrs[0] == 0);
582 uint32_t const cIntrs = sizeof(bmIntrs) * 8;
583 int32_t idxIntr = ASMBitFirstSet(&bmIntrs[0], cIntrs);
584 AssertCompile(!(cIntrs % 32));
585 if (idxIntr >= 0)
586 {
587 /* Only allow interrupts with higher priority than the current configured and running one. */
588 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
589 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
590 do
591 {
592 AssertCompile(RT_ELEMENTS(pGicDev->abIntrPriority) == RT_ELEMENTS(pGicDev->au32IntrRouting));
593 Assert((uint32_t)idxIntr < RT_ELEMENTS(pGicDev->abIntrPriority));
594 Assert(idxIntr < GIC_INTID_RANGE_SPECIAL_START || idxIntr > GIC_INTID_RANGE_SPECIAL_LAST);
595 if ( pGicDev->abIntrPriority[idxIntr] < bPriority
596 && pGicDev->au32IntrRouting[idxIntr] == idCpu)
597 {
598 bool const fInGroup1 = ASMBitTest(&pGicDev->bmIntrGroup[0], idxIntr);
599 bool const fInGroup0 = !fInGroup1;
600 *pfFiq = fInGroup0 && fIsGroup0Enabled;
601 *pfIrq = fInGroup1 && fIsGroup1Enabled;
602 return;
603 }
604 idxIntr = ASMBitNextSet(&bmIntrs[0], cIntrs, idxIntr);
605 } while (idxIntr != -1);
606 }
607#else /** @todo Measure and pick the faster version. */
608 /* Only allow interrupts with higher priority than the running one. */
609 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
610 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
611
612 for (uint8_t i = 0; i < RT_ELEMENTS(pGicDev->bmIntrPending); i += 2)
613 {
614 /* Collect interrupts that are pending, enabled and inactive. */
615 uint32_t uLo = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
616 uint32_t uHi = (pGicDev->bmIntrPending[i + 1] & pGicDev->bmIntrEnabled[i + 1]) & ~pGicDev->bmIntrActive[i + 1];
617
618 /* Discard interrupts if the group they belong to is disabled. */
619 if (!fIsGroup1Enabled)
620 {
621 uLo &= ~pGicDev->bmIntrGroup[i];
622 uHi &= ~pGicDev->bmIntrGroup[i + 1];
623 }
624 if (!fIsGroup0Enabled)
625 {
626 uLo &= pGicDev->bmIntrGroup[i];
627 uHi &= pGicDev->bmIntrGroup[i + 1];
628 }
629
630 /* If the interrupt is higher priority than the running interrupt, return whether to signal an IRQ, FIQ or neither. */
631 Assert(pGicDev->fAffRoutingEnabled);
632 uint64_t const bmIntrPending = RT_MAKE_U64(uLo, uHi);
633 uint16_t const idxPending = ASMBitFirstSetU64(bmIntrPending);
634 if (idxPending > 0)
635 {
636 /*
637 * The distributor's interrupt pending/enabled/active bitmaps have 2048 bits which map
638 * SGIs (16), PPIs (16), SPIs (988), reserved SPIs (4) and extended SPIs (1024).
639 * Of these, the first 32 bits corresponding to SGIs and PPIs are RAZ/WI when affinity
640 * routing is enabled (which it always is in our implementation).
641 */
642 uint32_t const idxIntr = 64 * i + idxPending - 1;
643 AssertRelease(idxIntr < RT_ELEMENTS(pGicDev->abIntrPriority));
644 if ( pGicDev->abIntrPriority[idxIntr] < bPriority
645 && pGicDev->au32IntrRouting[idxIntr] == idCpu)
646 {
647 Assert(idxIntr > GIC_INTID_RANGE_PPI_LAST);
648 AssertRelease(idxIntr < sizeof(pGicDev->bmIntrGroup) * 8);
649 bool const fInGroup1 = ASMBitTest(&pGicDev->bmIntrGroup[0], idxIntr);
650 bool const fInGroup0 = !fInGroup1;
651 *pfFiq = fInGroup0 && fIsGroup0Enabled;
652 *pfIrq = fInGroup1 && fIsGroup1Enabled;
653 return;
654 }
655 }
656 }
657#endif
658 *pfIrq = false;
659 *pfFiq = false;
660}
661
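/*
 * Editor's note (illustrative sketch, not part of the original source): in both
 * pending checks above, lower numeric priority values are more urgent. A candidate
 * interrupt is only signalled when its priority is strictly below both the priority
 * mask (ICC_PMR) and the priority of the currently running interrupt:
 */
static inline bool gicExampleCanPreempt(uint8_t bIntrPriority, uint8_t bPriorityMask, uint8_t bRunningPriority)
{
    uint8_t const bThreshold = bPriorityMask < bRunningPriority ? bPriorityMask : bRunningPriority; /* RT_MIN */
    return bIntrPriority < bThreshold;
}
/* E.g. with an idle running priority of 0xff and a mask of 0xff, any priority 0..0xfe qualifies. */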
662
663DECLHIDDEN(void) gicDistReadLpiConfigTableFromMem(PPDMDEVINS pDevIns)
664{
665 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
666 Assert(pGicDev->fEnableLpis);
667 LogFlowFunc(("\n"));
668
669 /* Check if the guest is disabling LPIs by setting the number of LPI INTID bits below the minimum required bits. */
670 uint8_t const cIdBits = RT_BF_GET(pGicDev->uLpiConfigBaseReg.u, GIC_BF_REDIST_REG_PROPBASER_ID_BITS) + 1;
671 if (cIdBits < GIC_LPI_ID_BITS_MIN)
672 {
673 RT_ZERO(pGicDev->abLpiConfig);
674 return;
675 }
676
677 /* Copy the LPI config table from guest memory to our internal cache. */
678 Assert(UINT32_C(2) << pGicDev->uMaxLpi <= RT_ELEMENTS(pGicDev->abLpiConfig));
679 RTGCPHYS const GCPhysLpiConfigTable = pGicDev->uLpiConfigBaseReg.u & GIC_BF_REDIST_REG_PROPBASER_PHYS_ADDR_MASK;
680 uint32_t const cbLpiConfigTable = sizeof(pGicDev->abLpiConfig);
681
682 /** @todo Try releasing and re-acquiring the device critical section here.
683 * Probably safe, but haven't verified this... */
684 int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysLpiConfigTable, (void *)&pGicDev->abLpiConfig[0], cbLpiConfigTable);
685 AssertRC(rc);
686}
687
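/*
 * Editor's note (illustrative sketch; the sizing follows the GICv3 LPI scheme and is
 * not code from the original source): GICR_PROPBASER.IDbits encodes the supported
 * INTID width minus one, there is one configuration byte per LPI, and LPI INTIDs
 * start at 8192. A guest programming 16 INTID bits therefore describes a table of
 * 2^16 - 8192 = 57344 configuration bytes.
 */
static inline uint32_t gicExampleLpiConfigTableSize(uint8_t cIdBits)
{
    /* No LPIs can be addressed with fewer than 14 INTID bits (LPIs occupy 8192 and up). */
    return cIdBits >= 14 ? (UINT32_C(1) << cIdBits) - 8192 : 0;
}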
688
689static void gicReDistReadLpiPendingBitmapFromMem(PPDMDEVINS pDevIns, PVMCPU pVCpu, PGICDEV pGicDev)
690{
691 Assert(pGicDev->fEnableLpis);
692 LogFlowFunc(("\n"));
693
694 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
695 bool const fIsZeroed = RT_BF_GET(pGicDev->uLpiPendingBaseReg.u, GIC_BF_REDIST_REG_PENDBASER_PTZ);
696 if (!fIsZeroed)
697 {
698 /* Copy the LPI pending bitmap from guest memory to our internal cache. */
699 RTGCPHYS const GCPhysLpiPendingBitmap = (pGicDev->uLpiPendingBaseReg.u & GIC_BF_REDIST_REG_PENDBASER_PHYS_ADDR_MASK)
700 + GIC_INTID_RANGE_LPI_START; /* Skip first 1KB (since LPI INTIDs start at 8192). */
701 uint32_t const cbLpiPendingBitmap = sizeof(pGicCpu->bmLpiPending);
702
703 /** @todo Try releasing and re-acquiring the device critical section here.
704 * Probably safe, but haven't verified this... */
705 int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysLpiPendingBitmap, (void *)&pGicCpu->bmLpiPending[0],
706 cbLpiPendingBitmap);
707 AssertRC(rc);
708 }
709 else
710 RT_ZERO(pGicCpu->bmLpiPending); /* Paranoia. */
711}
712
713
714/**
715 * Updates the internal IRQ state and sets or clears the appropriate force action
716 * flags.
717 *
718 * @returns Strict VBox status code.
719 * @param pGicDev The GIC distributor state.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static VBOXSTRICTRC gicReDistUpdateIrqState(PCGICDEV pGicDev, PVMCPUCC pVCpu)
723{
724 LogFlowFunc(("\n"));
725 bool fIrq;
726 bool fFiq;
727 gicReDistHasIrqPending(VMCPU_TO_GICCPU(pVCpu), &fIrq, &fFiq);
728
729 bool fIrqDist;
730 bool fFiqDist;
731 gicDistHasIrqPendingForVCpu(pGicDev, pVCpu, pVCpu->idCpu, &fIrqDist, &fFiqDist);
732 LogFlowFunc(("fIrq=%RTbool fFiq=%RTbool fIrqDist=%RTbool fFiqDist=%RTbool\n", fIrq, fFiq, fIrqDist, fFiqDist));
733
734 fIrq |= fIrqDist;
735 fFiq |= fFiqDist;
736 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
737 return VINF_SUCCESS;
738}
739
740
741/**
742 * Updates the internal IRQ state of the distributor and sets or clears the appropriate force action flags.
743 *
744 * @returns Strict VBox status code.
745 * @param pVM The cross context VM state.
746 * @param pGicDev The GIC distributor state.
747 */
748static VBOXSTRICTRC gicDistUpdateIrqState(PCVMCC pVM, PCGICDEV pGicDev)
749{
750 LogFlowFunc(("\n"));
751 for (uint32_t i = 0; i < pVM->cCpus; i++)
752 {
753 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[i];
754 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
755
756 bool fIrq, fFiq;
757 gicReDistHasIrqPending(pGicCpu, &fIrq, &fFiq);
758
759 bool fIrqDist, fFiqDist;
760 gicDistHasIrqPendingForVCpu(pGicDev, pVCpu, i, &fIrqDist, &fFiqDist);
761 fIrq |= fIrqDist;
762 fFiq |= fFiqDist;
763
764 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
765 }
766 return VINF_SUCCESS;
767}
768
769
770/**
771 * Reads the distributor's interrupt routing register (GICD_IROUTER).
772 *
773 * @returns Strict VBox status code.
774 * @param pGicDev The GIC distributor state.
775 * @param idxReg The index of the register in the GICD_IROUTER range.
776 * @param puValue Where to store the register's value.
777 */
778static VBOXSTRICTRC gicDistReadIntrRoutingReg(PCGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
779{
780 /* When affinity routing is disabled, reads return 0. */
781 Assert(pGicDev->fAffRoutingEnabled);
782
783 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
784 idxReg += GIC_INTID_RANGE_SPI_START;
785 AssertReturn(idxReg < RT_ELEMENTS(pGicDev->au32IntrRouting), VERR_BUFFER_OVERFLOW);
786 Assert(idxReg < sizeof(pGicDev->bmIntrRoutingMode) * 8);
787 if (!(idxReg % 2))
788 {
789 /* Lower 32-bits. */
790 uint8_t const fIrm = ASMBitTest(&pGicDev->bmIntrRoutingMode[0], idxReg);
791 *puValue = GIC_DIST_REG_IROUTERn_SET(fIrm, pGicDev->au32IntrRouting[idxReg]);
792 }
793 else
794 {
795 /* Upper 32-bits. */
796 *puValue = pGicDev->au32IntrRouting[idxReg] >> 24;
797 }
798
799 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
800 return VINF_SUCCESS;
801}
802
803
804/**
805 * Writes the distributor's interrupt routing register (GICD_IROUTER).
806 *
807 * @returns Strict VBox status code.
808 * @param pGicDev The GIC distributor state.
809 * @param idxReg The index of the register in the GICD_IROUTER range.
810 * @param uValue The value to write to the register.
811 */
812static VBOXSTRICTRC gicDistWriteIntrRoutingReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
813{
814 /* When affinity routing is disabled, writes are ignored. */
815 Assert(pGicDev->fAffRoutingEnabled);
816
817 AssertMsgReturn(idxReg < RT_ELEMENTS(pGicDev->au32IntrRouting), ("idxReg=%u\n", idxReg), VERR_BUFFER_OVERFLOW);
818 Assert(idxReg < sizeof(pGicDev->bmIntrRoutingMode) * 8);
819 if (!(idxReg % 2))
820 {
821 /* Lower 32-bits. */
822 bool const fIrm = GIC_DIST_REG_IROUTERn_IRM_GET(uValue);
823 if (fIrm)
824 ASMBitSet(&pGicDev->bmIntrRoutingMode[0], idxReg);
825 else
826 ASMBitClear(&pGicDev->bmIntrRoutingMode[0], idxReg);
827 uint32_t const fAff3 = pGicDev->au32IntrRouting[idxReg] & 0xff000000;
828 pGicDev->au32IntrRouting[idxReg] = fAff3 | (uValue & 0x00ffffff);
829 }
830 else
831 {
832 /* Upper 32-bits. */
833 uint32_t const fAffOthers = pGicDev->au32IntrRouting[idxReg] & 0x00ffffff;
834 pGicDev->au32IntrRouting[idxReg] = (uValue << 24) | fAffOthers;
835 }
836
837 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->au32IntrRouting[idxReg]));
838 return VINF_SUCCESS;
839}
840
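/*
 * Editor's sketch (illustrative only, mirrors the packing used above): each 64-bit
 * GICD_IROUTER value is stored internally as a single 32-bit entry, with Aff2.Aff1.Aff0
 * in bits 0..23 (taken from the lower register half) and Aff3 in bits 24..31 (taken
 * from the upper half); the Interrupt_Routing_Mode bit lives in a separate bitmap.
 */
static inline uint32_t gicExamplePackRouting(uint32_t uLo, uint32_t uHi)
{
    return (uHi << 24) | (uLo & UINT32_C(0x00ffffff));
}
static inline uint32_t gicExampleUnpackUpperHalf(uint32_t uPacked)
{
    return uPacked >> 24; /* A read of the upper register half returns Aff3. */
}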
841
842/**
843 * Reads the distributor's interrupt (set/clear) enable register (GICD_ISENABLER and
844 * GICD_ICENABLER).
845 *
846 * @returns Strict VBox status code.
847 * @param pGicDev The GIC distributor state.
848 * @param idxReg The index of the register in the GICD_ISENABLER and
849 * GICD_ICENABLER range.
850 * @param puValue Where to store the register's value.
851 */
852static VBOXSTRICTRC gicDistReadIntrEnableReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
853{
854 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
855 *puValue = pGicDev->bmIntrEnabled[idxReg];
856 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
857 return VINF_SUCCESS;
858}
859
860
861/**
862 * Writes the distributor's interrupt set-enable register (GICD_ISENABLER).
863 *
864 * @returns Strict VBox status code.
865 * @param pVM The cross context VM structure.
866 * @param pGicDev The GIC distributor state.
867 * @param idxReg The index of the register in the GICD_ISENABLER range.
868 * @param uValue The value to write to the register.
869 */
870static VBOXSTRICTRC gicDistWriteIntrSetEnableReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
871{
872 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
873 Assert(pGicDev->fAffRoutingEnabled);
874 if (idxReg > 0)
875 {
876 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
877 pGicDev->bmIntrEnabled[idxReg] |= uValue;
878 return gicDistUpdateIrqState(pVM, pGicDev);
879 }
880 else
881 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
882 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
883 return VINF_SUCCESS;
884}
885
886
887/**
888 * Writes the distributor's interrupt clear-enable register (GICD_ICENABLER).
889 *
890 * @returns Strict VBox status code.
891 * @param pVM The cross context VM structure.
892 * @param pGicDev The GIC distributor state.
893 * @param idxReg The index of the register in the GICD_ICENABLER range.
894 * @param uValue The value to write to the register.
895 */
896static VBOXSTRICTRC gicDistWriteIntrClearEnableReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
897{
898 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
899 Assert(pGicDev->fAffRoutingEnabled);
900 if (idxReg > 0)
901 {
902 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
903 pGicDev->bmIntrEnabled[idxReg] &= ~uValue;
904 return gicDistUpdateIrqState(pVM, pGicDev);
905 }
906 else
907 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
908 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
909 return VINF_SUCCESS;
910}
911
912
913/**
914 * Reads the distributor's interrupt active register (GICD_ISACTIVER and
915 * GICD_ICACTIVER).
916 *
917 * @returns Strict VBox status code.
918 * @param pGicDev The GIC distributor state.
919 * @param idxReg The index of the register in the GICD_ISACTIVER and
920 * GICD_ICACTIVER range.
921 * @param puValue Where to store the register's value.
922 */
923static VBOXSTRICTRC gicDistReadIntrActiveReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
924{
925 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
926 *puValue = pGicDev->bmIntrActive[idxReg];
927 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
928 return VINF_SUCCESS;
929}
930
931
932/**
933 * Writes the distributor's interrupt set-active register (GICD_ISACTIVER).
934 *
935 * @returns Strict VBox status code.
936 * @param pVM The cross context VM structure.
937 * @param pGicDev The GIC distributor state.
938 * @param idxReg The index of the register in the GICD_ISACTIVER range.
939 * @param uValue The value to write to the register.
940 */
941static VBOXSTRICTRC gicDistWriteIntrSetActiveReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
942{
943 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
944 Assert(pGicDev->fAffRoutingEnabled);
945 if (idxReg > 0)
946 {
947 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
948 pGicDev->bmIntrActive[idxReg] |= uValue;
949 return gicDistUpdateIrqState(pVM, pGicDev);
950 }
951 else
952 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
953 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
954 return VINF_SUCCESS;
955}
956
957
958/**
959 * Writes the distributor's interrupt clear-active register (GICD_ICACTIVER).
960 *
961 * @returns Strict VBox status code.
962 * @param pVM The cross context VM structure.
963 * @param pGicDev The GIC distributor state.
964 * @param idxReg The index of the register in the GICD_ICACTIVER range.
965 * @param uValue The value to write to the register.
966 */
967static VBOXSTRICTRC gicDistWriteIntrClearActiveReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
968{
969 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
970 Assert(pGicDev->fAffRoutingEnabled);
971 if (idxReg > 0)
972 {
973 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
974 pGicDev->bmIntrActive[idxReg] &= ~uValue;
975 return gicDistUpdateIrqState(pVM, pGicDev);
976 }
977 else
978 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
979 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
980 return VINF_SUCCESS;
981}
982
983
984/**
985 * Reads the distributor's interrupt priority register (GICD_IPRIORITYR).
986 *
987 * @returns Strict VBox status code.
988 * @param pGicDev The GIC distributor state.
989 * @param idxReg The index of the register in the GICD_IPRIORITY range.
990 * @param puValue Where to store the register's value.
991 */
992static VBOXSTRICTRC gicDistReadIntrPriorityReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
993{
994 /* When affinity routing is enabled, reads to registers 0..7 (pertaining to SGIs and PPIs) return 0. */
995 Assert(pGicDev->fAffRoutingEnabled);
996 Assert(idxReg < RT_ELEMENTS(pGicDev->abIntrPriority) / sizeof(uint32_t));
997 Assert(idxReg != 255);
998 if (idxReg > 7)
999 {
1000 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1001 AssertReturn(idxPriority <= RT_ELEMENTS(pGicDev->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1002 AssertCompile(sizeof(*puValue) == sizeof(uint32_t));
1003 *puValue = *(uint32_t *)&pGicDev->abIntrPriority[idxPriority];
1004 }
1005 else
1006 {
1007 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1008 *puValue = 0;
1009 }
1010 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1011 return VINF_SUCCESS;
1012}
1013
1014
1015/**
1016 * Writes the distributor's interrupt priority register (GICD_IPRIORITYR).
1017 *
1018 * @returns Strict VBox status code.
1019 * @param pGicDev The GIC distributor state.
1020 * @param idxReg The index of the register in the GICD_IPRIORITY range.
1021 * @param uValue The value to write to the register.
1022 */
1023static VBOXSTRICTRC gicDistWriteIntrPriorityReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1024{
1025 /* When affinity routing is enabled, writes to registers 0..7 are ignored. */
1026 Assert(pGicDev->fAffRoutingEnabled);
1027 Assert(idxReg < RT_ELEMENTS(pGicDev->abIntrPriority) / sizeof(uint32_t));
1028 Assert(idxReg != 255);
1029 if (idxReg > 7)
1030 {
1031 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1032 AssertReturn(idxPriority <= RT_ELEMENTS(pGicDev->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1033 AssertCompile(sizeof(uValue) == sizeof(uint32_t));
1034 *(uint32_t *)&pGicDev->abIntrPriority[idxPriority] = uValue;
1035 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, *(uint32_t *)&pGicDev->abIntrPriority[idxPriority]));
1036 }
1037 else
1038 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1039 return VINF_SUCCESS;
1040}
1041
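/*
 * Editor's note (illustrative sketch, not part of the original source): each 32-bit
 * GICD_IPRIORITYR register packs four byte-wide priority fields, so register index
 * idxReg covers interrupt indices idxReg*4 .. idxReg*4+3. This is why the byte array
 * above is addressed at idxReg * sizeof(uint32_t).
 */
static inline uint8_t gicExamplePriorityFromReg(uint32_t uRegValue, uint8_t idxByte /* 0..3 */)
{
    /* Byte extraction matching the 32-bit access above on little-endian hosts. */
    return (uint8_t)(uRegValue >> (idxByte * 8));
}
/* E.g. for GICD_IPRIORITYR<8>, the first SPI register, byte 0 holds the priority of INTID 32. */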
1042
1043/**
1044 * Reads the distributor's interrupt pending register (GICD_ISPENDR and
1045 * GICD_ICPENDR).
1046 *
1047 * @returns Strict VBox status code.
1048 * @param pGicDev The GIC distributor state.
1049 * @param idxReg The index of the register in the GICD_ISPENDR and
1050 * GICD_ICPENDR range.
1051 * @param puValue Where to store the register's value.
1052 */
1053static VBOXSTRICTRC gicDistReadIntrPendingReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1054{
1055 /* When affinity routing is enabled, reads for SGIs and PPIs return 0. */
1056 Assert(pGicDev->fAffRoutingEnabled);
1057 if (idxReg > 0)
1058 {
1059 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1060 *puValue = pGicDev->bmIntrPending[idxReg];
1061 }
1062 else
1063 {
1064 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1065 *puValue = 0;
1066 }
1067 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1068 return VINF_SUCCESS;
1069}
1070
1071
1072/**
1073 * Writes the distributor's interrupt set-pending register (GICD_ISPENDR).
1074 *
1075 * @returns Strict VBox status code.
1076 * @param pVM The cross context VM structure.
1077 * @param pGicDev The GIC distributor state.
1078 * @param idxReg The index of the register in the GICD_ISPENDR range.
1079 * @param uValue The value to write to the register.
1080 */
1081static VBOXSTRICTRC gicDistWriteIntrSetPendingReg(PVMCC pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1082{
1083 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1084 Assert(pGicDev->fAffRoutingEnabled);
1085 if (idxReg > 0)
1086 {
1087 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1088 pGicDev->bmIntrPending[idxReg] |= uValue;
1089 return gicDistUpdateIrqState(pVM, pGicDev);
1090 }
1091 else
1092 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1093 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1094 return VINF_SUCCESS;
1095}
1096
1097
1098/**
1099 * Writes the distributor's interrupt clear-pending register (GICD_ICPENDR).
1100 *
1101 * @returns Strict VBox status code.
1102 * @param pVM The cross context VM structure.
1103 * @param pGicDev The GIC distributor state.
1104 * @param idxReg The index of the register in the GICD_ICPENDR range.
1105 * @param uValue The value to write to the register.
1106 */
1107static VBOXSTRICTRC gicDistWriteIntrClearPendingReg(PVMCC pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1108{
1109 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1110 Assert(pGicDev->fAffRoutingEnabled);
1111 if (idxReg > 0)
1112 {
1113 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1114 pGicDev->bmIntrPending[idxReg] &= ~uValue;
1115 return gicDistUpdateIrqState(pVM, pGicDev);
1116 }
1117 else
1118 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1119 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1120 return VINF_SUCCESS;
1121}
1122
1123
1124/**
1125 * Reads the distributor's interrupt config register (GICD_ICFGR).
1126 *
1127 * @returns Strict VBox status code.
1128 * @param pGicDev The GIC distributor state.
1129 * @param idxReg The index of the register in the GICD_ICFGR range.
1130 * @param puValue Where to store the register's value.
1131 */
1132static VBOXSTRICTRC gicDistReadIntrConfigReg(PCGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1133{
1134 /* When affinity routing is enabled, reads of the SGI and PPI registers return 0. */
1135 Assert(pGicDev->fAffRoutingEnabled);
1136 if (idxReg >= 2)
1137 {
1138 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrConfig));
1139 *puValue = pGicDev->bmIntrConfig[idxReg];
1140 }
1141 else
1142 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1143 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrConfig[idxReg]));
1144 return VINF_SUCCESS;
1145}
1146
1147
1148/**
1149 * Writes the distributor's interrupt config register (GICD_ICFGR).
1150 *
1151 * @returns Strict VBox status code.
1152 * @param pGicDev The GIC distributor state.
1153 * @param idxReg The index of the register in the GICD_ICFGR range.
1154 * @param uValue The value to write to the register.
1155 */
1156static VBOXSTRICTRC gicDistWriteIntrConfigReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1157{
1158 /* When affinity routing is enabled, writes to the SGI and PPI registers are ignored. */
1159 Assert(pGicDev->fAffRoutingEnabled);
1160 if (idxReg >= 2)
1161 {
1162 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrConfig));
1163 pGicDev->bmIntrConfig[idxReg] = uValue & 0xaaaaaaaa;
1164 }
1165 else
1166 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1167 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrConfig[idxReg]));
1168 return VINF_SUCCESS;
1169}
1170
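/*
 * Editor's note (illustrative sketch, not part of the original source): GICD_ICFGR
 * packs two configuration bits per interrupt and only the upper bit of each pair is
 * meaningful (1 = edge-triggered, 0 = level-sensitive), which is why writes above are
 * masked with 0xaaaaaaaa. Decoding the trigger mode of one of the 16 interrupts
 * covered by a register value:
 */
static inline bool gicExampleIsEdgeTriggered(uint32_t uIcfgrValue, uint8_t idxIntrInReg /* 0..15 */)
{
    return ((uIcfgrValue >> (idxIntrInReg * 2 + 1)) & 1) != 0;
}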
1171
1172/**
1173 * Reads the distributor's interrupt group register (GICD_IGROUPR).
1174 *
1175 * @returns Strict VBox status code.
1176 * @param pGicDev The GIC distributor state.
1177 * @param idxReg The index of the register in the GICD_IGROUPR range.
1178 * @param puValue Where to store the register's value.
1179 */
1180static VBOXSTRICTRC gicDistReadIntrGroupReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1181{
1182 /* When affinity routing is enabled, reads to SGIs and PPIs return 0. */
1183 Assert(pGicDev->fAffRoutingEnabled);
1184 if (idxReg > 0)
1185 {
1186 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrGroup));
1187 *puValue = pGicDev->bmIntrGroup[idxReg];
1188 }
1189 else
1190 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1191 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1192 return VINF_SUCCESS;
1193}
1194
1195
1196/**
1197 * Writes the distributor's interrupt group register (GICD_IGROUPR).
1198 *
1199 * @returns Strict VBox status code.
1200 * @param pVM The cross context VM structure.
1201 * @param pGicDev The GIC distributor state.
1202 * @param idxReg The index of the register in the GICD_IGROUPR range.
1203 * @param uValue The value to write to the register.
1204 */
1205static VBOXSTRICTRC gicDistWriteIntrGroupReg(PCVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1206{
1207 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1208 Assert(pGicDev->fAffRoutingEnabled);
1209 if (idxReg > 0)
1210 {
1211 pGicDev->bmIntrGroup[idxReg] = uValue;
1212 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrGroup[idxReg]));
1213 }
1214 else
1215 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1216 return gicDistUpdateIrqState(pVM, pGicDev);
1217}
1218
1219
1220/**
1221 * Reads the redistributor's interrupt priority register (GICR_IPRIORITYR).
1222 *
1223 * @returns Strict VBox status code.
1224 * @param pGicDev The GIC distributor state.
1225 * @param pGicCpu The GIC redistributor and CPU interface state.
1226 * @param idxReg The index of the register in the GICR_IPRIORITY range.
1227 * @param puValue Where to store the register's value.
1228 */
1229static VBOXSTRICTRC gicReDistReadIntrPriorityReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1230{
1231 /* When affinity routing is disabled, reads return 0. */
1232 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1233 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1234 AssertReturn(idxPriority <= RT_ELEMENTS(pGicCpu->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1235 AssertCompile(sizeof(*puValue) == sizeof(uint32_t));
1236 *puValue = *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority];
1237 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1238 return VINF_SUCCESS;
1239}
1240
1241
1242/**
1243 * Writes the redistributor's interrupt priority register (GICR_IPRIORITYR).
1244 *
1245 * @returns Strict VBox status code.
1246 * @param pGicDev The GIC distributor state.
1247 * @param pVCpu The cross context virtual CPU structure.
1248 * @param idxReg The index of the register in the GICR_IPRIORITY range.
1249 * @param uValue The value to write to the register.
1250 */
1251static VBOXSTRICTRC gicReDistWriteIntrPriorityReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1252{
1253 /* When affinity routing is disabled, writes are ignored. */
1254 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1255 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1256 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1257 AssertReturn(idxPriority <= RT_ELEMENTS(pGicCpu->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1258 AssertCompile(sizeof(uValue) == sizeof(uint32_t));
1259 *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority] = uValue;
1260 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority]));
1261 return VINF_SUCCESS;
1262}
1263
1264
1265/**
1266 * Reads the redistributor's interrupt pending register (GICR_ISPENDR and
1267 * GICR_ICPENDR).
1268 *
1269 * @returns Strict VBox status code.
1270 * @param pGicDev The GIC distributor state.
1271 * @param pGicCpu The GIC redistributor and CPU interface state.
1272 * @param idxReg The index of the register in the GICR_ISPENDR and
1273 * GICR_ICPENDR range.
1274 * @param puValue Where to store the register's value.
1275 */
1276static VBOXSTRICTRC gicReDistReadIntrPendingReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1277{
1278 /* When affinity routing is disabled, reads return 0. */
1279 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1280 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1281 *puValue = pGicCpu->bmIntrPending[idxReg];
1282 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1283 return VINF_SUCCESS;
1284}
1285
1286
1287/**
1288 * Writes the redistributor's interrupt set-pending register (GICR_ISPENDR).
1289 *
1290 * @returns Strict VBox status code.
1291 * @param pGicDev The GIC distributor state.
1292 * @param pVCpu The cross context virtual CPU structure.
1293 * @param idxReg The index of the register in the GICR_ISPENDR range.
1294 * @param uValue The value to write to the register.
1295 */
1296static VBOXSTRICTRC gicReDistWriteIntrSetPendingReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1297{
1298 /* When affinity routing is disabled, writes are ignored. */
1299 Assert(pGicDev->fAffRoutingEnabled);
1300 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1301 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1302 pGicCpu->bmIntrPending[idxReg] |= uValue;
1303 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1304 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1305}
1306
1307
1308/**
1309 * Writes the redistributor's interrupt clear-pending register (GICR_ICPENDR).
1310 *
1311 * @returns Strict VBox status code.
1312 * @param pGicDev The GIC distributor state.
1313 * @param pVCpu The cross context virtual CPU structure.
1314 * @param idxReg The index of the register in the GICR_ICPENDR range.
1315 * @param uValue The value to write to the register.
1316 */
1317static VBOXSTRICTRC gicReDistWriteIntrClearPendingReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1318{
1319 /* When affinity routing is disabled, writes are ignored. */
1320 Assert(pGicDev->fAffRoutingEnabled);
1321 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1322 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1323 pGicCpu->bmIntrPending[idxReg] &= ~uValue;
1324 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1325 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1326}
1327
1328
1329/**
1330 * Reads the redistributor's interrupt enable register (GICR_ISENABLER and
1331 * GICR_ICENABLER).
1332 *
1333 * @returns Strict VBox status code.
1334 * @param pGicDev The GIC distributor state.
1335 * @param pGicCpu The GIC redistributor and CPU interface state.
1336 * @param idxReg The index of the register in the GICR_ISENABLER and
1337 * GICR_ICENABLER range.
1338 * @param puValue Where to store the register's value.
1339 */
1340static VBOXSTRICTRC gicReDistReadIntrEnableReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1341{
1342 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1343 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1344 *puValue = pGicCpu->bmIntrEnabled[idxReg];
1345 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1346 return VINF_SUCCESS;
1347}
1348
1349
1350/**
1351 * Writes the redistributor's interrupt set-enable register (GICR_ISENABLER).
1352 *
1353 * @returns Strict VBox status code.
1354 * @param pGicDev The GIC distributor state.
1355 * @param pVCpu The cross context virtual CPU structure.
1356 * @param idxReg The index of the register in the GICR_ISENABLER range.
1357 * @param uValue The value to write to the register.
1358 */
1359static VBOXSTRICTRC gicReDistWriteIntrSetEnableReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1360{
1361 Assert(pGicDev->fAffRoutingEnabled);
1362 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1363 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1364 pGicCpu->bmIntrEnabled[idxReg] |= uValue;
1365 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1366 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1367}
1368
1369
1370/**
1371 * Writes the redistributor's interrupt clear-enable register (GICR_ICENABLER).
1372 *
1373 * @returns Strict VBox status code.
1374 * @param pGicDev The GIC distributor state.
1375 * @param pVCpu The cross context virtual CPU structure.
1376 * @param idxReg The index of the register in the GICR_ICENABLER range.
1377 * @param uValue The value to write to the register.
1378 */
1379static VBOXSTRICTRC gicReDistWriteIntrClearEnableReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1380{
1381 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1382 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1383 pGicCpu->bmIntrEnabled[idxReg] &= ~uValue;
1384 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1385 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1386}
1387
1388
1389/**
1390 * Reads the redistributor's interrupt active register (GICR_ISACTIVER and
1391 * GICR_ICACTIVER).
1392 *
1393 * @returns Strict VBox status code.
1394 * @param pGicCpu The GIC redistributor and CPU interface state.
1395 * @param idxReg The index of the register in the GICR_ISACTIVER and
1396 * GICR_ICACTIVER range.
1397 * @param puValue Where to store the register's value.
1398 */
1399static VBOXSTRICTRC gicReDistReadIntrActiveReg(PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1400{
1401 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1402 *puValue = pGicCpu->bmIntrActive[idxReg];
1403 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1404 return VINF_SUCCESS;
1405}
1406
1407
1408/**
1409 * Writes the redistributor's interrupt set-active register (GICR_ISACTIVER).
1410 *
1411 * @returns Strict VBox status code.
1412 * @param pGicDev The GIC distributor state.
1413 * @param pVCpu The cross context virtual CPU structure.
1414 * @param idxReg The index of the register in the GICR_ISACTIVER range.
1415 * @param uValue The value to write to the register.
1416 */
1417static VBOXSTRICTRC gicReDistWriteIntrSetActiveReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1418{
1419 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1420 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1421 pGicCpu->bmIntrActive[idxReg] |= uValue;
1422 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1423 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1424}
1425
1426
1427/**
1428 * Writes the redistributor's interrupt clear-active register (GICR_ICACTIVER).
1429 *
1430 * @returns Strict VBox status code.
1431 * @param pGicDev The GIC distributor state.
1432 * @param pVCpu The cross context virtual CPU structure.
1433 * @param idxReg The index of the register in the GICR_ICACTIVER range.
1434 * @param uValue The value to write to the register.
1435 */
1436static VBOXSTRICTRC gicReDistWriteIntrClearActiveReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1437{
1438 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1439 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1440 pGicCpu->bmIntrActive[idxReg] &= ~uValue;
1441 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1442 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1443}
1444
1445
1446/**
1447 * Reads the redistributor's interrupt config register (GICR_ICFGR).
1448 *
1449 * @returns Strict VBox status code.
1450 * @param pGicDev The GIC distributor state.
1451 * @param pGicCpu The GIC redistributor and CPU interface state.
1452 * @param idxReg The index of the register in the GICR_ICFGR range.
1453 * @param puValue Where to store the register's value.
1454 */
1455static VBOXSTRICTRC gicReDistReadIntrConfigReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1456{
1457 /* When affinity routing is disabled, reads return 0. */
1458 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1459 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrConfig));
1460 *puValue = pGicCpu->bmIntrConfig[idxReg];
1461 /* Ensure SGIs are read-only and remain configured as edge-triggered. */
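/* Illustration: each INTID occupies a 2-bit field in ICFGR and the upper bit set (binary 10) marks it
   edge-triggered, so the 16 SGIs of register 0 read back as 0xaaaaaaaa. */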
1462 Assert(idxReg > 0 || *puValue == 0xaaaaaaaa);
1463 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1464 return VINF_SUCCESS;
1465}
1466
1467
1468/**
1469 * Writes the redistributor's interrupt config register (GICR_ICFGR).
1470 *
1471 * @returns Strict VBox status code.
1472 * @param pGicDev The GIC distributor state.
1473 * @param pVCpu The cross context virtual CPU structure.
1474 * @param idxReg The index of the register in the GICR_ICFGR range.
1475 * @param uValue The value to write to the register.
1476 */
1477static VBOXSTRICTRC gicReDistWriteIntrConfigReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1478{
1479 /* When affinity routing is disabled, writes are ignored. */
1480 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1481 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1482 if (idxReg > 0)
1483 {
1484 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrConfig));
1485 pGicCpu->bmIntrConfig[idxReg] = uValue & 0xaaaaaaaa;
1486 }
1487 else
1488 {
1489 /* SGIs are always edge-triggered; ignore writes. Windows 11 (24H2) arm64 guests write these. */
1490 Assert(uValue == 0xaaaaaaaa);
1491 Assert(pGicCpu->bmIntrConfig[0] == uValue);
1492 }
1493 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrConfig[idxReg]));
1494 return VINF_SUCCESS;
1495}
1496
1497
1498/**
1499 * Reads the redistributor's interrupt group register (GICR_IGROUPR).
1500 *
1501 * @returns Strict VBox status code.
1502 * @param pGicDev The GIC distributor state.
1503 * @param pGicCpu The GIC redistributor and CPU interface state.
1504 * @param idxReg The index of the register in the GICR_IGROUPR range.
1505 * @param puValue Where to store the register's value.
1506 */
1507static VBOXSTRICTRC gicReDistReadIntrGroupReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1508{
1509 /* When affinity routing is disabled, reads return 0. */
1510 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1511 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrGroup));
1512 *puValue = pGicCpu->bmIntrGroup[idxReg];
1513 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrGroup[idxReg]));
1514 return VINF_SUCCESS;
1515}
1516
1517
1518/**
1519 * Writes the redistributor's interrupt group register (GICR_IGROUPR).
1520 *
1521 * @returns Strict VBox status code.
1522 * @param pGicDev The GIC distributor state.
1523 * @param pVCpu The cross context virtual CPU structure.
1524 * @param idxReg The index of the register in the GICR_IGROUPR range.
1525 * @param uValue The value to write to the register.
1526 */
1527static VBOXSTRICTRC gicReDistWriteIntrGroupReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1528{
1529 /* When affinity routing is disabled, writes are ignored. */
1530 Assert(pGicDev->fAffRoutingEnabled);
1531 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1532 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrGroup));
1533 pGicCpu->bmIntrGroup[idxReg] = uValue;
1534 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrGroup[idxReg]));
1535 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1536}
1537
1538
1539/**
1540 * Gets the virtual CPUID given the affinity values.
1541 *
1542 * @returns The virtual CPUID.
1543 * @param idCpuInterface The virtual CPUID within the PE cluster (0..15).
1544 * @param uAff1 The affinity 1 value.
1545 * @param uAff2 The affinity 2 value.
1546 * @param uAff3 The affinity 3 value.
1547 */
1548DECL_FORCE_INLINE(VMCPUID) gicGetCpuIdFromAffinity(uint8_t idCpuInterface, uint8_t uAff1, uint8_t uAff2, uint8_t uAff3)
1549{
1550 AssertReturn(idCpuInterface < 16, 0);
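/* The affinity fields are flattened into a linear VCPU ID: each Aff1 cluster holds 16 PEs, each Aff2
   value spans 256 clusters (16 * 256 = 4096) and each Aff3 value spans 256 of those (4096 * 256 = 1048576).
   Worked example: Aff3=0, Aff2=1, Aff1=2, CPU interface 3 maps to 0 + 4096 + 32 + 3 = 4131. */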
1551 return (uAff3 * 1048576) + (uAff2 * 4096) + (uAff1 * 16) + idCpuInterface;
1552}
1553
1554
1555/**
1556 * Gets the highest priority pending interrupt that can be signalled to the PE.
1557 *
1558 * @returns The interrupt ID or GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT if no interrupt
1559 * is pending or not in a state to be signalled to the PE.
1560 * @param pGicDev The GIC distributor state.
1561 * @param pGicCpu The GIC redistributor and CPU interface state.
1562 * @param fGroup0 Whether to consider group 0 interrupts.
1563 * @param fGroup1 Whether to consider group 1 interrupts.
1564 * @param pidxIntr Where to store the distributor interrupt index for the
1565 * returned interrupt ID. UINT16_MAX if this function returns
1566 * GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT. Optional, can be
1567 * NULL.
1568 * @param pbPriority Where to store the priority of the returned interrupt ID.
1569 * GIC_IDLE_PRIORITY if this function returns
1570 * GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT.
1571 */
1572static uint16_t gicGetHighestPriorityPendingIntr(PCGICDEV pGicDev, PCGICCPU pGicCpu, bool fGroup0, bool fGroup1,
1573 uint16_t *pidxIntr, uint8_t *pbPriority)
1574{
1575#if 1
1576 uint16_t idxIntr = UINT16_MAX;
1577 uint16_t uIntId = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1578 uint8_t uPriority = GIC_IDLE_PRIORITY;
1579
1580 /* Redistributor. */
1581 {
1582 uint32_t bmReDistIntrs[RT_ELEMENTS(pGicCpu->bmIntrPending)];
1583 AssertCompile(sizeof(pGicCpu->bmIntrPending) == sizeof(bmReDistIntrs));
1584 for (uint16_t i = 0; i < RT_ELEMENTS(bmReDistIntrs); i++)
1585 {
1586 /* Collect interrupts that are pending, enabled and inactive. */
1587 bmReDistIntrs[i] = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
1588 /* Discard interrupts if the group they belong to is disabled. */
1589 if (!fGroup1)
1590 bmReDistIntrs[i] &= ~pGicCpu->bmIntrGroup[i];
1591 if (!fGroup0)
1592 bmReDistIntrs[i] &= pGicCpu->bmIntrGroup[i];
1593 }
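/* Note: a set bit in bmIntrGroup marks the INTID as group 1, so with e.g. fGroup0=false and
   fGroup1=true only interrupts whose group bit is set survive the filtering above. */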
1594 /* Among the collected interrupts, pick the one with the highest, non-idle priority. */
1595 uint16_t idxHighest = UINT16_MAX;
1596 const void *pvIntrs = &bmReDistIntrs[0];
1597 uint32_t const cIntrs = sizeof(bmReDistIntrs) * 8; AssertCompile(!(cIntrs % 32));
1598 int16_t idxPending = ASMBitFirstSet(pvIntrs, cIntrs);
1599 if (idxPending >= 0)
1600 {
1601 do
1602 {
1603 if (pGicCpu->abIntrPriority[idxPending] < uPriority)
1604 {
1605 idxHighest = (uint16_t)idxPending;
1606 uPriority = pGicCpu->abIntrPriority[idxPending];
1607 }
1608 idxPending = ASMBitNextSet(pvIntrs, cIntrs, idxPending);
1609 } while (idxPending != -1);
1610 if (idxHighest != UINT16_MAX)
1611 {
1612 uIntId = gicReDistGetIntIdFromIndex(idxHighest);
1613 idxIntr = idxHighest;
1614 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
1615 || GIC_IS_INTR_EXT_PPI(uIntId));
1616 }
1617 }
1618 }
1619
1620 /* Distributor */
1621 {
1622 uint32_t bmDistIntrs[RT_ELEMENTS(pGicDev->bmIntrPending)];
1623 AssertCompile(sizeof(pGicDev->bmIntrPending) == sizeof(bmDistIntrs));
1624 for (uint16_t i = 0; i < RT_ELEMENTS(bmDistIntrs); i++)
1625 {
1626 /* Collect interrupts that are pending, enabled and inactive. */
1627 bmDistIntrs[i] = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
1628 /* Discard interrupts if the group they belong to is disabled. */
1629 if (!fGroup1)
1630 bmDistIntrs[i] &= ~pGicDev->bmIntrGroup[i];
1631 if (!fGroup0)
1632 bmDistIntrs[i] &= pGicDev->bmIntrGroup[i];
1633 }
1634 /* Among the collected interrupts, pick one with priority higher than what we picked from the redistributor. */
1635 {
1636 uint16_t idxHighest = UINT16_MAX;
1637 const void *pvIntrs = &bmDistIntrs[0];
1638 uint32_t const cIntrs = sizeof(bmDistIntrs) * 8; AssertCompile(!(cIntrs % 32));
1639 int16_t idxPending = ASMBitFirstSet(pvIntrs, cIntrs);
1640 if (idxPending >= 0)
1641 {
1642 do
1643 {
1644 if (pGicDev->abIntrPriority[idxPending] < uPriority)
1645 {
1646 idxHighest = (uint16_t)idxPending;
1647 uPriority = pGicDev->abIntrPriority[idxPending];
1648 }
1649 idxPending = ASMBitNextSet(pvIntrs, cIntrs, idxPending);
1650 } while (idxPending != -1);
1651 if (idxHighest != UINT16_MAX)
1652 {
1653 uIntId = gicDistGetIntIdFromIndex(idxHighest);
1654 idxIntr = idxHighest;
1655 Assert( GIC_IS_INTR_SPI(uIntId)
1656 || GIC_IS_INTR_EXT_SPI(uIntId));
1657 }
1658 }
1659 }
1660 }
1661#else /** @todo Measure and pick the faster version. */
1662 /*
1663 * Collect interrupts that are pending, enabled and inactive.
1664 * Discard interrupts if the group they belong to is disabled.
1665 * While collecting the interrupts, pick the one with the highest, non-idle priority.
1666 */
1667 uint16_t uIntId = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1668 uint16_t idxIntr = UINT16_MAX;
1669 uint8_t uPriority = GIC_IDLE_PRIORITY;
1670
1671 /* Redistributor. */
1672 {
1673 uint16_t idxHighest = UINT16_MAX;
1674 for (uint16_t i = 0; i < RT_ELEMENTS(pGicCpu->bmIntrPending); i++)
1675 {
1676 uint32_t uIntrPending = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
1677 if (!fGroup1)
1678 uIntrPending &= ~pGicCpu->bmIntrGroup[i];
1679 if (!fGroup0)
1680 uIntrPending &= pGicCpu->bmIntrGroup[i];
1681
1682 uint16_t const idxPending = ASMBitFirstSetU32(uIntrPending);
1683 if (idxPending > 0)
1684 {
1685 uint32_t const idxPriority = 32 * i + idxPending - 1;
1686 Assert(idxPriority < RT_ELEMENTS(pGicCpu->abIntrPriority));
1687 if (pGicCpu->abIntrPriority[idxPriority] < uPriority)
1688 {
1689 idxHighest = idxPriority;
1690 uPriority = pGicCpu->abIntrPriority[idxPriority];
1691 }
1692 }
1693 }
1694 if (idxHighest != UINT16_MAX)
1695 {
1696 idxIntr = idxHighest;
1697 uIntId = gicReDistGetIntIdFromIndex(idxHighest);
1698 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
1699 || GIC_IS_INTR_EXT_PPI(uIntId));
1700 Assert(uPriority != GIC_IDLE_PRIORITY);
1701 }
1702 }
1703
1704 /* Distributor. */
1705 {
1706 uint16_t idxHighest = UINT16_MAX;
1707 for (uint16_t i = 0; i < RT_ELEMENTS(pGicDev->bmIntrPending); i += 2)
1708 {
1709 uint32_t uLo = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
1710 uint32_t uHi = (pGicDev->bmIntrPending[i + 1] & pGicDev->bmIntrEnabled[i + 1]) & ~pGicDev->bmIntrActive[i + 1];
1711 if (!fGroup1)
1712 {
1713 uLo &= ~pGicDev->bmIntrGroup[i];
1714 uHi &= ~pGicDev->bmIntrGroup[i + 1];
1715 }
1716 if (!fGroup0)
1717 {
1718 uLo &= pGicDev->bmIntrGroup[i];
1719 uHi &= pGicDev->bmIntrGroup[i + 1];
1720 }
1721
1722 uint64_t const uIntrPending = RT_MAKE_U64(uLo, uHi);
1723 uint16_t const idxPending = ASMBitFirstSetU64(uIntrPending);
1724 if (idxPending > 0)
1725 {
1726 uint32_t const idxPriority = 32 * i + idxPending - 1; /* i indexes 32-bit words (stepping by 2), so the 64-bit chunk starts at bit 32*i. */
1727 if (pGicDev->abIntrPriority[idxPriority] < uPriority)
1728 {
1729 idxHighest = idxPriority;
1730 uPriority = pGicDev->abIntrPriority[idxPriority];
1731 }
1732 }
1733 }
1734 if (idxHighest != UINT16_MAX)
1735 {
1736 idxIntr = idxHighest;
1737 uIntId = gicDistGetIntIdFromIndex(idxHighest);
1738 Assert( GIC_IS_INTR_SPI(uIntId)
1739 || GIC_IS_INTR_EXT_SPI(uIntId));
1740 Assert(uPriority != GIC_IDLE_PRIORITY);
1741 }
1742 }
1743#endif
1744
1745 /* Ensure that if no interrupt is pending, the idle priority is returned. */
1746 Assert(uIntId != GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT || uPriority == GIC_IDLE_PRIORITY);
1747 if (pbPriority)
1748 *pbPriority = uPriority;
1749 if (pidxIntr)
1750 *pidxIntr = idxIntr;
1751
1752 LogFlowFunc(("uIntId=%u [idxIntr=%u uPriority=%u]\n", uIntId, idxIntr, uPriority));
1753 return uIntId;
1754}
1755
1756
1757/**
1758 * Get and acknowledge the interrupt ID of a signalled interrupt.
1759 *
1760 * @returns The interrupt ID, or GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT if no interrupt
1761 * is pending or none is in a state to be signalled.
1762 * @param pGicDev The GIC distributor state.
1763 * @param pVCpu The cross context virtual CPU structure.
1764 * @param fGroup0 Whether to consider group 0 interrupts.
1765 * @param fGroup1 Whether to consider group 1 interrupts.
1766 */
1767static uint16_t gicAckHighestPriorityPendingIntr(PGICDEV pGicDev, PVMCPUCC pVCpu, bool fGroup0, bool fGroup1)
1768{
1769 Assert(fGroup0 || fGroup1);
1770 LogFlowFunc(("fGroup0=%RTbool fGroup1=%RTbool\n", fGroup0, fGroup1));
1771
1772 /*
1773 * Get the pending interrupt with the highest priority for the given group.
1774 */
1775 uint8_t bIntrPriority;
1776 uint16_t idxIntr;
1777 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1778 STAM_PROFILE_START(&pGicCpu->StatProfIntrAck, x);
1779 uint16_t const uIntId = gicGetHighestPriorityPendingIntr(pGicDev, pGicCpu, fGroup0, fGroup1, &idxIntr, &bIntrPriority);
1780 if (uIntId != GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
1781 {
1782 /*
1783 * The interrupt priority must be higher than the priority mask of the CPU interface for the
1784 * interrupt to be signalled/acknowledged. Here, we must NOT use priority grouping when comparing
1785 * the priority of a pending interrupt with this priority mask (threshold).
1786 *
1787 * See ARM GIC spec. 4.8.6 "Priority masking".
1788 */
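/* Example: with bIntrPriorityMask = 0xf0, a pending interrupt of priority 0xef is signalled while one
   of priority 0xf0 is masked (lower numerical values mean higher priority). */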
1789 if (bIntrPriority >= pGicCpu->bIntrPriorityMask)
1790 {
1791 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1792 return GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1793 }
1794
1795 /*
1796 * The group priority of the pending interrupt must be higher than that of the running priority.
1797 * The number of bits for the group priority depends on the binary point registers.
1798 * We mask the sub-priority bits and only compare the group priority.
1799 *
1800 * When the binary point registers indicates no preemption, we must allow interrupts that have
1801 * a higher priority than idle. Hence, the use of two different masks below.
1802 *
1803 * See ARM GIC spec. 4.8.3 "Priority grouping".
1804 * See ARM GIC spec. 4.8.5 "Preemption".
1805 */
1806 static uint8_t const s_afGroupPriorityMasks[8] = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00 };
1807 static uint8_t const s_afRunningPriorityMasks[8] = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0xff };
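/* Example: a binary point of 3 selects mask 0xf0, so a pending priority of 0x28 (group 0x20) cannot
   preempt a running priority of 0x2f (also group 0x20), whereas a pending priority of 0x18 (group 0x10) can. */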
1808 uint8_t const idxPriorityMask = (fGroup0 || (pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_CBPR))
1809 ? pGicCpu->bBinaryPtGroup0 & 7
1810 : pGicCpu->bBinaryPtGroup1 & 7;
1811 uint8_t const bRunningPriority = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority];
1812 uint8_t const bRunningGroupPriority = bRunningPriority & s_afRunningPriorityMasks[idxPriorityMask];
1813 uint8_t const bIntrGroupPriority = bIntrPriority & s_afGroupPriorityMasks[idxPriorityMask];
1814 if (bIntrGroupPriority >= bRunningGroupPriority)
1815 {
1816 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1817 return GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1818 }
1819
1820 /*
1821 * Acknowledge the interrupt.
1822 */
1823 bool const fIsRedistIntId = GIC_IS_INTR_SGI_OR_PPI(uIntId) || GIC_IS_INTR_EXT_PPI(uIntId);
1824 if (fIsRedistIntId)
1825 {
1826 /* Mark the interrupt as active. */
1827 AssertMsg(idxIntr < sizeof(pGicCpu->bmIntrActive) * 8, ("idxIntr=%u\n", idxIntr));
1828 ASMBitSet(&pGicCpu->bmIntrActive[0], idxIntr);
1829
1830 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (start). */
1831 /* Update the active priorities bitmap. */
1832 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup0) * 8 >= 128);
1833 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
1834 uint8_t const idxPreemptionLevel = bIntrPriority >> 1;
1835 if (fGroup0)
1836 ASMBitSet(&pGicCpu->bmActivePriorityGroup0[0], idxPreemptionLevel);
1837 if (fGroup1)
1838 ASMBitSet(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
1839
1840 /* Drop priority. */
1841 if (RT_LIKELY(pGicCpu->idxRunningPriority < RT_ELEMENTS(pGicCpu->abRunningPriorities) - 1))
1842 {
1843 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1844 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
1845 bIntrPriority,
1846 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority + 1));
1847 ++pGicCpu->idxRunningPriority;
1848 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] = bIntrPriority;
1849 }
1850 else
1851 AssertReleaseMsgFailed(("Index of running-interrupt priority out-of-bounds %u\n", pGicCpu->idxRunningPriority));
1852 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (end). */
1853
1854 /* If it is an edge-triggered interrupt, mark it as no longer pending. */
1855 AssertRelease(UINT32_C(2) * idxIntr + 1 < sizeof(pGicCpu->bmIntrConfig) * 8);
1856 bool const fEdgeTriggered = ASMBitTest(&pGicCpu->bmIntrConfig[0], 2 * idxIntr + 1);
1857 if (fEdgeTriggered)
1858 ASMBitClear(&pGicCpu->bmIntrPending[0], idxIntr);
1859
1860 /* Update the redistributor IRQ state to reflect change to the active interrupt. */
1861 gicReDistUpdateIrqState(pGicDev, pVCpu);
1862 }
1863 else
1864 {
1865 /* Sanity check if the interrupt ID belongs to the distributor. */
1866 Assert(GIC_IS_INTR_SPI(uIntId) || GIC_IS_INTR_EXT_SPI(uIntId));
1867
1868 /* Mark the interrupt as active. */
1869 Assert(idxIntr < sizeof(pGicDev->bmIntrActive) * 8);
1870 ASMBitSet(&pGicDev->bmIntrActive[0], idxIntr);
1871
1872 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (start). */
1873 /* Update the active priorities bitmap. */
1874 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup0) * 8 >= 128);
1875 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
1876 uint8_t const idxPreemptionLevel = bIntrPriority >> 1;
1877 if (fGroup0)
1878 ASMBitSet(&pGicCpu->bmActivePriorityGroup0[0], idxPreemptionLevel);
1879 if (fGroup1)
1880 ASMBitSet(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
1881
1882 /* Drop priority. */
1883 if (RT_LIKELY(pGicCpu->idxRunningPriority < RT_ELEMENTS(pGicCpu->abRunningPriorities) - 1))
1884 {
1885 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1886 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
1887 bIntrPriority,
1888 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority + 1));
1889 ++pGicCpu->idxRunningPriority;
1890 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] = bIntrPriority;
1891 }
1892 else
1893 AssertReleaseMsgFailed(("Index of running-interrupt priority out-of-bounds %u\n", pGicCpu->idxRunningPriority));
1894 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (end). */
1895
1896 /* If it is an edge-triggered interrupt, mark it as no longer pending. */
1897 AssertRelease(UINT32_C(2) * idxIntr + 1 < sizeof(pGicDev->bmIntrConfig) * 8);
1898 bool const fEdgeTriggered = ASMBitTest(&pGicDev->bmIntrConfig[0], 2 * idxIntr + 1);
1899 if (fEdgeTriggered)
1900 ASMBitClear(&pGicDev->bmIntrPending[0], idxIntr);
1901
1902 /* Update the distributor IRQ state to reflect change to the active interrupt. */
1903 gicDistUpdateIrqState(pVCpu->CTX_SUFF(pVM), pGicDev);
1904 }
1905 }
1906 else
1907 Assert(bIntrPriority == GIC_IDLE_PRIORITY);
1908
1909 LogFlowFunc(("uIntId=%u\n", uIntId));
1910 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1911 return uIntId;
1912}
1913
1914
1915/**
1916 * Reads a distributor register.
1917 *
1918 * @returns VBox status code.
1919 * @param pDevIns The device instance.
1920 * @param pVCpu The cross context virtual CPU structure.
1921 * @param offReg The offset of the register being read.
1922 * @param puValue Where to store the register value.
1923 */
1924DECLINLINE(VBOXSTRICTRC) gicDistReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
1925{
1926 VMCPU_ASSERT_EMT(pVCpu); RT_NOREF(pVCpu);
1927 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
1928
1929 /*
1930 * 64-bit registers.
1931 */
1932 {
1933 /*
1934 * GICD_IROUTER<n> and GICD_IROUTER<n>E.
1935 */
1936 uint16_t const cbReg = sizeof(uint64_t);
1937 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE))
1938 {
1939 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
1940 uint16_t const idxExt = GIC_INTID_RANGE_SPI_START;
1941 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERn_OFF_START) / cbReg;
1942 return gicDistReadIntrRoutingReg(pGicDev, idxReg, puValue);
1943 }
1944 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE))
1945 {
1946 uint16_t const idxExt = RT_ELEMENTS(pGicDev->au32IntrRouting) / 2;
1947 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERnE_OFF_START) / cbReg;
1948 return gicDistReadIntrRoutingReg(pGicDev, idxReg, puValue);
1949 }
1950 }
1951
1952 /*
1953 * 32-bit registers.
1954 */
1955 {
1956 /*
1957 * GICD_IGROUPR<n> and GICD_IGROUPR<n>E.
1958 */
1959 uint16_t const cbReg = sizeof(uint32_t);
1960 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE))
1961 {
1962 uint16_t const idxReg = (offReg - GIC_DIST_REG_IGROUPRn_OFF_START) / cbReg;
1963 return gicDistReadIntrGroupReg(pGicDev, idxReg, puValue);
1964 }
1965 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE))
1966 {
1967 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrGroup) / 2;
1968 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IGROUPRnE_OFF_START) / cbReg;
1969 return gicDistReadIntrGroupReg(pGicDev, idxReg, puValue);
1970 }
1971
1972 /*
1973 * GICD_ISENABLER<n> and GICD_ISENABLER<n>E.
1974 * GICD_ICENABLER<n> and GICD_ICENABLER<n>E.
1975 */
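/* Each 32-bit enable register covers 32 INTIDs, e.g. a read at ISENABLERn_OFF_START + 8 is
   GICD_ISENABLER2 (idxReg=2) and covers INTIDs 64..95. */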
1976 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE))
1977 {
1978 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISENABLERn_OFF_START) / cbReg;
1979 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1980 }
1981 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE))
1982 {
1983 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
1984 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISENABLERnE_OFF_START) / cbReg;
1985 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1986 }
1987 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE))
1988 {
1989 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICENABLERn_OFF_START) / cbReg;
1990 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1991 }
1992 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE))
1993 {
1994 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
1995 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICENABLERnE_OFF_START) / cbReg;
1996 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1997 }
1998
1999 /*
2000 * GICD_ISACTIVER<n> and GICD_ISACTIVER<n>E.
2001 * GICD_ICACTIVER<n> and GICD_ICACTIVER<n>E.
2002 */
2003 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE))
2004 {
2005 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISACTIVERn_OFF_START) / cbReg;
2006 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2007 }
2008 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE))
2009 {
2010 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2011 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISACTIVERnE_OFF_START) / cbReg;
2012 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2013 }
2014 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE))
2015 {
2016 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICACTIVERn_OFF_START) / cbReg;
2017 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2018 }
2019 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE))
2020 {
2021 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2022 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICACTIVERnE_OFF_START) / cbReg;
2023 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2024 }
2025
2026 /*
2027 * GICD_IPRIORITYR<n> and GICD_IPRIORITYR<n>E.
2028 */
2029 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE))
2030 {
2031 uint16_t const idxReg = (offReg - GIC_DIST_REG_IPRIORITYRn_OFF_START) / cbReg;
2032 return gicDistReadIntrPriorityReg(pGicDev, idxReg, puValue);
2033 }
2034 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE))
2035 {
2036 uint16_t const idxExt = RT_ELEMENTS(pGicDev->abIntrPriority) / (2 * sizeof(uint32_t));
2037 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IPRIORITYRnE_OFF_START) / cbReg;
2038 return gicDistReadIntrPriorityReg(pGicDev, idxReg, puValue);
2039 }
2040
2041 /*
2042 * GICD_ISPENDR<n> and GICD_ISPENDR<n>E.
2043 * GICD_ICPENDR<n> and GICD_ICPENDR<n>E.
2044 */
2045 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE))
2046 {
2047 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISPENDRn_OFF_START) / cbReg;
2048 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2049 }
2050 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE))
2051 {
2052 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2053 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISPENDRnE_OFF_START) / cbReg;
2054 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2055 }
2056 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE))
2057 {
2058 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICPENDRn_OFF_START) / cbReg;
2059 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2060 }
2061 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE))
2062 {
2063 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2064 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICPENDRnE_OFF_START) / cbReg;
2065 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2066 }
2067
2068 /*
2069 * GICD_ICFGR<n> and GICD_ICFGR<n>E.
2070 */
2071 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE))
2072 {
2073 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICFGRn_OFF_START) / cbReg;
2074 return gicDistReadIntrConfigReg(pGicDev, idxReg, puValue);
2075 }
2076 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE))
2077 {
2078 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrConfig) / 2;
2079 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICFGRnE_OFF_START) / cbReg;
2080 return gicDistReadIntrConfigReg(pGicDev, idxReg, puValue);
2081 }
2082 }
2083
2084 switch (offReg)
2085 {
2086 case GIC_DIST_REG_CTLR_OFF:
2087 Assert(pGicDev->fAffRoutingEnabled);
2088 *puValue = (pGicDev->fIntrGroup0Enabled ? GIC_DIST_REG_CTRL_ENABLE_GRP0 : 0)
2089 | (pGicDev->fIntrGroup1Enabled ? GIC_DIST_REG_CTRL_ENABLE_GRP1_NS : 0)
2090 | GIC_DIST_REG_CTRL_DS /* We don't support multiple security states. */
2091 | GIC_DIST_REG_CTRL_ARE_S; /* We don't support GICv2 backwards compatibility, ARE is always enabled. */
2092 break;
2093 case GIC_DIST_REG_TYPER_OFF:
2094 {
2095 Assert(pGicDev->uMaxSpi > 0 && pGicDev->uMaxSpi <= GIC_DIST_REG_TYPER_NUM_ITLINES);
2096 Assert(pGicDev->fAffRoutingEnabled);
2097 *puValue = GIC_DIST_REG_TYPER_NUM_ITLINES_SET(pGicDev->uMaxSpi)
2098 | GIC_DIST_REG_TYPER_NUM_PES_SET(0) /* Affinity routing is always enabled, hence this MBZ. */
2099 /*| GIC_DIST_REG_TYPER_NMI*/ /** @todo Support non-maskable interrupts */
2100 /*| GIC_DIST_REG_TYPER_SECURITY_EXTN*/ /** @todo Support dual security states. */
2101 | (pGicDev->fMbi ? GIC_DIST_REG_TYPER_MBIS : 0)
2102 | (pGicDev->fRangeSel ? GIC_DIST_REG_TYPER_RSS : 0)
2103 | GIC_DIST_REG_TYPER_IDBITS_SET(15) /* We only support 16-bit interrupt IDs. */
2104 | (pGicDev->fAff3Levels ? GIC_DIST_REG_TYPER_A3V : 0);
2105 if (pGicDev->fExtSpi)
2106 *puValue |= GIC_DIST_REG_TYPER_ESPI
2107 | GIC_DIST_REG_TYPER_ESPI_RANGE_SET(pGicDev->uMaxExtSpi);
2108 if (pGicDev->fLpi)
2109 {
2110 Assert(pGicDev->uMaxLpi - 2 < 13);
2111 Assert(GIC_INTID_RANGE_LPI_START + (UINT32_C(2) << pGicDev->uMaxLpi) <= UINT16_MAX);
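/* As the assertion above implies, the advertised number of LPIs is 2^(uMaxLpi + 1), starting at
   INTID 8192 (GIC_INTID_RANGE_LPI_START), and the last LPI INTID must fit in our 16-bit INTID space. */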
2112 *puValue |= GIC_DIST_REG_TYPER_LPIS
2113 | GIC_DIST_REG_TYPER_NUM_LPIS_SET(pGicDev->uMaxLpi);
2114 }
2115 break;
2116 }
2117 case GIC_DIST_REG_PIDR2_OFF:
2118 Assert(pGicDev->uArchRev <= GIC_DIST_REG_PIDR2_ARCHREV_GICV4);
2119 *puValue = GIC_DIST_REG_PIDR2_ARCHREV_SET(pGicDev->uArchRev);
2120 break;
2121 case GIC_DIST_REG_IIDR_OFF:
2122 *puValue = GIC_DIST_REG_IIDR_IMPL_SET(GIC_JEDEC_JEP106_IDENTIFICATION_CODE, GIC_JEDEC_JEP106_CONTINUATION_CODE);
2123 break;
2124 case GIC_DIST_REG_TYPER2_OFF:
2125 *puValue = 0;
2126 break;
2127 default:
2128 AssertReleaseMsgFailed(("offReg=%#x\n", offReg));
2129 *puValue = 0;
2130 break;
2131 }
2132 return VINF_SUCCESS;
2133}
2134
2135
2136/**
2137 * Writes a distributor register.
2138 *
2139 * @returns Strict VBox status code.
2140 * @param pDevIns The device instance.
2141 * @param pVCpu The cross context virtual CPU structure.
2142 * @param offReg The offset of the register being written.
2143 * @param uValue The register value.
2144 */
2145DECLINLINE(VBOXSTRICTRC) gicDistWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2146{
2147 VMCPU_ASSERT_EMT(pVCpu); RT_NOREF(pVCpu);
2148 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2149 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
2150
2151 /*
2152 * 64-bit registers.
2153 */
2154 {
2155 /*
2156 * GICD_IROUTER<n> and GICD_IROUTER<n>E.
2157 */
2158 uint16_t const cbReg = sizeof(uint64_t);
2159 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE))
2160 {
2161 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
2162 uint16_t const idxExt = GIC_INTID_RANGE_SPI_START;
2163 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERn_OFF_START) / cbReg;
2164 return gicDistWriteIntrRoutingReg(pGicDev, idxReg, uValue);
2165 }
2166 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE))
2167 {
2168 uint16_t const idxExt = RT_ELEMENTS(pGicDev->au32IntrRouting) / 2;
2169 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERnE_OFF_START) / cbReg;
2170 return gicDistWriteIntrRoutingReg(pGicDev, idxReg, uValue);
2171 }
2172
2173 }
2174
2175 /*
2176 * 32-bit registers.
2177 */
2178 {
2179 /*
2180 * GICD_IGROUPR<n> and GICD_IGROUPR<n>E.
2181 */
2182 uint16_t const cbReg = sizeof(uint32_t);
2183 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE))
2184 {
2185 uint16_t const idxReg = (offReg - GIC_DIST_REG_IGROUPRn_OFF_START) / cbReg;
2186 return gicDistWriteIntrGroupReg(pVM, pGicDev, idxReg, uValue);
2187 }
2188 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE))
2189 {
2190 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrGroup) / 2;
2191 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IGROUPRnE_OFF_START) / cbReg;
2192 return gicDistWriteIntrGroupReg(pVM, pGicDev, idxReg, uValue);
2193 }
2194
2195 /*
2196 * GICD_ISENABLER<n> and GICD_ISENABLER<n>E.
2197 * GICD_ICENABLER<n> and GICD_ICENABLER<n>E.
2198 */
2199 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE))
2200 {
2201 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISENABLERn_OFF_START) / cbReg;
2202 return gicDistWriteIntrSetEnableReg(pVM, pGicDev, idxReg, uValue);
2203 }
2204 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE))
2205 {
2206 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
2207 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISENABLERnE_OFF_START) / cbReg;
2208 return gicDistWriteIntrSetEnableReg(pVM, pGicDev, idxReg, uValue);
2209 }
2210 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE))
2211 {
2212 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICENABLERn_OFF_START) / cbReg;
2213 return gicDistWriteIntrClearEnableReg(pVM, pGicDev, idxReg, uValue);
2214 }
2215 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE))
2216 {
2217 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
2218 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICENABLERnE_OFF_START) / cbReg;
2219 return gicDistWriteIntrClearEnableReg(pVM, pGicDev, idxReg, uValue);
2220 }
2221
2222 /*
2223 * GICD_ISACTIVER<n> and GICD_ISACTIVER<n>E.
2224 * GICD_ICACTIVER<n> and GICD_ICACTIVER<n>E.
2225 */
2226 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE))
2227 {
2228 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISACTIVERn_OFF_START) / cbReg;
2229 return gicDistWriteIntrSetActiveReg(pVM, pGicDev, idxReg, uValue);
2230 }
2231 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE))
2232 {
2233 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2234 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISACTIVERnE_OFF_START) / cbReg;
2235 return gicDistWriteIntrSetActiveReg(pVM, pGicDev, idxReg, uValue);
2236 }
2237 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE))
2238 {
2239 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICACTIVERn_OFF_START) / cbReg;
2240 return gicDistWriteIntrClearActiveReg(pVM, pGicDev, idxReg, uValue);
2241 }
2242 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE))
2243 {
2244 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2245 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICACTIVERnE_OFF_START) / cbReg;
2246 return gicDistWriteIntrClearActiveReg(pVM, pGicDev, idxReg, uValue);
2247 }
2248
2249 /*
2250 * GICD_IPRIORITYR<n> and GICD_IPRIORITYR<n>E.
2251 */
2252 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE))
2253 {
2254 uint16_t const idxReg = (offReg - GIC_DIST_REG_IPRIORITYRn_OFF_START) / cbReg;
2255 return gicDistWriteIntrPriorityReg(pGicDev, idxReg, uValue);
2256 }
2257 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE))
2258 {
2259 uint16_t const idxExt = RT_ELEMENTS(pGicDev->abIntrPriority) / (2 * sizeof(uint32_t));
2260 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IPRIORITYRnE_OFF_START) / cbReg;
2261 return gicDistWriteIntrPriorityReg(pGicDev, idxReg, uValue);
2262 }
2263
2264 /*
2265 * GICD_ISPENDR<n> and GICD_ISPENDR<n>E.
2266 * GICD_ICPENDR<n> and GICD_ICPENDR<n>E.
2267 */
2268 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE))
2269 {
2270 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISPENDRn_OFF_START) / cbReg;
2271 return gicDistWriteIntrSetPendingReg(pVM, pGicDev, idxReg, uValue);
2272 }
2273 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE))
2274 {
2275 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2276 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISPENDRnE_OFF_START) / cbReg;
2277 return gicDistWriteIntrSetPendingReg(pVM, pGicDev, idxReg, uValue);
2278 }
2279 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE))
2280 {
2281 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICPENDRn_OFF_START) / cbReg;
2282 return gicDistWriteIntrClearPendingReg(pVM, pGicDev, idxReg, uValue);
2283 }
2284 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE))
2285 {
2286 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2287 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICPENDRnE_OFF_START) / cbReg;
2288 return gicDistWriteIntrClearPendingReg(pVM, pGicDev, idxReg, uValue);
2289 }
2290
2291 /*
2292 * GICD_ICFGR<n> and GICD_ICFGR<n>E.
2293 */
2294 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE))
2295 {
2296 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICFGRn_OFF_START) / cbReg;
2297 return gicDistWriteIntrConfigReg(pGicDev, idxReg, uValue);
2298 }
2299 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE))
2300 {
2301 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrConfig) / 2;
2302 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICFGRnE_OFF_START) / cbReg;
2303 return gicDistWriteIntrConfigReg(pGicDev, idxReg, uValue);
2304 }
2305 }
2306
2307 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2308 switch (offReg)
2309 {
2310 case GIC_DIST_REG_CTLR_OFF:
2311 Assert(!(uValue & GIC_DIST_REG_CTRL_ARE_NS));
2312 pGicDev->fIntrGroup0Enabled = RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP0);
2313 pGicDev->fIntrGroup1Enabled = RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP1_NS);
2314 rcStrict = gicDistUpdateIrqState(pVM, pGicDev);
2315 break;
2316 default:
2317 {
2318 /* Windows 11 arm64 (24H2) writes zeroes into these reserved registers. We ignore them. */
2319 if (offReg >= 0x7fe0 && offReg <= 0x7ffc)
2320 LogFlowFunc(("Bad guest writing to reserved GIC distributor register space [0x7fe0..0x7ffc] -- ignoring!"));
2321 else
2322 AssertReleaseMsgFailed(("offReg=%#x uValue=%#RX32\n", offReg, uValue));
2323 break;
2324 }
2325 }
2326
2327 return rcStrict;
2328}
2329
2330
2331/**
2332 * Reads a GIC redistributor register.
2333 *
2334 * @returns VBox status code.
2335 * @param pDevIns The device instance.
2336 * @param pVCpu The cross context virtual CPU structure.
2337 * @param idRedist The redistributor ID.
2338 * @param offReg The offset of the register being read.
2339 * @param puValue Where to store the register value.
2340 */
2341DECLINLINE(VBOXSTRICTRC) gicReDistReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint32_t idRedist, uint16_t offReg, uint32_t *puValue)
2342{
2343 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2344 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2345 Assert(idRedist == pVCpu->idCpu);
2346
2347 switch (offReg)
2348 {
2349 case GIC_REDIST_REG_TYPER_OFF:
2350 *puValue = (pVCpu->idCpu == pVM->cCpus - 1 ? GIC_REDIST_REG_TYPER_LAST : 0)
2351 | GIC_REDIST_REG_TYPER_CPU_NUMBER_SET(idRedist)
2352 | GIC_REDIST_REG_TYPER_CMN_LPI_AFF_SET(GIC_REDIST_REG_TYPER_CMN_LPI_AFF_ALL)
2353 | (pGicDev->fExtPpi ? GIC_REDIST_REG_TYPER_PPI_NUM_SET(pGicDev->uMaxExtPpi) : 0)
2354 | (pGicDev->fLpi ? GIC_REDIST_REG_TYPER_PLPIS : 0);
2355 Assert(!pGicDev->fExtPpi || pGicDev->uMaxExtPpi > 0);
2356 break;
2357 case GIC_REDIST_REG_WAKER_OFF:
2358 *puValue = 0;
2359 break;
2360 case GIC_REDIST_REG_IIDR_OFF:
2361 *puValue = GIC_REDIST_REG_IIDR_IMPL_SET(GIC_JEDEC_JEP106_IDENTIFICATION_CODE, GIC_JEDEC_JEP106_CONTINUATION_CODE);
2362 break;
2363 case GIC_REDIST_REG_TYPER_AFFINITY_OFF:
2364 *puValue = idRedist;
2365 break;
2366 case GIC_REDIST_REG_PIDR2_OFF:
2367 Assert(pGicDev->uArchRev <= GIC_DIST_REG_PIDR2_ARCHREV_GICV4);
2368 *puValue = GIC_REDIST_REG_PIDR2_ARCHREV_SET(pGicDev->uArchRev);
2369 break;
2370 case GIC_REDIST_REG_CTLR_OFF:
2371 *puValue = (pGicDev->fEnableLpis ? GIC_REDIST_REG_CTLR_ENABLE_LPI : 0)
2372 | GIC_REDIST_REG_CTLR_CES_SET(1);
2373 break;
2374 case GIC_REDIST_REG_PROPBASER_OFF:
2375 *puValue = pGicDev->uLpiConfigBaseReg.s.Lo;
2376 break;
2377 case GIC_REDIST_REG_PROPBASER_OFF + 4:
2378 *puValue = pGicDev->uLpiConfigBaseReg.s.Hi;
2379 break;
2380 case GIC_REDIST_REG_PENDBASER_OFF:
2381 *puValue = pGicDev->uLpiPendingBaseReg.s.Lo;
2382 break;
2383 case GIC_REDIST_REG_PENDBASER_OFF + 4:
2384 *puValue = pGicDev->uLpiPendingBaseReg.s.Hi;
2385 break;
2386 default:
2387 AssertReleaseMsgFailed(("offReg=%#x\n", offReg));
2388 *puValue = 0;
2389 break;
2390 }
2391 return VINF_SUCCESS;
2392}
2393
2394
2395/**
2396 * Reads a GIC redistributor SGI/PPI frame register.
2397 *
2398 * @returns VBox status code.
2399 * @param pDevIns The device instance.
2400 * @param pVCpu The cross context virtual CPU structure.
2401 * @param offReg The offset of the register being read.
2402 * @param puValue Where to store the register value.
2403 */
2404DECLINLINE(VBOXSTRICTRC) gicReDistReadSgiPpiRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
2405{
2406 VMCPU_ASSERT_EMT(pVCpu);
2408
2409 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2410 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2411 uint16_t const cbReg = sizeof(uint32_t);
2412
2413 /*
2414 * GICR_IGROUPR0 and GICR_IGROUPR<n>E.
2415 */
2416 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE))
2417 {
2418 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF) / cbReg;
2419 return gicReDistReadIntrGroupReg(pGicDev, pGicCpu, idxReg, puValue);
2420 }
2421
2422 /*
2423 * GICR_ISENABLER0 and GICR_ISENABLER<n>E.
2424 * GICR_ICENABLER0 and GICR_ICENABLER<n>E.
2425 */
2426 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE))
2427 {
2428 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF) / cbReg;
2429 return gicReDistReadIntrEnableReg(pGicDev, pGicCpu, idxReg, puValue);
2430 }
2431 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE))
2432 {
2433 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF) / cbReg;
2434 return gicReDistReadIntrEnableReg(pGicDev, pGicCpu, idxReg, puValue);
2435 }
2436
2437 /*
2438 * GICR_ISACTIVER0 and GICR_ISACTIVER<n>E.
2439 * GICR_ICACTIVER0 and GICR_ICACTIVER<n>E.
2440 */
2441 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE))
2442 {
2443 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF) / cbReg;
2444 return gicReDistReadIntrActiveReg(pGicCpu, idxReg, puValue);
2445 }
2446 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE))
2447 {
2448 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF) / cbReg;
2449 return gicReDistReadIntrActiveReg(pGicCpu, idxReg, puValue);
2450 }
2451
2452 /*
2453 * GICR_ISPENDR0 and GICR_ISPENDR<n>E.
2454 * GICR_ICPENDR0 and GICR_ICPENDR<n>E.
2455 */
2456 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE))
2457 {
2458 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF) / cbReg;
2459 return gicReDistReadIntrPendingReg(pGicDev, pGicCpu, idxReg, puValue);
2460 }
2461 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE))
2462 {
2463 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF) / cbReg;
2464 return gicReDistReadIntrPendingReg(pGicDev, pGicCpu, idxReg, puValue);
2465 }
2466
2467 /*
2468 * GICR_IPRIORITYR<n> and GICR_IPRIORITYR<n>E.
2469 */
2470 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE))
2471 {
2472 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START) / cbReg;
2473 return gicReDistReadIntrPriorityReg(pGicDev, pGicCpu, idxReg, puValue);
2474 }
2475
2476 /*
2477 * GICR_ICFGR0, GICR_ICFGR1 and GICR_ICFGR<n>E.
2478 */
2479 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE))
2480 {
2481 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF) / cbReg;
2482 return gicReDistReadIntrConfigReg(pGicDev, pGicCpu, idxReg, puValue);
2483 }
2484
2485 AssertReleaseMsgFailed(("offReg=%#x (%s)\n", offReg, gicReDistGetSgiPpiRegDescription(offReg)));
2486 *puValue = 0;
2487 return VINF_SUCCESS;
2488}
2489
2490
2491/**
2492 * Writes a GIC redistributor frame register.
2493 *
2494 * @returns Strict VBox status code.
2495 * @param pDevIns The device instance.
2496 * @param pVCpu The cross context virtual CPU structure.
2497 * @param offReg The offset of the register being written.
2498 * @param uValue The register value.
2499 */
2500DECLINLINE(VBOXSTRICTRC) gicReDistWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2501{
2502 VMCPU_ASSERT_EMT(pVCpu);
2503 RT_NOREF(pVCpu, uValue);
2504
2505 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2506 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2507 switch (offReg)
2508 {
2509 case GIC_REDIST_REG_WAKER_OFF:
2510 Assert(uValue == 0);
2511 break;
2512 case GIC_REDIST_REG_CTLR_OFF:
2513 {
2514 /* Check if LPIs are supported and whether the enable LPI bit changed. */
2515 uint32_t const uOldCtlr = pGicDev->fEnableLpis ? GIC_REDIST_REG_CTLR_ENABLE_LPI : 0;
2516 uint32_t const uNewCtlr = uValue & GIC_REDIST_REG_CTLR_ENABLE_LPI;
2517 if ( pGicDev->fLpi
2518 && ((uNewCtlr ^ uOldCtlr) & GIC_REDIST_REG_CTLR_ENABLE_LPI))
2519 {
2520 pGicDev->fEnableLpis = RT_BOOL(uNewCtlr & GIC_REDIST_REG_CTLR_ENABLE_LPI);
2521 if (pGicDev->fEnableLpis)
2522 {
2523 gicDistReadLpiConfigTableFromMem(pDevIns);
2524 gicReDistReadLpiPendingBitmapFromMem(pDevIns, pVCpu, pGicDev);
2525 }
2526 else
2527 {
2528 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2529 RT_ZERO(pGicCpu->bmLpiPending);
2530 }
2531 }
2532 break;
2533 }
2534 case GIC_REDIST_REG_PROPBASER_OFF:
2535 pGicDev->uLpiConfigBaseReg.s.Lo = uValue & RT_LO_U32(GIC_REDIST_REG_PROPBASER_RW_MASK);
2536 break;
2537 case GIC_REDIST_REG_PROPBASER_OFF + 4:
2538 pGicDev->uLpiConfigBaseReg.s.Hi = uValue & RT_HI_U32(GIC_REDIST_REG_PROPBASER_RW_MASK);
2539 break;
2540 case GIC_REDIST_REG_PENDBASER_OFF:
2541 pGicDev->uLpiPendingBaseReg.s.Lo = uValue & RT_LO_U32(GIC_REDIST_REG_PENDBASER_RW_MASK);
2542 break;
2543 case GIC_REDIST_REG_PENDBASER_OFF + 4:
2544 pGicDev->uLpiPendingBaseReg.s.Hi = uValue & RT_HI_U32(GIC_REDIST_REG_PENDBASER_RW_MASK);
2545 break;
2546 default:
2547 AssertReleaseMsgFailed(("offReg=%#x (%s) uValue=%#RX32\n", offReg, gicReDistGetRegDescription(offReg), uValue));
2548 break;
2549 }
2550
2551 return rcStrict;
2552}
2553
2554
2555/**
2556 * Writes a GIC redistributor SGI/PPI frame register.
2557 *
2558 * @returns Strict VBox status code.
2559 * @param pDevIns The device instance.
2560 * @param pVCpu The cross context virtual CPU structure.
2561 * @param offReg The offset of the register being written.
2562 * @param uValue The register value.
2563 */
2564DECLINLINE(VBOXSTRICTRC) gicReDistWriteSgiPpiRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2565{
2566 VMCPU_ASSERT_EMT(pVCpu);
2567 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
2568 uint16_t const cbReg = sizeof(uint32_t);
2569
2570 /*
2571 * GICR_IGROUPR0 and GICR_IGROUPR<n>E.
2572 */
2573 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE))
2574 {
2575 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF) / cbReg;
2576 return gicReDistWriteIntrGroupReg(pGicDev, pVCpu, idxReg, uValue);
2577 }
2578
2579 /*
2580 * GICR_ISENABLER0 and GICR_ISENABLER<n>E.
2581 * GICR_ICENABLER0 and GICR_ICENABLER<n>E.
2582 */
2583 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE))
2584 {
2585 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF) / cbReg;
2586 return gicReDistWriteIntrSetEnableReg(pGicDev, pVCpu, idxReg, uValue);
2587 }
2588 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE))
2589 {
2590 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF) / cbReg;
2591 return gicReDistWriteIntrClearEnableReg(pGicDev, pVCpu, idxReg, uValue);
2592 }
2593
2594 /*
2595 * GICR_ISACTIVER0 and GICR_ISACTIVER<n>E.
2596 * GICR_ICACTIVER0 and GICR_ICACTIVER<n>E.
2597 */
2598 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE))
2599 {
2600 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF) / cbReg;
2601 return gicReDistWriteIntrSetActiveReg(pGicDev, pVCpu, idxReg, uValue);
2602 }
2603 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE))
2604 {
2605 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF) / cbReg;
2606 return gicReDistWriteIntrClearActiveReg(pGicDev, pVCpu, idxReg, uValue);
2607 }
2608
2609 /*
2610 * GICR_ISPENDR0 and GICR_ISPENDR<n>E.
2611 * GICR_ICPENDR0 and GICR_ICPENDR<n>E.
2612 */
2613 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE))
2614 {
2615 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF) / cbReg;
2616 return gicReDistWriteIntrSetPendingReg(pGicDev, pVCpu, idxReg, uValue);
2617 }
2618 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE))
2619 {
2620 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF) / cbReg;
2621 return gicReDistWriteIntrClearPendingReg(pGicDev, pVCpu, idxReg, uValue);
2622 }
2623
2624 /*
2625 * GICR_IPRIORITYR<n> and GICR_IPRIORITYR<n>E.
2626 */
2627 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE))
2628 {
2629 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START) / cbReg;
2630 return gicReDistWriteIntrPriorityReg(pGicDev, pVCpu, idxReg, uValue);
2631 }
2632
2633 /*
2634 * GICR_ICFGR0, GICR_ICFGR1 and GICR_ICFGR<n>E.
2635 */
2636 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE))
2637 {
2638 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF) / cbReg;
2639 return gicReDistWriteIntrConfigReg(pGicDev, pVCpu, idxReg, uValue);
2640 }
2641
2642 AssertReleaseMsgFailed(("offReg=%#RX16 (%s)\n", offReg, gicReDistGetSgiPpiRegDescription(offReg)));
2643 return VERR_INTERNAL_ERROR_2;
2644}
2645
2646
2647/**
2648 * @interface_method_impl{PDMGICBACKEND,pfnSetSpi}
2649 */
2650static DECLCALLBACK(int) gicSetSpi(PVMCC pVM, uint32_t uSpiIntId, bool fAsserted)
2651{
2652 LogFlowFunc(("pVM=%p uSpiIntId=%u fAsserted=%RTbool\n",
2653 pVM, uSpiIntId, fAsserted));
2654
2655 PGIC pGic = VM_TO_GIC(pVM);
2656 PPDMDEVINS pDevIns = pGic->CTX_SUFF(pDevIns);
2657 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2658
2659#ifdef VBOX_WITH_STATISTICS
2660 PVMCPU pVCpu = VMMGetCpuById(pVM, 0);
2661 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetSpi);
2662 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2663#endif
2664 STAM_PROFILE_START(&pGicCpu->StatProfSetSpi, a);
2665
2666 uint16_t const uIntId = GIC_INTID_RANGE_SPI_START + uSpiIntId;
2667 uint16_t const idxIntr = gicDistGetIndexFromIntId(uIntId);
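/* Note: uSpiIntId is zero-based, so e.g. device SPI 0 corresponds to INTID 32 (GIC_INTID_RANGE_SPI_START). */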
2668
2669 Assert(idxIntr >= GIC_INTID_RANGE_SPI_START);
2670 AssertMsgReturn(idxIntr < sizeof(pGicDev->bmIntrPending) * 8,
2671 ("out-of-range SPI interrupt ID %RU32 (%RU32)\n", uIntId, uSpiIntId),
2672 VERR_INVALID_PARAMETER);
2673
2674 GIC_CRIT_SECT_ENTER(pDevIns);
2675
2676 /* Update the interrupt pending state. */
2677 if (fAsserted)
2678 ASMBitSet(&pGicDev->bmIntrPending[0], idxIntr);
2679 else
2680 ASMBitClear(&pGicDev->bmIntrPending[0], idxIntr);
2681
2682 int const rc = VBOXSTRICTRC_VAL(gicDistUpdateIrqState(pVM, pGicDev));
2683 STAM_PROFILE_STOP(&pGicCpu->StatProfSetSpi, a);
2684
2685 GIC_CRIT_SECT_LEAVE(pDevIns);
2686 return rc;
2687}
2688
2689
2690/**
2691 * @interface_method_impl{PDMGICBACKEND,pfnSetPpi}
2692 */
2693static DECLCALLBACK(int) gicSetPpi(PVMCPUCC pVCpu, uint32_t uPpiIntId, bool fAsserted)
2694{
2695 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uPpiIntId=%u fAsserted=%RTbool\n", pVCpu, pVCpu->idCpu, uPpiIntId, fAsserted));
2696
2697 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2698 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
2699 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2700
2701 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetPpi);
2702 STAM_PROFILE_START(&pGicCpu->StatProfSetPpi, b);
2703
2704 uint32_t const uIntId = GIC_INTID_RANGE_PPI_START + uPpiIntId;
2705 uint16_t const idxIntr = gicReDistGetIndexFromIntId(uIntId);
2706
2707 Assert(idxIntr >= GIC_INTID_RANGE_PPI_START);
2708 AssertMsgReturn(idxIntr < sizeof(pGicCpu->bmIntrPending) * 8,
2709 ("out-of-range PPI interrupt ID %RU32 (%RU32)\n", uIntId, uPpiIntId),
2710 VERR_INVALID_PARAMETER);
2711
2712 GIC_CRIT_SECT_ENTER(pDevIns);
2713
2714 /* Update the interrupt pending state. */
2715 if (fAsserted)
2716 ASMBitSet(&pGicCpu->bmIntrPending[0], idxIntr);
2717 else
2718 ASMBitClear(&pGicCpu->bmIntrPending[0], idxIntr);
2719
2720 int const rc = VBOXSTRICTRC_VAL(gicReDistUpdateIrqState(pGicDev, pVCpu));
2721 STAM_PROFILE_STOP(&pGicCpu->StatProfSetPpi, b);
2722
2723 GIC_CRIT_SECT_LEAVE(pDevIns);
2724 return rc;
2725}
2726
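/**
 * A minimal sketch, using hypothetical structures rather than the ones above, of why
 * PPI state lives in the per-VCPU (redistributor) structure while SPI state lives in
 * the per-VM (distributor) structure: SGIs (INTIDs 0..15) and PPIs (16..31) are
 * private to a single PE, whereas SPIs (32 and up) are shared by all PEs.
 *
 * @code
 * #include <stdint.h>
 *
 * typedef struct EXREDISTSTATE        // one instance per virtual CPU
 * {
 *     uint32_t bmSgiPpiPending;       // pending bits for INTIDs 0..31 of this CPU only
 * } EXREDISTSTATE;
 *
 * typedef struct EXDISTSTATE          // one instance per VM
 * {
 *     uint32_t bmSpiPending[31];      // pending bits for the shared SPI range
 * } EXDISTSTATE;
 * @endcode
 */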
2727
2728/**
2729 * Sets the specified software generated interrupt (SGI).
2730 *
2731 * @returns Strict VBox status code.
2732 * @param pGicDev The GIC distributor state.
2733 * @param pVCpu The cross context virtual CPU structure.
2734 * @param pDestCpuSet Which CPUs to deliver the SGI to.
2735 * @param uIntId The SGI interrupt ID.
2736 */
2737static VBOXSTRICTRC gicSetSgi(PCGICDEV pGicDev, PVMCPUCC pVCpu, PCVMCPUSET pDestCpuSet, uint8_t uIntId)
2738{
2739 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uIntId=%u\n", pVCpu, pVCpu->idCpu, uIntId));
2740
2741 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2742 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2743 uint32_t const cCpus = pVM->cCpus;
2744 AssertReturn(uIntId <= GIC_INTID_RANGE_SGI_LAST, VERR_INVALID_PARAMETER);
2745 Assert(GIC_CRIT_SECT_IS_OWNER(pDevIns)); NOREF(pDevIns);
2746
2747 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
2748 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
2749 {
2750 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVM->CTX_SUFF(apCpus)[idCpu]);
2751 pGicCpu->bmIntrPending[0] |= RT_BIT_32(uIntId);
2752 }
2753
2754 return gicDistUpdateIrqState(pVM, pGicDev);
2755}
2756
2757
2758/**
2759 * Writes to the redistributor's SGI group 1 register (ICC_SGI1R_EL1).
2760 *
2761 * @returns Strict VBox status code.
2762 * @param pGicDev The GIC distributor state.
2763 * @param pVCpu The cross context virtual CPU structure.
2764 * @param uValue The value being written to the ICC_SGI1R_EL1 register.
2765 */
2766static VBOXSTRICTRC gicReDistWriteSgiReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint64_t uValue)
2767{
2768#ifdef VBOX_WITH_STATISTICS
2769 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2770 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetSgi);
2771 STAM_PROFILE_START(&pGicCpu->StatProfSetSgi, c);
2772#else
2773 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2774#endif
2775
2776 VMCPUSET DestCpuSet;
2777 if (uValue & ARMV8_ICC_SGI1R_EL1_AARCH64_IRM)
2778 {
2779 /*
2780 * Deliver to all VCPUs but this one.
2781 */
2782 VMCPUSET_FILL(&DestCpuSet);
2783 VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
2784 }
2785 else
2786 {
2787 /*
2788 * Target specific VCPUs.
2789 * See ARM GICv3 and GICv4 Software Overview spec 3.3 "Affinity routing".
2790 */
2791 VMCPUSET_EMPTY(&DestCpuSet);
2792 bool const fRangeSelSupport = RT_BOOL(pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_RSS);
2793 uint8_t const idRangeStart = ARMV8_ICC_SGI1R_EL1_AARCH64_RS_GET(uValue) * 16;
2794 uint16_t const bmCpuInterfaces = ARMV8_ICC_SGI1R_EL1_AARCH64_TARGET_LIST_GET(uValue);
2795 uint8_t const uAff1 = ARMV8_ICC_SGI1R_EL1_AARCH64_AFF1_GET(uValue);
2796 uint8_t const uAff2 = ARMV8_ICC_SGI1R_EL1_AARCH64_AFF2_GET(uValue);
2797 uint8_t const uAff3 = (pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_A3V)
2798 ? ARMV8_ICC_SGI1R_EL1_AARCH64_AFF3_GET(uValue)
2799 : 0;
2800 uint32_t const cCpus = pVCpu->CTX_SUFF(pVM)->cCpus;
2801 for (uint8_t idCpuInterface = 0; idCpuInterface < 16; idCpuInterface++)
2802 {
2803 if (bmCpuInterfaces & RT_BIT(idCpuInterface))
2804 {
2805 VMCPUID idCpuTarget;
2806 if (fRangeSelSupport)
2807 idCpuTarget = RT_MAKE_U32_FROM_U8(idRangeStart + idCpuInterface, uAff1, uAff2, uAff3);
2808 else
2809 idCpuTarget = gicGetCpuIdFromAffinity(idCpuInterface, uAff1, uAff2, uAff3);
2810 if (RT_LIKELY(idCpuTarget < cCpus))
2811 VMCPUSET_ADD(&DestCpuSet, idCpuTarget);
2812 else
2813 AssertReleaseMsgFailed(("VCPU ID out-of-bounds %RU32, must be < %u\n", idCpuTarget, cCpus));
2814 }
2815 }
2816 }
2817
2818 if (!VMCPUSET_IS_EMPTY(&DestCpuSet))
2819 {
2820 uint8_t const uSgiIntId = ARMV8_ICC_SGI1R_EL1_AARCH64_INTID_GET(uValue);
2821 Assert(GIC_IS_INTR_SGI(uSgiIntId));
2822 VBOXSTRICTRC const rcStrict = gicSetSgi(pGicDev, pVCpu, &DestCpuSet, uSgiIntId);
2823 Assert(RT_SUCCESS(rcStrict)); RT_NOREF_PV(rcStrict);
2824 }
2825
2826 STAM_PROFILE_STOP(&pGicCpu->StatProfSetSgi, c);
2827 return VINF_SUCCESS;
2828}
2829
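/**
 * A sketch of decoding the ICC_SGI1R_EL1 fields consumed above, assuming the GICv3
 * architectural layout (TargetList[15:0], Aff1[23:16], INTID[27:24], Aff2[39:32],
 * IRM[40], RS[47:44], Aff3[55:48]); all names here are hypothetical. With range
 * selection, the target's Aff0 is 16*RS plus the set bit's index in the target list;
 * without it, the affinity values have to be matched against each PE's MPIDR.
 *
 * @code
 * #include <stdint.h>
 *
 * typedef struct EXSGI1R
 * {
 *     uint16_t bmTargetList;  // [15:0]  one bit per CPU interface in the Aff0 range
 *     uint8_t  uAff1;         // [23:16]
 *     uint8_t  uSgiIntId;     // [27:24] SGI INTID 0..15
 *     uint8_t  uAff2;         // [39:32]
 *     uint8_t  fIrm;          // [40]    1 = route to all PEs except the requester
 *     uint8_t  uRangeSel;     // [47:44] selects Aff0 values 16*RS .. 16*RS+15
 *     uint8_t  uAff3;         // [55:48]
 * } EXSGI1R;
 *
 * static EXSGI1R exDecodeSgi1r(uint64_t uValue)
 * {
 *     EXSGI1R Sgi;
 *     Sgi.bmTargetList = (uint16_t)( uValue        & 0xffff);
 *     Sgi.uAff1        = (uint8_t) ((uValue >> 16) & 0xff);
 *     Sgi.uSgiIntId    = (uint8_t) ((uValue >> 24) & 0xf);
 *     Sgi.uAff2        = (uint8_t) ((uValue >> 32) & 0xff);
 *     Sgi.fIrm         = (uint8_t) ((uValue >> 40) & 0x1);
 *     Sgi.uRangeSel    = (uint8_t) ((uValue >> 44) & 0xf);
 *     Sgi.uAff3        = (uint8_t) ((uValue >> 48) & 0xff);
 *     return Sgi;
 * }
 * @endcode
 */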
2830
2831/**
2832 * @interface_method_impl{PDMGICBACKEND,pfnReadSysReg}
2833 */
2834static DECLCALLBACK(VBOXSTRICTRC) gicReadSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
2835{
2836 /*
2837 * Validate.
2838 */
2839 VMCPU_ASSERT_EMT(pVCpu);
2840 Assert(pu64Value);
2841
2842 STAM_COUNTER_INC(&pVCpu->gic.s.StatSysRegRead);
2843
2844 *pu64Value = 0;
2845 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2846 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2847 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2848
2849 GIC_CRIT_SECT_ENTER(pDevIns);
2850
2851 switch (u32Reg)
2852 {
2853 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
2854 *pu64Value = pGicCpu->bIntrPriorityMask;
2855 break;
2856 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
2857 AssertReleaseFailed();
2858 break;
2859 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
2860 AssertReleaseFailed();
2861 break;
2862 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
2863 AssertReleaseFailed();
2864 break;
2865 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
2866 *pu64Value = ARMV8_ICC_BPR0_EL1_AARCH64_BINARYPOINT_SET(pGicCpu->bBinaryPtGroup0);
2867 break;
2868 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
2869 AssertReleaseFailed();
2870 *pu64Value = pGicCpu->bmActivePriorityGroup0[0];
2871 break;
2872 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
2873 AssertReleaseFailed();
2874 *pu64Value = pGicCpu->bmActivePriorityGroup0[1];
2875 break;
2876 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
2877 AssertReleaseFailed();
2878 *pu64Value = pGicCpu->bmActivePriorityGroup0[2];
2879 break;
2880 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
2881 AssertReleaseFailed();
2882 *pu64Value = pGicCpu->bmActivePriorityGroup0[3];
2883 break;
2884 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
2885 AssertReleaseFailed();
2886 *pu64Value = pGicCpu->bmActivePriorityGroup1[0];
2887 break;
2888 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
2889 AssertReleaseFailed();
2890 *pu64Value = pGicCpu->bmActivePriorityGroup1[1];
2891 break;
2892 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
2893 AssertReleaseFailed();
2894 *pu64Value = pGicCpu->bmActivePriorityGroup1[2];
2895 break;
2896 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
2897 AssertReleaseFailed();
2898 *pu64Value = pGicCpu->bmActivePriorityGroup1[3];
2899 break;
2900 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
2901 AssertReleaseFailed();
2902 break;
2903 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
2904 AssertReleaseFailed();
2905 break;
2906 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
2907 *pu64Value = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority];
2908 break;
2909 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
2910 AssertReleaseFailed();
2911 break;
2912 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
2913 AssertReleaseFailed();
2914 break;
2915 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
2916 AssertReleaseFailed();
2917 break;
2918 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
2919 *pu64Value = gicAckHighestPriorityPendingIntr(pGicDev, pVCpu, false /*fGroup0*/, true /*fGroup1*/);
2920 break;
2921 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
2922 AssertReleaseFailed();
2923 break;
2924 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
2925 {
2926 AssertReleaseFailed();
2927 *pu64Value = gicGetHighestPriorityPendingIntr(pGicDev, pGicCpu, false /*fGroup0*/, true /*fGroup1*/,
2928 NULL /*pidxIntr*/, NULL /*pbPriority*/);
2929 break;
2930 }
2931 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
2932 *pu64Value = ARMV8_ICC_BPR1_EL1_AARCH64_BINARYPOINT_SET(pGicCpu->bBinaryPtGroup1);
2933 break;
2934 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
2935 *pu64Value = pGicCpu->uIccCtlr;
2936 break;
2937 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
2938 AssertReleaseFailed();
2939 break;
2940 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
2941 *pu64Value = pGicCpu->fIntrGroup0Enabled ? ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE : 0;
2942 break;
2943 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
2944 *pu64Value = pGicCpu->fIntrGroup1Enabled ? ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE : 0;
2945 break;
2946 default:
2947 AssertReleaseMsgFailed(("u32Reg=%#RX32\n", u32Reg));
2948 break;
2949 }
2950
2951 GIC_CRIT_SECT_LEAVE(pDevIns);
2952
2953 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} pu64Value=%RX64\n", pVCpu, u32Reg, gicIccGetRegDescription(u32Reg), *pu64Value));
2954 return VINF_SUCCESS;
2955}
2956
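/**
 * A rough sketch, with toy state and hypothetical helpers, of what the ICC_IAR1_EL1
 * read above conceptually does: select the highest-priority pending group 1 interrupt
 * that passes the priority mask, move it from pending to active, and return its
 * INTID, or the spurious INTID 1023 when nothing qualifies.
 *
 * @code
 * #include <stdint.h>
 *
 * #define EX_INTID_SPURIOUS  1023u
 *
 * typedef struct EXINTRSTATE
 * {
 *     uint32_t bmPending;       // toy state covering INTIDs 0..31 only
 *     uint32_t bmActive;
 *     uint8_t  abPriority[32];  // lower value = more urgent
 *     uint8_t  bPriorityMask;
 * } EXINTRSTATE;
 *
 * static uint32_t exAckHighestPendingIntr(EXINTRSTATE *pState)
 * {
 *     uint32_t uBestIntId = EX_INTID_SPURIOUS;
 *     uint8_t  bBestPrio  = 0xff;
 *     for (uint32_t i = 0; i < 32; i++)
 *         if (   (pState->bmPending & (UINT32_C(1) << i))
 *             && pState->abPriority[i] < pState->bPriorityMask
 *             && pState->abPriority[i] < bBestPrio)
 *         {
 *             uBestIntId = i;
 *             bBestPrio  = pState->abPriority[i];
 *         }
 *     if (uBestIntId != EX_INTID_SPURIOUS)
 *     {
 *         pState->bmPending &= ~(UINT32_C(1) << uBestIntId);
 *         pState->bmActive  |=  (UINT32_C(1) << uBestIntId);
 *     }
 *     return uBestIntId;
 * }
 * @endcode
 */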
2957
2958/**
2959 * @interface_method_impl{PDMGICBACKEND,pfnWriteSysReg}
2960 */
2961static DECLCALLBACK(VBOXSTRICTRC) gicWriteSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t u64Value)
2962{
2963 /*
2964 * Validate.
2965 */
2966 VMCPU_ASSERT_EMT(pVCpu);
2967 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} u64Value=%RX64\n", pVCpu, u32Reg, gicIccGetRegDescription(u32Reg), u64Value));
2968
2969 STAM_COUNTER_INC(&pVCpu->gic.s.StatSysRegWrite);
2970
2971 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2972 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2973 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2974
2975 GIC_CRIT_SECT_ENTER(pDevIns);
2976
2977 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2978 switch (u32Reg)
2979 {
2980 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
2981 LogFlowFunc(("ICC_PMR_EL1: Interrupt priority now %u\n", (uint8_t)u64Value));
2982 pGicCpu->bIntrPriorityMask = (uint8_t)u64Value;
2983 rcStrict = gicReDistUpdateIrqState(pGicDev, pVCpu);
2984 break;
2985 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
2986 AssertReleaseFailed();
2987 break;
2988 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
2989 AssertReleaseFailed();
2990 break;
2991 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
2992 AssertReleaseFailed();
2993 break;
2994 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
2995 pGicCpu->bBinaryPtGroup0 = (uint8_t)ARMV8_ICC_BPR0_EL1_AARCH64_BINARYPOINT_GET(u64Value);
2996 break;
2997 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
2998 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
2999 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
3000 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
3001 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
3002 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
3003 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
3004 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
3005            /* Writes are ignored; a well-behaved guest writes either all zeroes or the value it last read from this register. */
3006 break;
3007 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
3008 AssertReleaseFailed();
3009 break;
3010 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
3011 AssertReleaseFailed();
3012 break;
3013 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
3014 AssertReleaseFailed();
3015 break;
3016 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
3017 {
3018 gicReDistWriteSgiReg(pGicDev, pVCpu, u64Value);
3019 break;
3020 }
3021 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
3022 AssertReleaseFailed();
3023 break;
3024 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
3025 AssertReleaseFailed();
3026 break;
3027 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
3028 AssertReleaseFailed();
3029 break;
3030 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
3031 {
3032 /*
3033 * We only support priority drop + interrupt deactivation with writes to this register.
3034 * This avoids an extra access which would be required by software for deactivation.
3035 */
3036 Assert(!(pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_EOIMODE));
3037
3038 /*
3039 * Mark the interrupt as inactive, though it might still be pending.
3040 * It is up to the guest to ensure the interrupt ID belongs to the right group as
3041 * failure to do so results in unpredictable behavior.
3042 *
3043 * See ARM GIC spec. 12.2.10 "ICC_EOIR1_EL1, Interrupt Controller End Of Interrupt Register 1".
3044             * NOTE! The order of the 'if' checks below is crucial.
3045 */
3046 uint16_t const uIntId = (uint16_t)u64Value;
3047 if (uIntId <= GIC_INTID_RANGE_PPI_LAST)
3048 {
3049 /* SGIs and PPIs. */
3050 AssertCompile(GIC_INTID_RANGE_PPI_LAST < 8 * sizeof(pGicDev->bmIntrActive[0]));
3051 Assert(pGicDev->fAffRoutingEnabled);
3052 pGicCpu->bmIntrActive[0] &= ~RT_BIT_32(uIntId);
3053 }
3054 else if (uIntId <= GIC_INTID_RANGE_SPI_LAST)
3055 {
3056 /* SPIs. */
3057 uint16_t const idxIntr = /*gicDistGetIndexFromIntId*/(uIntId);
3058 AssertReturn(idxIntr < sizeof(pGicDev->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3059 ASMBitClear(&pGicDev->bmIntrActive[0], idxIntr);
3060 }
3061 else if (uIntId <= GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
3062 {
3063 /* Special interrupt IDs, ignored. */
3064 Log(("Ignoring write to EOI with special interrupt ID.\n"));
3065 break;
3066 }
3067 else if (uIntId <= GIC_INTID_RANGE_EXT_PPI_LAST)
3068 {
3069 /* Extended PPIs. */
3070 uint16_t const idxIntr = gicReDistGetIndexFromIntId(uIntId);
3071 AssertReturn(idxIntr < sizeof(pGicCpu->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3072 ASMBitClear(&pGicCpu->bmIntrActive[0], idxIntr);
3073 }
3074 else if (uIntId <= GIC_INTID_RANGE_EXT_SPI_LAST)
3075 {
3076 /* Extended SPIs. */
3077 uint16_t const idxIntr = gicDistGetIndexFromIntId(uIntId);
3078 AssertReturn(idxIntr < sizeof(pGicDev->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3079 ASMBitClear(&pGicDev->bmIntrActive[0], idxIntr);
3080 }
3081 else
3082 {
3083 AssertMsgFailed(("Invalid INTID %u\n", uIntId));
3084 break;
3085 }
3086
3087 /*
3088 * Drop priority by restoring previous interrupt.
3089 */
3090 if (RT_LIKELY(pGicCpu->idxRunningPriority))
3091 {
3092 LogFlowFunc(("Restoring interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
3093 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
3094 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority - 1],
3095 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority - 1));
3096
3097 /*
3098 * Clear the interrupt priority from the active priorities bitmap.
3099 * It is up to the guest to ensure that writes to EOI registers are done in the exact
3100 * reverse order of the reads from the IAR registers.
3101 *
3102 * See ARM GIC spec 4.1.1 "Physical CPU interface".
3103 */
3104 uint8_t const idxPreemptionLevel = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] >> 1;
3105 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
3106 ASMBitClear(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
3107
3108 pGicCpu->idxRunningPriority--;
3109 Assert(pGicCpu->abRunningPriorities[0] == GIC_IDLE_PRIORITY);
3110 }
3111 else
3112 AssertReleaseMsgFailed(("Index of running-priority interrupt out-of-bounds %u\n", pGicCpu->idxRunningPriority));
3113 rcStrict = gicReDistUpdateIrqState(pGicDev, pVCpu);
3114 break;
3115 }
3116 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
3117 AssertReleaseFailed();
3118 break;
3119 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
3120 pGicCpu->bBinaryPtGroup1 = (uint8_t)ARMV8_ICC_BPR1_EL1_AARCH64_BINARYPOINT_GET(u64Value);
3121 break;
3122 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
3123 pGicCpu->uIccCtlr &= ARMV8_ICC_CTLR_EL1_RW;
3124 /** @todo */
3125 break;
3126 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
3127 AssertReleaseFailed();
3128 break;
3129 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
3130 pGicCpu->fIntrGroup0Enabled = RT_BOOL(u64Value & ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE);
3131 break;
3132 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
3133 pGicCpu->fIntrGroup1Enabled = RT_BOOL(u64Value & ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE);
3134 break;
3135 default:
3136 AssertReleaseMsgFailed(("u32Reg=%#RX32\n", u32Reg));
3137 break;
3138 }
3139
3140 GIC_CRIT_SECT_LEAVE(pDevIns);
3141 return rcStrict;
3142}
3143
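/**
 * A minimal sketch, with hypothetical names, of the running-priority bookkeeping the
 * ICC_EOIR1_EL1 handler above relies on: acknowledging an interrupt (IAR) pushes its
 * priority onto a small stack, the EOI write pops it (priority drop), and ICC_RPR_EL1
 * reads return whatever is on top. Index 0 always holds the idle priority.
 *
 * @code
 * #include <stdint.h>
 *
 * #define EX_IDLE_PRIORITY  0xffu        // hypothetical idle running priority
 *
 * typedef struct EXPRIOSTACK
 * {
 *     uint8_t abPrio[32];                // abPrio[0] is always EX_IDLE_PRIORITY
 *     uint8_t idxTop;
 * } EXPRIOSTACK;
 *
 * static void exAckPushPriority(EXPRIOSTACK *pStack, uint8_t bPrio)  // on IAR read
 * {
 *     pStack->abPrio[++pStack->idxTop] = bPrio;
 * }
 *
 * static void exEoiDropPriority(EXPRIOSTACK *pStack)                 // on EOIR write
 * {
 *     if (pStack->idxTop)               // never pop the idle priority at index 0
 *         pStack->idxTop--;
 * }
 *
 * static uint8_t exRunningPriority(EXPRIOSTACK const *pStack)        // ICC_RPR_EL1
 * {
 *     return pStack->abPrio[pStack->idxTop];
 * }
 * @endcode
 */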
3144
3145/**
3146 * Initializes the GIC distributor state.
3147 *
3148 * @param pDevIns The device instance.
3149 * @remarks This is also called during VM reset, so do NOT remove values that are
3150 * cleared to zero!
3151 */
3152static void gicInit(PPDMDEVINS pDevIns)
3153{
3154 LogFlowFunc(("\n"));
3155 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3156
3157 /* Distributor. */
3158 RT_ZERO(pGicDev->bmIntrGroup);
3159 RT_ZERO(pGicDev->bmIntrConfig);
3160 RT_ZERO(pGicDev->bmIntrEnabled);
3161 RT_ZERO(pGicDev->bmIntrPending);
3162 RT_ZERO(pGicDev->bmIntrActive);
3163 RT_ZERO(pGicDev->abIntrPriority);
3164 RT_ZERO(pGicDev->au32IntrRouting);
3165 RT_ZERO(pGicDev->bmIntrRoutingMode);
3166 pGicDev->fIntrGroup0Enabled = false;
3167 pGicDev->fIntrGroup1Enabled = false;
3168 pGicDev->fAffRoutingEnabled = true; /* GICv2 backwards compatibility is not implemented, so this is RA1/WI. */
3169
3170 /* GITS. */
3171 PGITSDEV pGitsDev = &pGicDev->Gits;
3172 gitsInit(pGitsDev);
3173
3174 /* LPIs. */
3175 RT_ZERO(pGicDev->abLpiConfig);
3176 pGicDev->uLpiConfigBaseReg.u = 0;
3177 pGicDev->uLpiPendingBaseReg.u = 0;
3178 pGicDev->fEnableLpis = false;
3179}
3180
3181
3182/**
3183 * Initializes the GIC redistributor and CPU interface state.
3184 *
3185 * @param pDevIns The device instance.
3186 * @param pVCpu The cross context virtual CPU structure.
3187 * @remarks This is also called during VM reset, so do NOT remove values that are
3188 * cleared to zero!
3189 */
3190static void gicInitCpu(PPDMDEVINS pDevIns, PVMCPUCC pVCpu)
3191{
3192 LogFlowFunc(("[%u]\n", pVCpu->idCpu));
3193 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3194 PGICCPU pGicCpu = &pVCpu->gic.s;
3195
3196 RT_ZERO(pGicCpu->bmIntrGroup);
3197 RT_ZERO(pGicCpu->bmIntrConfig);
3198    /* SGIs are always edge-triggered; writes to GICR_ICFGR0 are therefore ignored. */
3199 pGicCpu->bmIntrConfig[0] = 0xaaaaaaaa;
3200 RT_ZERO(pGicCpu->bmIntrEnabled);
3201 RT_ZERO(pGicCpu->bmIntrPending);
3202 RT_ZERO(pGicCpu->bmIntrActive);
3203 RT_ZERO(pGicCpu->abIntrPriority);
3204
3205 pGicCpu->uIccCtlr = ARMV8_ICC_CTLR_EL1_AARCH64_PMHE
3206 | ARMV8_ICC_CTLR_EL1_AARCH64_PRIBITS_SET(4)
3207 | ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_SET(ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_16BITS)
3208 | (pGicDev->fRangeSel ? ARMV8_ICC_CTLR_EL1_AARCH64_RSS : 0)
3209 | (pGicDev->fAff3Levels ? ARMV8_ICC_CTLR_EL1_AARCH64_A3V : 0)
3210 | (pGicDev->fExtPpi || pGicDev->fExtSpi ? ARMV8_ICC_CTLR_EL1_AARCH64_EXTRANGE : 0);
3211
3212 pGicCpu->bIntrPriorityMask = 0; /* Means no interrupt gets through to the PE. */
3213 pGicCpu->idxRunningPriority = 0;
3214 memset((void *)&pGicCpu->abRunningPriorities[0], 0xff, sizeof(pGicCpu->abRunningPriorities));
3215 RT_ZERO(pGicCpu->bmActivePriorityGroup0);
3216 RT_ZERO(pGicCpu->bmActivePriorityGroup1);
3217 pGicCpu->bBinaryPtGroup0 = 0;
3218 pGicCpu->bBinaryPtGroup1 = 0;
3219 pGicCpu->fIntrGroup0Enabled = false;
3220 pGicCpu->fIntrGroup1Enabled = false;
3221 RT_ZERO(pGicCpu->bmLpiPending);
3222}
3223
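/**
 * A one-function sketch, assuming the GICv3 convention that numerically lower
 * priority values are more urgent, of why the reset value of 0 for the priority mask
 * above blocks everything: an interrupt is only forwarded to the PE when its priority
 * is strictly lower (more urgent) than the mask, so a mask of 0 passes nothing while
 * a mask of 0xff passes every usable priority.
 *
 * @code
 * #include <stdbool.h>
 * #include <stdint.h>
 *
 * static bool exPassesPriorityMask(uint8_t bIntrPriority, uint8_t bPriorityMask)
 * {
 *     return bIntrPriority < bPriorityMask;
 * }
 * @endcode
 */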
3224
3225/**
3226 * Initializes per-VM GIC to the state following a power-up or hardware
3227 * reset.
3228 *
3229 * @param pDevIns The device instance.
3230 */
3231DECLHIDDEN(void) gicReset(PPDMDEVINS pDevIns)
3232{
3233 LogFlowFunc(("\n"));
3234 gicInit(pDevIns);
3235}
3236
3237
3238/**
3239 * Initializes per-VCPU GIC to the state following a power-up or hardware
3240 * reset.
3241 *
3242 * @param pDevIns The device instance.
3243 * @param pVCpu The cross context virtual CPU structure.
3244 */
3245DECLHIDDEN(void) gicResetCpu(PPDMDEVINS pDevIns, PVMCPUCC pVCpu)
3246{
3247 LogFlowFunc(("[%u]\n", pVCpu->idCpu));
3248 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3249 gicInitCpu(pDevIns, pVCpu);
3250}
3251
3252
3253/**
3254 * @callback_method_impl{FNIOMMMIONEWREAD}
3255 */
3256DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3257{
3258 NOREF(pvUser);
3259 Assert(!(off & 0x3));
3260 Assert(cb == 4); RT_NOREF_PV(cb);
3261
3262 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
3263 uint16_t offReg = off & 0xfffc;
3264 uint32_t uValue = 0;
3265
3266 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioRead);
3267
3268 VBOXSTRICTRC rc = VBOXSTRICTRC_VAL(gicDistReadRegister(pDevIns, pVCpu, offReg, &uValue));
3269 *(uint32_t *)pv = uValue;
3270
3271 LogFlowFunc(("[%u]: offReg=%#RX16 (%s) uValue=%#RX32\n", pVCpu->idCpu, offReg, gicDistGetRegDescription(offReg), uValue));
3272 return rc;
3273}
3274
3275
3276/**
3277 * @callback_method_impl{FNIOMMMIONEWWRITE}
3278 */
3279DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3280{
3281 NOREF(pvUser);
3282 Assert(!(off & 0x3));
3283 Assert(cb == 4); RT_NOREF_PV(cb);
3284
3285 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
3286 uint16_t offReg = off & 0xfffc;
3287 uint32_t uValue = *(uint32_t *)pv;
3288
3289 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioWrite);
3290 LogFlowFunc(("[%u]: offReg=%#RX16 (%s) uValue=%#RX32\n", pVCpu->idCpu, offReg, gicDistGetRegDescription(offReg), uValue));
3291
3292 return gicDistWriteRegister(pDevIns, pVCpu, offReg, uValue);
3293}
3294
3295
3296/**
3297 * @callback_method_impl{FNIOMMMIONEWREAD}
3298 */
3299DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3300{
3301 NOREF(pvUser);
3302 Assert(!(off & 0x3));
3303 Assert(cb == 4); RT_NOREF_PV(cb);
3304
3305 /*
3306 * Determine the redistributor being targeted. Each redistributor takes
3307 * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
3308 * and the redistributors are adjacent.
3309 */
3310 uint32_t const idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3311 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3312
3313 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
3314 Assert(idReDist < pVM->cCpus);
3315 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[idReDist];
3316
3317 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioRead);
3318
3319 /* Redistributor or SGI/PPI frame? */
3320 uint16_t const offReg = off & 0xfffc;
3321 uint32_t uValue = 0;
3322 VBOXSTRICTRC rcStrict;
3323 if (off < GIC_REDIST_REG_FRAME_SIZE)
3324 rcStrict = gicReDistReadRegister(pDevIns, pVCpu, idReDist, offReg, &uValue);
3325 else
3326 rcStrict = gicReDistReadSgiPpiRegister(pDevIns, pVCpu, offReg, &uValue);
3327
3328 *(uint32_t *)pv = uValue;
3329 LogFlowFunc(("[%u]: off=%RGp idReDist=%u offReg=%#RX16 (%s) uValue=%#RX32 -> %Rrc\n", pVCpu->idCpu, off, idReDist, offReg,
3330 gicReDistGetRegDescription(offReg), uValue, VBOXSTRICTRC_VAL(rcStrict)));
3331 return rcStrict;
3332}
3333
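/**
 * A sketch of the per-redistributor MMIO decode described in the comment above, with
 * hypothetical 64 KiB frame sizes: each VCPU owns an RD_base frame immediately
 * followed by an SGI_base frame, so the stride is two frames, and the offset within
 * the stride picks both the frame and the dword-aligned register.
 *
 * @code
 * #include <stdbool.h>
 * #include <stdint.h>
 *
 * #define EX_RD_FRAME_SIZE   UINT64_C(0x10000)   // hypothetical RD_base frame size
 * #define EX_SGI_FRAME_SIZE  UINT64_C(0x10000)   // hypothetical SGI_base frame size
 *
 * typedef struct EXRDDECODE
 * {
 *     uint32_t idReDist;      // which redistributor (== VCPU index)
 *     bool     fSgiPpiFrame;  // false: RD_base registers, true: SGI_base registers
 *     uint16_t offReg;        // dword-aligned register offset within the frame
 * } EXRDDECODE;
 *
 * static EXRDDECODE exDecodeReDistOffset(uint64_t offMmio)
 * {
 *     uint64_t const cbStride = EX_RD_FRAME_SIZE + EX_SGI_FRAME_SIZE;
 *     uint64_t const off      = offMmio % cbStride;
 *     EXRDDECODE Decode;
 *     Decode.idReDist     = (uint32_t)(offMmio / cbStride);
 *     Decode.fSgiPpiFrame = off >= EX_RD_FRAME_SIZE;
 *     Decode.offReg       = (uint16_t)((Decode.fSgiPpiFrame ? off - EX_RD_FRAME_SIZE : off) & 0xfffc);
 *     return Decode;
 * }
 * @endcode
 */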
3334
3335/**
3336 * @callback_method_impl{FNIOMMMIONEWWRITE}
3337 */
3338DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3339{
3340 NOREF(pvUser);
3341 Assert(!(off & 0x3));
3342 Assert(cb == 4); RT_NOREF_PV(cb);
3343
3344 uint32_t uValue = *(uint32_t *)pv;
3345
3346 /*
3347 * Determine the redistributor being targeted. Each redistributor takes
3348 * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
3349 * and the redistributors are adjacent.
3350 */
3351 uint32_t const idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3352 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3353
3354 PCVMCC pVM = PDMDevHlpGetVM(pDevIns);
3355 Assert(idReDist < pVM->cCpus);
3356 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[idReDist];
3357
3358 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioWrite);
3359
3360 /* Redistributor or SGI/PPI frame? */
3361 uint16_t const offReg = off & 0xfffc;
3362 VBOXSTRICTRC rcStrict;
3363 if (off < GIC_REDIST_REG_FRAME_SIZE)
3364 rcStrict = gicReDistWriteRegister(pDevIns, pVCpu, offReg, uValue);
3365 else
3366 rcStrict = gicReDistWriteSgiPpiRegister(pDevIns, pVCpu, offReg, uValue);
3367
3368 LogFlowFunc(("[%u]: off=%RGp idReDist=%u offReg=%#RX16 (%s) uValue=%#RX32 -> %Rrc\n", pVCpu->idCpu, off, idReDist, offReg,
3369 gicReDistGetRegDescription(offReg), uValue, VBOXSTRICTRC_VAL(rcStrict)));
3370 return rcStrict;
3371}
3372
3373
3374/**
3375 * @callback_method_impl{FNIOMMMIONEWREAD}
3376 */
3377DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicItsMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3378{
3379 RT_NOREF_PV(pvUser);
3380 Assert(!(off & 0x3));
3381 Assert(cb == 8 || cb == 4);
3382
3383 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
3384 PCGITSDEV pGitsDev = &pGicDev->Gits;
3385 uint64_t uReg;
3386 if (off < GITS_REG_FRAME_SIZE)
3387 {
3388 /* Control registers space. */
3389 uint16_t const offReg = off & 0xfffc;
3390 uReg = gitsMmioReadCtrl(pGitsDev, offReg, cb);
3391 LogFlowFunc(("offReg=%#RX16 (%s) read %#RX64\n", offReg, gitsGetCtrlRegDescription(offReg), uReg));
3392 }
3393 else
3394 {
3395 /* Translation registers space. */
3396 uint16_t const offReg = (off - GITS_REG_FRAME_SIZE) & 0xfffc;
3397 uReg = gitsMmioReadTranslate(pGitsDev, offReg, cb);
3398 LogFlowFunc(("offReg=%#RX16 (%s) read %#RX64\n", offReg, gitsGetTranslationRegDescription(offReg), uReg));
3399 }
3400
3401 if (cb == 8)
3402 *(uint64_t *)pv = uReg;
3403 else
3404 *(uint32_t *)pv = uReg;
3405 return VINF_SUCCESS;
3406}
3407
3408
3409/**
3410 * @callback_method_impl{FNIOMMMIONEWWRITE}
3411 */
3412DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicItsMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3413{
3414 RT_NOREF_PV(pvUser);
3415 Assert(!(off & 0x3));
3416 Assert(cb == 8 || cb == 4);
3417
3418 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3419 PGITSDEV pGitsDev = &pGicDev->Gits;
3420
3421 uint64_t const uValue = cb == 8 ? *(uint64_t *)pv : *(uint32_t *)pv;
3422 if (off < GITS_REG_FRAME_SIZE)
3423 {
3424 /* Control registers space. */
3425 uint16_t const offReg = off & 0xfffc;
3426 gitsMmioWriteCtrl(pDevIns, pGitsDev, offReg, uValue, cb);
3427 LogFlowFunc(("offReg=%#RX16 (%s) written %#RX64\n", offReg, gitsGetCtrlRegDescription(offReg), uValue));
3428 }
3429 else
3430 {
3431 /* Translation registers space. */
3432 uint16_t const offReg = (off - GITS_REG_FRAME_SIZE) & 0xfffc;
3433 gitsMmioWriteTranslate(pGitsDev, offReg, uValue, cb);
3434 LogFlowFunc(("offReg=%#RX16 (%s) written %#RX64\n", offReg, gitsGetTranslationRegDescription(offReg), uValue));
3435 }
3436 return VINF_SUCCESS;
3437}
3438
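/**
 * A short sketch, with hypothetical names and an assumed 64 KiB control frame, of the
 * two-frame split handled by the ITS MMIO callbacks above: offsets below the control
 * frame size address the control registers, everything above addresses the
 * translation registers, and accesses may be 4 or 8 bytes wide.
 *
 * @code
 * #include <stdbool.h>
 * #include <stdint.h>
 *
 * #define EX_ITS_FRAME_SIZE  UINT64_C(0x10000)   // hypothetical control frame size
 *
 * static void exDecodeItsOffset(uint64_t offMmio, bool *pfTranslationFrame, uint16_t *poffReg)
 * {
 *     *pfTranslationFrame = offMmio >= EX_ITS_FRAME_SIZE;
 *     *poffReg            = (uint16_t)((*pfTranslationFrame ? offMmio - EX_ITS_FRAME_SIZE : offMmio) & 0xfffc);
 * }
 * @endcode
 */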
3439
3440/**
3441 * GIC device registration structure.
3442 */
3443const PDMDEVREG g_DeviceGIC =
3444{
3445 /* .u32Version = */ PDM_DEVREG_VERSION,
3446 /* .uReserved0 = */ 0,
3447 /* .szName = */ "gic",
3448 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
3449 /* .fClass = */ PDM_DEVREG_CLASS_PIC,
3450 /* .cMaxInstances = */ 1,
3451 /* .uSharedVersion = */ 42,
3452 /* .cbInstanceShared = */ sizeof(GICDEV),
3453 /* .cbInstanceCC = */ 0,
3454 /* .cbInstanceRC = */ 0,
3455 /* .cMaxPciDevices = */ 0,
3456 /* .cMaxMsixVectors = */ 0,
3457 /* .pszDescription = */ "Generic Interrupt Controller",
3458#if defined(IN_RING3)
3459 /* .szRCMod = */ "VMMRC.rc",
3460 /* .szR0Mod = */ "VMMR0.r0",
3461 /* .pfnConstruct = */ gicR3Construct,
3462 /* .pfnDestruct = */ gicR3Destruct,
3463 /* .pfnRelocate = */ NULL,
3464 /* .pfnMemSetup = */ NULL,
3465 /* .pfnPowerOn = */ NULL,
3466 /* .pfnReset = */ gicR3Reset,
3467 /* .pfnSuspend = */ NULL,
3468 /* .pfnResume = */ NULL,
3469 /* .pfnAttach = */ NULL,
3470 /* .pfnDetach = */ NULL,
3471 /* .pfnQueryInterface = */ NULL,
3472 /* .pfnInitComplete = */ NULL,
3473 /* .pfnPowerOff = */ NULL,
3474 /* .pfnSoftReset = */ NULL,
3475 /* .pfnReserved0 = */ NULL,
3476 /* .pfnReserved1 = */ NULL,
3477 /* .pfnReserved2 = */ NULL,
3478 /* .pfnReserved3 = */ NULL,
3479 /* .pfnReserved4 = */ NULL,
3480 /* .pfnReserved5 = */ NULL,
3481 /* .pfnReserved6 = */ NULL,
3482 /* .pfnReserved7 = */ NULL,
3483#elif defined(IN_RING0)
3484 /* .pfnEarlyConstruct = */ NULL,
3485 /* .pfnConstruct = */ NULL,
3486 /* .pfnDestruct = */ NULL,
3487 /* .pfnFinalDestruct = */ NULL,
3488 /* .pfnRequest = */ NULL,
3489 /* .pfnReserved0 = */ NULL,
3490 /* .pfnReserved1 = */ NULL,
3491 /* .pfnReserved2 = */ NULL,
3492 /* .pfnReserved3 = */ NULL,
3493 /* .pfnReserved4 = */ NULL,
3494 /* .pfnReserved5 = */ NULL,
3495 /* .pfnReserved6 = */ NULL,
3496 /* .pfnReserved7 = */ NULL,
3497#elif defined(IN_RC)
3498 /* .pfnConstruct = */ NULL,
3499 /* .pfnReserved0 = */ NULL,
3500 /* .pfnReserved1 = */ NULL,
3501 /* .pfnReserved2 = */ NULL,
3502 /* .pfnReserved3 = */ NULL,
3503 /* .pfnReserved4 = */ NULL,
3504 /* .pfnReserved5 = */ NULL,
3505 /* .pfnReserved6 = */ NULL,
3506 /* .pfnReserved7 = */ NULL,
3507#else
3508# error "Not in IN_RING3, IN_RING0 or IN_RC!"
3509#endif
3510 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
3511};
3512
3513
3514/**
3515 * The VirtualBox GIC backend.
3516 */
3517const PDMGICBACKEND g_GicBackend =
3518{
3519 /* .pfnReadSysReg = */ gicReadSysReg,
3520 /* .pfnWriteSysReg = */ gicWriteSysReg,
3521 /* .pfnSetSpi = */ gicSetSpi,
3522 /* .pfnSetPpi = */ gicSetPpi,
3523 /* .pfnSendMsi = */ gitsSendMsi,
3524};
3525