VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/GICAll.cpp

Last change on this file was 109003, checked in by vboxsync, 4 days ago

VMM/GIC: bugref:10877 Use LPI pending table in each redistributor and increase its size.

1/* $Id: GICAll.cpp 109003 2025-04-16 12:50:56Z vboxsync $ */
2/** @file
3 * GIC - Generic Interrupt Controller Architecture (GIC) - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_gic GIC - Generic Interrupt Controller
29 *
30 * The GIC is an interrupt controller device that lives in the VMM but also registers
31 * itself with PDM, similar to the APIC. The reason for this is that it needs to access
32 * per-VCPU data and is an integral part of any ARMv8 VM.
33 *
34 * The GIC is made up of 3 main components:
35 * - Distributor
36 * - Redistributor
37 * - Interrupt Translation Service (ITS)
38 *
39 * The distributor is per-VM while the redistributors are per-VCPU. PEs (Processing
40 * Elements) and CIs (CPU Interfaces) correspond to VCPUs. The distributor and
41 * redistributor each have their memory mapped I/O regions. The redistributor is
42 * accessible via CPU system registers as well. The distributor and redistributor
43 * code lives in GICAll.cpp and GICR3.cpp.
44 *
45 * The ITS is the interrupt translation service component of the GIC and its
46 * presence is optional. It provides MSI support along with routing interrupt
47 * sources to specific PEs. The ITS is only accessible via its memory mapped I/O
48 * region. When the MMIO handle for the ITS region is NIL_IOMMMIOHANDLE, the ITS is
49 * considered to be disabled for the VM. Most of the ITS code lives in GITSAll.cpp.
50 *
51 * This implementation targets GICv3 only. It supports neither dual security
52 * states nor the higher exception levels (EL2, EL3). Earlier
53 * versions are considered legacy and not important enough to be emulated.
54 * GICv4 primarily adds support for virtualizing the GIC and its necessity will be
55 * evaluated in the future if/when there is support for nested virtualization on
56 * ARMv8 hosts.
57 */
58
59
60/*********************************************************************************************************************************
61* Header Files *
62*********************************************************************************************************************************/
63#define LOG_GROUP LOG_GROUP_DEV_GIC
64#include "GICInternal.h"
65#include <VBox/vmm/pdmgic.h>
66#include <VBox/vmm/pdmdev.h>
67#include <VBox/vmm/pdmapi.h>
68#include <VBox/vmm/vmcc.h>
69#include <VBox/vmm/vmm.h>
70#include <VBox/vmm/vmcpuset.h>
71
72
73/*********************************************************************************************************************************
74* Defined Constants And Macros *
75*********************************************************************************************************************************/
76#define GIC_IDLE_PRIORITY 0xff
77#define GIC_IS_INTR_SGI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_SGI_START < GIC_INTID_SGI_RANGE_SIZE)
78#define GIC_IS_INTR_PPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_PPI_START < GIC_INTID_PPI_RANGE_SIZE)
79#define GIC_IS_INTR_SGI_OR_PPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_SGI_START < GIC_INTID_PPI_RANGE_SIZE)
80#define GIC_IS_INTR_SPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_SPI_START < GIC_INTID_SPI_RANGE_SIZE)
81#define GIC_IS_INTR_SPECIAL(a_uIntId) (a_uIntId - GIC_INTID_RANGE_SPECIAL_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
82#define GIC_IS_INTR_EXT_PPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_EXT_PPI_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
83#define GIC_IS_INTR_EXT_SPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_EXT_SPI_START < GIC_INTID_EXT_SPI_RANGE_SIZE)
84#define GIC_IS_REG_IN_RANGE(a_offReg, a_offFirst, a_cbRegion) ((uint32_t)(a_offReg) - (a_offFirst) < (a_cbRegion))
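/* Note: GIC_IS_REG_IN_RANGE relies on unsigned wrap-around so a single comparison
   suffices: for an unsigned offset, (offReg - offFirst < cbRegion) is equivalent to
   (offFirst <= offReg && offReg < offFirst + cbRegion); an offset below offFirst wraps
   to a huge unsigned value and fails the check. The GIC_IS_INTR_* range checks above
   follow the same pattern. */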
85
86
87#ifdef LOG_ENABLED
88/**
89 * Gets the description of a CPU interface register.
90 *
91 * @returns The description.
92 * @param u32Reg The CPU interface register offset.
93 */
94static const char *gicIccGetRegDescription(uint32_t u32Reg)
95{
96 switch (u32Reg)
97 {
98#define GIC_ICC_REG_CASE(a_Reg) case ARMV8_AARCH64_SYSREG_ ## a_Reg: return #a_Reg
99 GIC_ICC_REG_CASE(ICC_PMR_EL1);
100 GIC_ICC_REG_CASE(ICC_IAR0_EL1);
101 GIC_ICC_REG_CASE(ICC_EOIR0_EL1);
102 GIC_ICC_REG_CASE(ICC_HPPIR0_EL1);
103 GIC_ICC_REG_CASE(ICC_BPR0_EL1);
104 GIC_ICC_REG_CASE(ICC_AP0R0_EL1);
105 GIC_ICC_REG_CASE(ICC_AP0R1_EL1);
106 GIC_ICC_REG_CASE(ICC_AP0R2_EL1);
107 GIC_ICC_REG_CASE(ICC_AP0R3_EL1);
108 GIC_ICC_REG_CASE(ICC_AP1R0_EL1);
109 GIC_ICC_REG_CASE(ICC_AP1R1_EL1);
110 GIC_ICC_REG_CASE(ICC_AP1R2_EL1);
111 GIC_ICC_REG_CASE(ICC_AP1R3_EL1);
112 GIC_ICC_REG_CASE(ICC_DIR_EL1);
113 GIC_ICC_REG_CASE(ICC_RPR_EL1);
114 GIC_ICC_REG_CASE(ICC_SGI1R_EL1);
115 GIC_ICC_REG_CASE(ICC_ASGI1R_EL1);
116 GIC_ICC_REG_CASE(ICC_SGI0R_EL1);
117 GIC_ICC_REG_CASE(ICC_IAR1_EL1);
118 GIC_ICC_REG_CASE(ICC_EOIR1_EL1);
119 GIC_ICC_REG_CASE(ICC_HPPIR1_EL1);
120 GIC_ICC_REG_CASE(ICC_BPR1_EL1);
121 GIC_ICC_REG_CASE(ICC_CTLR_EL1);
122 GIC_ICC_REG_CASE(ICC_SRE_EL1);
123 GIC_ICC_REG_CASE(ICC_IGRPEN0_EL1);
124 GIC_ICC_REG_CASE(ICC_IGRPEN1_EL1);
125#undef GIC_ICC_REG_CASE
126 default:
127 return "<UNKNOWN>";
128 }
129}
130
131
132/**
133 * Gets the description of a distributor register given its register offset.
134 *
135 * @returns The register description.
136 * @param offReg The distributor register offset.
137 */
138static const char *gicDistGetRegDescription(uint16_t offReg)
139{
140 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE)) return "GICD_IGROUPRn";
141 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE)) return "GICD_IGROUPRnE";
142 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE)) return "GICD_IROUTERn";
143 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE)) return "GICD_IROUTERnE";
144 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE)) return "GICD_ISENABLERn";
145 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE)) return "GICD_ISENABLERnE";
146 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE)) return "GICD_ICENABLERn";
147 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE)) return "GICD_ICENABLERnE";
148 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE)) return "GICD_ISACTIVERn";
149 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE)) return "GICD_ISACTIVERnE";
150 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE)) return "GICD_ICACTIVERn";
151 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE)) return "GICD_ICACTIVERnE";
152 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE)) return "GICD_IPRIORITYRn";
153 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE)) return "GICD_IPRIORITYRnE";
154 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE)) return "GICD_ISPENDRn";
155 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE)) return "GICD_ISPENDRnE";
156 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE)) return "GICD_ICPENDRn";
157 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE)) return "GICD_ICPENDRnE";
158 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE)) return "GICD_ICFGRn";
159 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE)) return "GICD_ICFGRnE";
160 switch (offReg)
161 {
162 case GIC_DIST_REG_CTLR_OFF: return "GICD_CTLR";
163 case GIC_DIST_REG_TYPER_OFF: return "GICD_TYPER";
164 case GIC_DIST_REG_STATUSR_OFF: return "GICD_STATUSR";
165 case GIC_DIST_REG_ITARGETSRn_OFF_START: return "GICD_ITARGETSRn";
166 case GIC_DIST_REG_IGRPMODRn_OFF_START: return "GICD_IGRPMODRn";
167 case GIC_DIST_REG_NSACRn_OFF_START: return "GICD_NSACRn";
168 case GIC_DIST_REG_SGIR_OFF: return "GICD_SGIR";
169 case GIC_DIST_REG_CPENDSGIRn_OFF_START: return "GICD_CPENDSGIRn";
170 case GIC_DIST_REG_SPENDSGIRn_OFF_START: return "GICD_SPENDSGIRn";
171 case GIC_DIST_REG_INMIn_OFF_START: return "GICD_INMIn";
172 case GIC_DIST_REG_PIDR2_OFF: return "GICD_PIDR2";
173 case GIC_DIST_REG_IIDR_OFF: return "GICD_IIDR";
174 case GIC_DIST_REG_TYPER2_OFF: return "GICD_TYPER2";
175 default:
176 return "<UNKNOWN>";
177 }
178}
179#endif /* LOG_ENABLED */
180
181
182/**
183 * Gets the description of a redistributor register given its register offset.
184 *
185 * @returns The register description.
186 * @param offReg The redistributor register offset.
187 */
188static const char *gicReDistGetRegDescription(uint16_t offReg)
189{
190 switch (offReg)
191 {
192 case GIC_REDIST_REG_CTLR_OFF: return "GICR_CTLR";
193 case GIC_REDIST_REG_IIDR_OFF: return "GICR_IIDR";
194 case GIC_REDIST_REG_TYPER_OFF: return "GICR_TYPER";
195 case GIC_REDIST_REG_TYPER_AFFINITY_OFF: return "GICR_TYPER_AFF";
196 case GIC_REDIST_REG_STATUSR_OFF: return "GICR_STATUSR";
197 case GIC_REDIST_REG_WAKER_OFF: return "GICR_WAKER";
198 case GIC_REDIST_REG_MPAMIDR_OFF: return "GICR_MPAMIDR";
199 case GIC_REDIST_REG_PARTIDR_OFF: return "GICR_PARTIDR";
200 case GIC_REDIST_REG_SETLPIR_OFF: return "GICR_SETLPIR";
201 case GIC_REDIST_REG_CLRLPIR_OFF: return "GICR_CLRLPIR";
202 case GIC_REDIST_REG_PROPBASER_OFF: return "GICR_PROPBASER";
203 case GIC_REDIST_REG_PENDBASER_OFF: return "GICR_PENDBASER";
204 case GIC_REDIST_REG_INVLPIR_OFF: return "GICR_INVLPIR";
205 case GIC_REDIST_REG_INVALLR_OFF: return "GICR_INVALLR";
206 case GIC_REDIST_REG_SYNCR_OFF: return "GICR_SYNCR";
207 case GIC_REDIST_REG_PIDR2_OFF: return "GICR_PIDR2";
208 default:
209 return "<UNKNOWN>";
210 }
211}
212
213
214/**
215 * Gets the description of an SGI/PPI redistributor register given its register
216 * offset.
217 *
218 * @returns The register description.
219 * @param offReg The redistributor register offset.
220 */
221static const char *gicReDistGetSgiPpiRegDescription(uint16_t offReg)
222{
223 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE)) return "GICR_IGROUPn";
224 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE)) return "GICR_ISENABLERn";
225 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE)) return "GICR_ICENABLERn";
226 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE)) return "GICR_ISACTIVERn";
227 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE)) return "GICR_ICACTIVERn";
228 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE)) return "GICR_ISPENDRn";
229 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE)) return "GICR_ICPENDRn";
230 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE)) return "GICR_IPRIORITYRn";
231 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE)) return "GICR_ICFGRn";
232 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_INMIR0_OFF, GIC_REDIST_SGI_PPI_REG_INMIRnE_RANGE_SIZE)) return "GICR_INMIRn";
233 switch (offReg)
234 {
235 case GIC_REDIST_SGI_PPI_REG_NSACR_OFF: return "GICR_NSACR";
236 case GIC_REDIST_SGI_PPI_REG_IGRPMODR0_OFF: return "GICR_IGRPMODR0";
237 case GIC_REDIST_SGI_PPI_REG_IGRPMODR1E_OFF: return "GICR_IGRPMODR1E";
238 case GIC_REDIST_SGI_PPI_REG_IGRPMODR2E_OFF: return "GICR_IGRPMODR2E";
239 default:
240 return "<UNKNOWN>";
241 }
242}
243
244
245/**
246 * Gets the interrupt ID given a distributor interrupt index.
247 *
248 * @returns The interrupt ID.
249 * @param idxIntr The distributor interrupt index.
250 * @remarks A distributor interrupt is an interrupt type that belongs in the
251 * distributor (e.g. SPIs, extended SPIs).
252 */
253DECLHIDDEN(uint16_t) gicDistGetIntIdFromIndex(uint16_t idxIntr)
254{
255 /*
256 * Distributor interrupts bits to interrupt ID mapping:
257 * +--------------------------------------------------------+
258 * | Range (incl) | SGI | PPI | SPI | Ext SPI |
259 * |--------------+--------+--------+----------+------------|
260 * | Bit | 0..15 | 16..31 | 32..1023 | 1024..2047 |
261 * | Int Id | 0..15 | 16..31 | 32..1023 | 4096..5119 |
262 * +--------------------------------------------------------+
263 */
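    /* E.g. index 1024 (the first extended SPI bit) maps to INTID 4096 and index 2047 to INTID 5119. */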
264 uint16_t uIntId;
265 /* SGIs, PPIs, SPIs and specials. */
266 if (idxIntr < 1024)
267 uIntId = idxIntr;
268 /* Extended SPIs. */
269 else if (idxIntr < 2048)
270 uIntId = GIC_INTID_RANGE_EXT_SPI_START + idxIntr - 1024;
271 else
272 {
273 uIntId = 0;
274 AssertReleaseMsgFailed(("idxIntr=%u\n", idxIntr));
275 }
276 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
277 || GIC_IS_INTR_SPI(uIntId)
278 || GIC_IS_INTR_SPECIAL(uIntId)
279 || GIC_IS_INTR_EXT_SPI(uIntId));
280 return uIntId;
281}
282
283
284/**
285 * Gets the distributor interrupt index given an interrupt ID.
286 *
287 * @returns The distributor interrupt index.
288 * @param uIntId The interrupt ID.
289 * @remarks A distributor interrupt is an interrupt type that belongs in the
290 * distributor (e.g. SPIs, extended SPIs).
291 */
292static uint16_t gicDistGetIndexFromIntId(uint16_t uIntId)
293{
294 uint16_t idxIntr;
295 /* SGIs, PPIs, SPIs and specials. */
296 if (uIntId <= GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
297 idxIntr = uIntId;
298 /* Extended SPIs. */
299 else if (uIntId - GIC_INTID_RANGE_EXT_SPI_START < GIC_INTID_EXT_SPI_RANGE_SIZE)
300 idxIntr = 1024 + uIntId - GIC_INTID_RANGE_EXT_SPI_START;
301 else
302 {
303 idxIntr = 0;
304 AssertReleaseMsgFailed(("uIntId=%u\n", uIntId));
305 }
306 Assert(idxIntr < sizeof(GICDEV::bmIntrPending) * 8);
307 return idxIntr;
308}
309
310
311/**
312 * Gets the interrupt ID given a redistributor interrupt index.
313 *
314 * @returns The interrupt ID.
315 * @param idxIntr The redistributor interrupt index.
316 * @remarks A redistributor interrupt is an interrupt type that belongs in the
317 * redistributor (e.g. SGIs, PPIs, extended PPIs).
318 */
319DECLHIDDEN(uint16_t) gicReDistGetIntIdFromIndex(uint16_t idxIntr)
320{
321 /*
322 * Redistributor interrupts bits to interrupt ID mapping:
323 * +---------------------------------------------+
324 * | Range (incl) | SGI | PPI | Ext PPI |
325 * +---------------------------------------------+
326 * | Bit | 0..15 | 16..31 | 32..95 |
327 * | Int Id | 0..15 | 16..31 | 1056..1119 |
328 * +---------------------------------------------+
329 */
330 uint16_t uIntId;
331 /* SGIs and PPIs. */
332 if (idxIntr < 32)
333 uIntId = idxIntr;
334 /* Extended PPIs. */
335 else if (idxIntr < 96)
336 uIntId = GIC_INTID_RANGE_EXT_PPI_START + idxIntr - 32;
337 else
338 {
339 uIntId = 0;
340 AssertReleaseMsgFailed(("idxIntr=%u\n", idxIntr));
341 }
342 Assert(GIC_IS_INTR_SGI_OR_PPI(uIntId) || GIC_IS_INTR_EXT_PPI(uIntId));
343 return uIntId;
344}
345
346
347/**
348 * Gets the redistributor interrupt index given an interrupt ID.
349 *
350 * @returns The redistributor interrupt index.
351 * @param uIntId The interrupt ID.
352 * @remarks A redistributor interrupt is an interrupt type that belongs in the
353 * redistributor (e.g. SGIs, PPIs, extended PPIs).
354 */
355static uint16_t gicReDistGetIndexFromIntId(uint16_t uIntId)
356{
357 /* SGIs and PPIs. */
358 uint16_t idxIntr;
359 if (uIntId <= GIC_INTID_RANGE_PPI_LAST)
360 idxIntr = uIntId;
361 /* Extended PPIs. */
362 else if (uIntId - GIC_INTID_RANGE_EXT_PPI_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
363 idxIntr = 32 + uIntId - GIC_INTID_RANGE_EXT_PPI_START;
364 else
365 {
366 idxIntr = 0;
367 AssertReleaseMsgFailed(("uIntId=%u\n", uIntId));
368 }
369 Assert(idxIntr < sizeof(GICCPU::bmIntrPending) * 8);
370 return idxIntr;
371}
372
373
374/**
375 * Sets the interrupt pending force-flag and pokes the EMT if required.
376 *
377 * @param pVCpu The cross context virtual CPU structure.
378 * @param fIrq Flag whether to assert the IRQ line or leave it alone.
379 * @param fFiq Flag whether to assert the FIQ line or leave it alone.
380 */
381static void gicSetInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
382{
383 Assert(fIrq || fFiq);
384 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
385
386#ifdef IN_RING3
387 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
388 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
389#endif
390
391 if (fIrq)
392 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
393 if (fFiq)
394 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
395
396 /*
397 * We need to wake up the target CPU if we're not on EMT.
398 */
399 /** @todo We could just use RTThreadNativeSelf() here, couldn't we? */
400#if defined(IN_RING0)
401# error "Implement me!"
402#elif defined(IN_RING3)
403 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
404 VMCPUID idCpu = pVCpu->idCpu;
405 if (VMMGetCpuId(pVM) != idCpu)
406 {
407 Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
408 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
409 }
410#endif
411}
412
413
414/**
415 * Clears the interrupt pending force-flag.
416 *
417 * @param pVCpu The cross context virtual CPU structure.
418 * @param fIrq Flag whether to clear the IRQ flag.
419 * @param fFiq Flag whether to clear the FIQ flag.
420 */
421DECLINLINE(void) gicClearInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
422{
423 Assert(fIrq || fFiq);
424 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
425
426#ifdef IN_RING3
427 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
428 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
429#endif
430
431 if (fIrq)
432 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
433 if (fFiq)
434 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
435}
436
437
438/**
439 * Updates the interrupt force-flag.
440 *
441 * @param pVCpu The cross context virtual CPU structure.
442 * @param fIrq Flag whether the IRQ line should be asserted (true) or cleared (false).
443 * @param fFiq Flag whether the FIQ line should be asserted (true) or cleared (false).
444 */
445DECLINLINE(void) gicUpdateInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
446{
447 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
448
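    /* Assert the lines that are requested and clear those that are not; e.g. fIrq=true with
       fFiq=false sets the IRQ force-flag and clears the FIQ force-flag. */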
449 if (fIrq || fFiq)
450 gicSetInterruptFF(pVCpu, fIrq, fFiq);
451
452 if (!fIrq || !fFiq)
453 gicClearInterruptFF(pVCpu, !fIrq, !fFiq);
454}
455
456
457/**
458 * Gets whether the redistributor has pending interrupts with sufficient priority to
459 * be signalled to the PE.
460 *
461 * @param pGicCpu The GIC redistributor and CPU interface state.
462 * @param pfIrq Where to store whether IRQs can be signalled.
463 * @param pfFiq Where to store whether FIQs can be signalled.
464 */
465static void gicReDistHasIrqPending(PCGICCPU pGicCpu, bool *pfIrq, bool *pfFiq)
466{
467 bool const fIsGroup1Enabled = pGicCpu->fIntrGroup1Enabled;
468 bool const fIsGroup0Enabled = pGicCpu->fIntrGroup0Enabled;
469 LogFlowFunc(("fIsGroup0Enabled=%RTbool fIsGroup1Enabled=%RTbool\n", fIsGroup0Enabled, fIsGroup1Enabled));
470
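    /* A set bit in bmIntrGroup marks a Group 1 interrupt (signalled as an IRQ), a clear bit
       marks a Group 0 interrupt (signalled as an FIQ). */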
471# if 1
472 uint32_t bmIntrs[3];
473 for (uint8_t i = 0; i < RT_ELEMENTS(bmIntrs); i++)
474 {
475 /* Collect interrupts that are pending, enabled and inactive. */
476 bmIntrs[i] = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
477
478 /* Discard interrupts if the group they belong to is disabled. */
479 if (!fIsGroup1Enabled)
480 bmIntrs[i] &= ~pGicCpu->bmIntrGroup[i];
481 if (!fIsGroup0Enabled)
482 bmIntrs[i] &= pGicCpu->bmIntrGroup[i];
483 }
484
485 uint32_t const cIntrs = sizeof(bmIntrs) * 8;
486 int32_t idxIntr = ASMBitFirstSet(&bmIntrs[0], cIntrs);
487 AssertCompile(!(cIntrs % 32));
488 if (idxIntr >= 0)
489 {
490 /* Only allow interrupts with higher priority than the current configured and running one. */
491 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
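        /* Lower numerical priority values mean higher priority, hence the '<' comparisons below. */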
492 do
493 {
494 Assert((uint32_t)idxIntr < RT_ELEMENTS(pGicCpu->abIntrPriority));
495 if (pGicCpu->abIntrPriority[idxIntr] < bPriority)
496 {
497 bool const fInGroup1 = ASMBitTest(&pGicCpu->bmIntrGroup[0], idxIntr);
498 bool const fInGroup0 = !fInGroup1;
499 *pfIrq = fInGroup1 && fIsGroup1Enabled;
500 *pfFiq = fInGroup0 && fIsGroup0Enabled;
501 return;
502 }
503 idxIntr = ASMBitNextSet(&bmIntrs[0], cIntrs, idxIntr);
504 } while (idxIntr != -1);
505 }
506#else /** @todo Measure and pick the faster version. */
507 /* Only allow interrupts with higher priority than the current configured and running one. */
508 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
509
510 for (uint8_t i = 0; i < RT_ELEMENTS(pGicCpu->bmIntrPending); i++)
511 {
512 /* Collect interrupts that are pending, enabled and inactive. */
513 uint32_t bmIntr = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
514
515 /* Discard interrupts if the group they belong to is disabled. */
516 if (!fIsGroup1Enabled)
517 bmIntr &= ~pGicCpu->bmIntrGroup[i];
518 if (!fIsGroup0Enabled)
519 bmIntr &= pGicCpu->bmIntrGroup[i];
520
521 /* If the interrupt is higher priority than the running interrupt, return whether to signal an IRQ, FIQ or neither. */
522 uint16_t const idxPending = ASMBitFirstSetU32(bmIntr);
523 if (idxPending > 0)
524 {
525 uint16_t const idxIntr = 32 * i + idxPending - 1;
526 AssertRelease(idxIntr < RT_ELEMENTS(pGicCpu->abIntrPriority));
527 if (pGicCpu->abIntrPriority[idxIntr] < bPriority)
528 {
529 AssertRelease(idxIntr < sizeof(pGicCpu->bmIntrGroup) * 8);
530 bool const fInGroup1 = ASMBitTest(&pGicCpu->bmIntrGroup[0], idxIntr);
531 bool const fInGroup0 = !fInGroup1;
532 *pfIrq = fInGroup1 && fIsGroup1Enabled;
533 *pfFiq = fInGroup0 && fIsGroup0Enabled;
534 return;
535 }
536 }
537 }
538#endif
539 *pfIrq = false;
540 *pfFiq = false;
541}
542
543
544/**
545 * Gets whether the distributor has pending interrupts with sufficient priority to
546 * be signalled to the PE.
547 *
548 * @param pGicDev The GIC distributor state.
549 * @param pVCpu The cross context virtual CPU structure.
550 * @param idCpu The ID of the virtual CPU.
551 * @param pfIrq Where to store whether IRQs can be signalled.
552 * @param pfFiq Where to store whether FIQs can be signalled.
553 */
554static void gicDistHasIrqPendingForVCpu(PCGICDEV pGicDev, PCVMCPUCC pVCpu, VMCPUID idCpu, bool *pfIrq, bool *pfFiq)
555{
556 bool const fIsGroup1Enabled = pGicDev->fIntrGroup1Enabled;
557 bool const fIsGroup0Enabled = pGicDev->fIntrGroup0Enabled;
558 LogFlowFunc(("fIsGroup1Enabled=%RTbool fIsGroup0Enabled=%RTbool\n", fIsGroup1Enabled, fIsGroup0Enabled));
559
560#if 1
561 uint32_t bmIntrs[64];
562 for (uint8_t i = 0; i < RT_ELEMENTS(bmIntrs); i++)
563 {
564 /* Collect interrupts that are pending, enabled and inactive. */
565 bmIntrs[i] = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
566
567 /* Discard interrupts if the group they belong to is disabled. */
568 if (!fIsGroup1Enabled)
569 bmIntrs[i] &= ~pGicDev->bmIntrGroup[i];
570 if (!fIsGroup0Enabled)
571 bmIntrs[i] &= pGicDev->bmIntrGroup[i];
572 }
573
574 /*
575 * The distributor's interrupt pending/enabled/active bitmaps have 2048 bits which map
576 * SGIs (16), PPIs (16), SPIs (988), reserved SPIs (4) and extended SPIs (1024).
577 * Of these, the first 32 bits corresponding to SGIs and PPIs are RAZ/WI when affinity
578 * routing is enabled (which is currently always the case in our implementation).
579 */
580 Assert(pGicDev->fAffRoutingEnabled);
581 Assert(bmIntrs[0] == 0);
582 uint32_t const cIntrs = sizeof(bmIntrs) * 8;
583 int32_t idxIntr = ASMBitFirstSet(&bmIntrs[0], cIntrs);
584 AssertCompile(!(cIntrs % 32));
585 if (idxIntr >= 0)
586 {
587 /* Only allow interrupts with higher priority than the current configured and running one. */
588 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
589 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
590 do
591 {
592 AssertCompile(RT_ELEMENTS(pGicDev->abIntrPriority) == RT_ELEMENTS(pGicDev->au32IntrRouting));
593 Assert((uint32_t)idxIntr < RT_ELEMENTS(pGicDev->abIntrPriority));
594 Assert(idxIntr < GIC_INTID_RANGE_SPECIAL_START || idxIntr > GIC_INTID_RANGE_SPECIAL_LAST);
595 if ( pGicDev->abIntrPriority[idxIntr] < bPriority
596 && pGicDev->au32IntrRouting[idxIntr] == idCpu)
597 {
598 bool const fInGroup1 = ASMBitTest(&pGicDev->bmIntrGroup[0], idxIntr);
599 bool const fInGroup0 = !fInGroup1;
600 *pfFiq = fInGroup0 && fIsGroup0Enabled;
601 *pfIrq = fInGroup1 && fIsGroup1Enabled;
602 return;
603 }
604 idxIntr = ASMBitNextSet(&bmIntrs[0], cIntrs, idxIntr);
605 } while (idxIntr != -1);
606 }
607#else /** @todo Measure and pick the faster version. */
608 /* Only allow interrupts with higher priority than the running one. */
609 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
610 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
611
612 for (uint8_t i = 0; i < RT_ELEMENTS(pGicDev->bmIntrPending); i += 2)
613 {
614 /* Collect interrupts that are pending, enabled and inactive. */
615 uint32_t uLo = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
616 uint32_t uHi = (pGicDev->bmIntrPending[i + 1] & pGicDev->bmIntrEnabled[i + 1]) & ~pGicDev->bmIntrActive[i + 1];
617
618 /* Discard interrupts if the group they belong to is disabled. */
619 if (!fIsGroup1Enabled)
620 {
621 uLo &= ~pGicDev->bmIntrGroup[i];
622 uHi &= ~pGicDev->bmIntrGroup[i + 1];
623 }
624 if (!fIsGroup0Enabled)
625 {
626 uLo &= pGicDev->bmIntrGroup[i];
627 uHi &= pGicDev->bmIntrGroup[i + 1];
628 }
629
630 /* If the interrupt is higher priority than the running interrupt, return whether to signal an IRQ, FIQ or neither. */
631 Assert(pGicDev->fAffRoutingEnabled);
632 uint64_t const bmIntrPending = RT_MAKE_U64(uLo, uHi);
633 uint16_t const idxPending = ASMBitFirstSetU64(bmIntrPending);
634 if (idxPending > 0)
635 {
636 /*
637 * The distributor's interrupt pending/enabled/active bitmaps have 2048 bits which map
638 * SGIs (16), PPIs (16), SPIs (988), reserved SPIs (4) and extended SPIs (1024).
639 * Of these, the first 32 bits corresponding to SGIs and PPIs are RAZ/WI when affinity
640 * routing is enabled (which it always is in our implementation).
641 */
642 uint32_t const idxIntr = 32 * i + idxPending - 1;
643 AssertRelease(idxIntr < RT_ELEMENTS(pGicDev->abIntrPriority));
644 if ( pGicDev->abIntrPriority[idxIntr] < bPriority
645 && pGicDev->au32IntrRouting[idxIntr] == idCpu)
646 {
647 Assert(idxIntr > GIC_INTID_RANGE_PPI_LAST);
648 AssertRelease(idxIntr < sizeof(pGicDev->bmIntrGroup) * 8);
649 bool const fInGroup1 = ASMBitTest(&pGicDev->bmIntrGroup[0], idxIntr);
650 bool const fInGroup0 = !fInGroup1;
651 *pfFiq = fInGroup0 && fIsGroup0Enabled;
652 *pfIrq = fInGroup1 && fIsGroup1Enabled;
653 return;
654 }
655 }
656 }
657#endif
658 *pfIrq = false;
659 *pfFiq = false;
660}
661
662
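/**
 * Reads the LPI configuration table from guest memory into the distributor's
 * internal cache.
 *
 * @param pDevIns The device instance.
 * @param pGicDev The GIC distributor state.
 */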
663static void gicDistReadLpiConfigTableFromMem(PPDMDEVINS pDevIns, PGICDEV pGicDev)
664{
665 Assert(pGicDev->fEnableLpis);
666 LogFlowFunc(("\n"));
667
668 /* Check if the guest is disabling LPIs by setting the number of LPI INTID bits below the minimum required bits. */
669 uint8_t const cIdBits = RT_BF_GET(pGicDev->uLpiConfigBaseReg.u, GIC_BF_REDIST_REG_PROPBASER_ID_BITS) + 1;
670 if (cIdBits < GIC_LPI_ID_BITS_MIN)
671 return;
672
673 /* Copy the LPI config table from guest memory to our internal cache. */
674 Assert(UINT32_C(2) << pGicDev->uMaxLpi <= RT_ELEMENTS(pGicDev->abLpiConfig));
675 RTGCPHYS const GCPhysLpiConfigTable = pGicDev->uLpiConfigBaseReg.u & GIC_BF_REDIST_REG_PROPBASER_PHYS_ADDR_MASK;
676 uint32_t const cbLpiConfigTable = sizeof(pGicDev->abLpiConfig);
677
678 /** @todo Try releasing and re-acquiring the device critical section here.
679 * Probably safe, but haven't verified this... */
680 int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysLpiConfigTable, (void *)&pGicDev->abLpiConfig[0], cbLpiConfigTable);
681 AssertRC(rc);
682}
683
684
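/**
 * Reads the LPI pending bitmap from guest memory into the redistributor's internal
 * cache, or zeroes the cache when GICR_PENDBASER.PTZ indicates the table is zeroed.
 *
 * @param pDevIns The device instance.
 * @param pVCpu The cross context virtual CPU structure.
 * @param pGicDev The GIC distributor state.
 */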
685static void gicReDistReadLpiPendingBitmapFromMem(PPDMDEVINS pDevIns, PVMCPU pVCpu, PGICDEV pGicDev)
686{
687 Assert(pGicDev->fEnableLpis);
688 LogFlowFunc(("\n"));
689
690 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
691 bool const fIsZeroed = RT_BF_GET(pGicDev->uLpiPendingBaseReg.u, GIC_BF_REDIST_REG_PENDBASER_PTZ);
692 if (!fIsZeroed)
693 {
694 /* Copy the LPI pending bitmap from guest memory to our internal cache. */
695 RTGCPHYS const GCPhysLpiPendingBitmap = (pGicDev->uLpiPendingBaseReg.u & GIC_BF_REDIST_REG_PENDBASER_PHYS_ADDR_MASK)
696 + GIC_INTID_RANGE_LPI_START; /* Skip first 1KB (since LPI INTIDs start at 8192). */
697 uint32_t const cbLpiPendingBitmap = sizeof(pGicCpu->bmLpiPending);
698
699 /** @todo Try releasing and re-acquiring the device critical section here.
700 * Probably safe, but haven't verified this... */
701 int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysLpiPendingBitmap, (void *)&pGicCpu->bmLpiPending[0],
702 cbLpiPendingBitmap);
703 AssertRC(rc);
704 }
705 else
706 RT_ZERO(pGicCpu->bmLpiPending); /* Paranoia. */
707}
708
709
710/**
711 * Updates the internal IRQ state and sets or clears the appropriate force action
712 * flags.
713 *
714 * @returns Strict VBox status code.
715 * @param pGicDev The GIC distributor state.
716 * @param pVCpu The cross context virtual CPU structure.
717 */
718static VBOXSTRICTRC gicReDistUpdateIrqState(PCGICDEV pGicDev, PVMCPUCC pVCpu)
719{
720 LogFlowFunc(("\n"));
721 bool fIrq;
722 bool fFiq;
723 gicReDistHasIrqPending(VMCPU_TO_GICCPU(pVCpu), &fIrq, &fFiq);
724
725 bool fIrqDist;
726 bool fFiqDist;
727 gicDistHasIrqPendingForVCpu(pGicDev, pVCpu, pVCpu->idCpu, &fIrqDist, &fFiqDist);
728 LogFlowFunc(("fIrq=%RTbool fFiq=%RTbool fIrqDist=%RTbool fFiqDist=%RTbool\n", fIrq, fFiq, fIrqDist, fFiqDist));
729
730 fIrq |= fIrqDist;
731 fFiq |= fFiqDist;
732 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
733 return VINF_SUCCESS;
734}
735
736
737/**
738 * Updates the internal IRQ state of the distributor and sets or clears the appropriate force action flags.
739 *
740 * @returns Strict VBox status code.
741 * @param pVM The cross context VM state.
742 * @param pGicDev The GIC distributor state.
743 */
744static VBOXSTRICTRC gicDistUpdateIrqState(PCVMCC pVM, PCGICDEV pGicDev)
745{
746 LogFlowFunc(("\n"));
747 for (uint32_t i = 0; i < pVM->cCpus; i++)
748 {
749 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[i];
750 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
751
752 bool fIrq, fFiq;
753 gicReDistHasIrqPending(pGicCpu, &fIrq, &fFiq);
754
755 bool fIrqDist, fFiqDist;
756 gicDistHasIrqPendingForVCpu(pGicDev, pVCpu, i, &fIrqDist, &fFiqDist);
757 fIrq |= fIrqDist;
758 fFiq |= fFiqDist;
759
760 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
761 }
762 return VINF_SUCCESS;
763}
764
765
766/**
767 * Reads the distributor's interrupt routing register (GICD_IROUTER).
768 *
769 * @returns Strict VBox status code.
770 * @param pGicDev The GIC distributor state.
771 * @param idxReg The index of the register in the GICD_IROUTER range.
772 * @param puValue Where to store the register's value.
773 */
774static VBOXSTRICTRC gicDistReadIntrRoutingReg(PCGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
775{
776 /* When affinity routing is disabled, reads return 0. */
777 Assert(pGicDev->fAffRoutingEnabled);
778
779 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
780 idxReg += GIC_INTID_RANGE_SPI_START;
781 AssertReturn(idxReg < RT_ELEMENTS(pGicDev->au32IntrRouting), VERR_BUFFER_OVERFLOW);
782 Assert(idxReg < sizeof(pGicDev->bmIntrRoutingMode) * 8);
783 if (!(idxReg % 2))
784 {
785 /* Lower 32-bits. */
786 uint8_t const fIrm = ASMBitTest(&pGicDev->bmIntrRoutingMode[0], idxReg);
787 *puValue = GIC_DIST_REG_IROUTERn_SET(fIrm, pGicDev->au32IntrRouting[idxReg]);
788 }
789 else
790 {
791 /* Upper 32-bits. */
792 *puValue = pGicDev->au32IntrRouting[idxReg] >> 24;
793 }
794
795 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
796 return VINF_SUCCESS;
797}
798
799
800/**
801 * Writes the distributor's interrupt routing register (GICD_IROUTER).
802 *
803 * @returns Strict VBox status code.
804 * @param pGicDev The GIC distributor state.
805 * @param idxReg The index of the register in the GICD_IROUTER range.
806 * @param uValue The value to write to the register.
807 */
808static VBOXSTRICTRC gicDistWriteIntrRoutingReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
809{
810 /* When affinity routing is disabled, writes are ignored. */
811 Assert(pGicDev->fAffRoutingEnabled);
812
813 AssertMsgReturn(idxReg < RT_ELEMENTS(pGicDev->au32IntrRouting), ("idxReg=%u\n", idxReg), VERR_BUFFER_OVERFLOW);
814 Assert(idxReg < sizeof(pGicDev->bmIntrRoutingMode) * 8);
815 if (!(idxReg % 2))
816 {
817 /* Lower 32-bits. */
818 bool const fIrm = GIC_DIST_REG_IROUTERn_IRM_GET(uValue);
819 if (fIrm)
820 ASMBitSet(&pGicDev->bmIntrRoutingMode[0], idxReg);
821 else
822 ASMBitClear(&pGicDev->bmIntrRoutingMode[0], idxReg);
823 uint32_t const fAff3 = pGicDev->au32IntrRouting[idxReg] & 0xff000000;
824 pGicDev->au32IntrRouting[idxReg] = fAff3 | (uValue & 0x00ffffff);
825 }
826 else
827 {
828 /* Upper 32-bits. */
829 uint32_t const fAffOthers = pGicDev->au32IntrRouting[idxReg] & 0x00ffffff;
830 pGicDev->au32IntrRouting[idxReg] = (uValue << 24) | fAffOthers;
831 }
832
833 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->au32IntrRouting[idxReg]));
834 return VINF_SUCCESS;
835}
836
837
838/**
839 * Reads the distributor's interrupt (set/clear) enable register (GICD_ISENABLER and
840 * GICD_ICENABLER).
841 *
842 * @returns Strict VBox status code.
843 * @param pGicDev The GIC distributor state.
844 * @param idxReg The index of the register in the GICD_ISENABLER and
845 * GICD_ICENABLER range.
846 * @param puValue Where to store the register's value.
847 */
848static VBOXSTRICTRC gicDistReadIntrEnableReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
849{
850 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
851 *puValue = pGicDev->bmIntrEnabled[idxReg];
852 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
853 return VINF_SUCCESS;
854}
855
856
857/**
858 * Writes the distributor's interrupt set-enable register (GICD_ISENABLER).
859 *
860 * @returns Strict VBox status code.
861 * @param pVM The cross context VM structure.
862 * @param pGicDev The GIC distributor state.
863 * @param idxReg The index of the register in the GICD_ISENABLER range.
864 * @param uValue The value to write to the register.
865 */
866static VBOXSTRICTRC gicDistWriteIntrSetEnableReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
867{
868 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
869 Assert(pGicDev->fAffRoutingEnabled);
870 if (idxReg > 0)
871 {
872 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
873 pGicDev->bmIntrEnabled[idxReg] |= uValue;
874 return gicDistUpdateIrqState(pVM, pGicDev);
875 }
876 else
877 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
878 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
879 return VINF_SUCCESS;
880}
881
882
883/**
884 * Writes the distributor's interrupt clear-enable register (GICD_ICENABLER).
885 *
886 * @returns Strict VBox status code.
887 * @param pVM The cross context VM structure.
888 * @param pGicDev The GIC distributor state.
889 * @param idxReg The index of the register in the GICD_ICENABLER range.
890 * @param uValue The value to write to the register.
891 */
892static VBOXSTRICTRC gicDistWriteIntrClearEnableReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
893{
894 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
895 Assert(pGicDev->fAffRoutingEnabled);
896 if (idxReg > 0)
897 {
898 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
899 pGicDev->bmIntrEnabled[idxReg] &= ~uValue;
900 return gicDistUpdateIrqState(pVM, pGicDev);
901 }
902 else
903 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
904 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
905 return VINF_SUCCESS;
906}
907
908
909/**
910 * Reads the distributor's interrupt active register (GICD_ISACTIVER and
911 * GICD_ICACTIVER).
912 *
913 * @returns Strict VBox status code.
914 * @param pGicDev The GIC distributor state.
915 * @param idxReg The index of the register in the GICD_ISACTIVER and
916 * GICD_ICACTIVER range.
917 * @param puValue Where to store the register's value.
918 */
919static VBOXSTRICTRC gicDistReadIntrActiveReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
920{
921 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
922 *puValue = pGicDev->bmIntrActive[idxReg];
923 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
924 return VINF_SUCCESS;
925}
926
927
928/**
929 * Writes the distributor's interrupt set-active register (GICD_ISACTIVER).
930 *
931 * @returns Strict VBox status code.
932 * @param pVM The cross context VM structure.
933 * @param pGicDev The GIC distributor state.
934 * @param idxReg The index of the register in the GICD_ISACTIVER range.
935 * @param uValue The value to write to the register.
936 */
937static VBOXSTRICTRC gicDistWriteIntrSetActiveReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
938{
939 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
940 Assert(pGicDev->fAffRoutingEnabled);
941 if (idxReg > 0)
942 {
943 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
944 pGicDev->bmIntrActive[idxReg] |= uValue;
945 return gicDistUpdateIrqState(pVM, pGicDev);
946 }
947 else
948 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
949 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
950 return VINF_SUCCESS;
951}
952
953
954/**
955 * Writes the distributor's interrupt clear-active register (GICD_ICACTIVER).
956 *
957 * @returns Strict VBox status code.
958 * @param pVM The cross context VM structure.
959 * @param pGicDev The GIC distributor state.
960 * @param idxReg The index of the register in the GICD_ICACTIVER range.
961 * @param uValue The value to write to the register.
962 */
963static VBOXSTRICTRC gicDistWriteIntrClearActiveReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
964{
965 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
966 Assert(pGicDev->fAffRoutingEnabled);
967 if (idxReg > 0)
968 {
969 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
970 pGicDev->bmIntrActive[idxReg] &= ~uValue;
971 return gicDistUpdateIrqState(pVM, pGicDev);
972 }
973 else
974 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
975 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
976 return VINF_SUCCESS;
977}
978
979
980/**
981 * Reads the distributor's interrupt priority register (GICD_IPRIORITYR).
982 *
983 * @returns Strict VBox status code.
984 * @param pGicDev The GIC distributor state.
985 * @param idxReg The index of the register in the GICD_IPRIORITY range.
986 * @param puValue Where to store the register's value.
987 */
988static VBOXSTRICTRC gicDistReadIntrPriorityReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
989{
990 /* When affinity routing is enabled, reads to registers 0..7 (pertaining to SGIs and PPIs) return 0. */
991 Assert(pGicDev->fAffRoutingEnabled);
992 Assert(idxReg < RT_ELEMENTS(pGicDev->abIntrPriority) / sizeof(uint32_t));
993 Assert(idxReg != 255);
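    /* Each 32-bit GICD_IPRIORITYR holds 4 priority bytes, so registers 0..7 cover INTIDs 0..31
       (SGIs and PPIs), which are handled by the redistributor when affinity routing is enabled. */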
994 if (idxReg > 7)
995 {
996 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
997 AssertReturn(idxPriority <= RT_ELEMENTS(pGicDev->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
998 AssertCompile(sizeof(*puValue) == sizeof(uint32_t));
999 *puValue = *(uint32_t *)&pGicDev->abIntrPriority[idxPriority];
1000 }
1001 else
1002 {
1003 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1004 *puValue = 0;
1005 }
1006 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1007 return VINF_SUCCESS;
1008}
1009
1010
1011/**
1012 * Writes the distributor's interrupt priority register (GICD_IPRIORITYR).
1013 *
1014 * @returns Strict VBox status code.
1015 * @param pGicDev The GIC distributor state.
1016 * @param idxReg The index of the register in the GICD_IPRIORITY range.
1017 * @param uValue The value to write to the register.
1018 */
1019static VBOXSTRICTRC gicDistWriteIntrPriorityReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1020{
1021 /* When affinity routing is enabled, writes to registers 0..7 are ignored. */
1022 Assert(pGicDev->fAffRoutingEnabled);
1023 Assert(idxReg < RT_ELEMENTS(pGicDev->abIntrPriority) / sizeof(uint32_t));
1024 Assert(idxReg != 255);
1025 if (idxReg > 7)
1026 {
1027 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1028 AssertReturn(idxPriority <= RT_ELEMENTS(pGicDev->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1029 AssertCompile(sizeof(uValue) == sizeof(uint32_t));
1030 *(uint32_t *)&pGicDev->abIntrPriority[idxPriority] = uValue;
1031 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, *(uint32_t *)&pGicDev->abIntrPriority[idxPriority]));
1032 }
1033 else
1034 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1035 return VINF_SUCCESS;
1036}
1037
1038
1039/**
1040 * Reads the distributor's interrupt pending register (GICD_ISPENDR and
1041 * GICD_ICPENDR).
1042 *
1043 * @returns Strict VBox status code.
1044 * @param pGicDev The GIC distributor state.
1045 * @param idxReg The index of the register in the GICD_ISPENDR and
1046 * GICD_ICPENDR range.
1047 * @param puValue Where to store the register's value.
1048 */
1049static VBOXSTRICTRC gicDistReadIntrPendingReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1050{
1051 /* When affinity routing is enabled, reads for SGIs and PPIs return 0. */
1052 Assert(pGicDev->fAffRoutingEnabled);
1053 if (idxReg > 0)
1054 {
1055 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1056 *puValue = pGicDev->bmIntrPending[idxReg];
1057 }
1058 else
1059 {
1060 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1061 *puValue = 0;
1062 }
1063 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1064 return VINF_SUCCESS;
1065}
1066
1067
1068/**
1069 * Writes the distributor's interrupt set-pending register (GICD_ISPENDR).
1070 *
1071 * @returns Strict VBox status code.
1072 * @param pVM The cross context VM structure.
1073 * @param pGicDev The GIC distributor state.
1074 * @param idxReg The index of the register in the GICD_ISPENDR range.
1075 * @param uValue The value to write to the register.
1076 */
1077static VBOXSTRICTRC gicDistWriteIntrSetPendingReg(PVMCC pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1078{
1079 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1080 Assert(pGicDev->fAffRoutingEnabled);
1081 if (idxReg > 0)
1082 {
1083 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1084 pGicDev->bmIntrPending[idxReg] |= uValue;
1085 return gicDistUpdateIrqState(pVM, pGicDev);
1086 }
1087 else
1088 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1089 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1090 return VINF_SUCCESS;
1091}
1092
1093
1094/**
1095 * Writes the distributor's interrupt clear-pending register (GICD_ICPENDR).
1096 *
1097 * @returns Strict VBox status code.
1098 * @param pVM The cross context VM structure.
1099 * @param pGicDev The GIC distributor state.
1100 * @param idxReg The index of the register in the GICD_ICPENDR range.
1101 * @param uValue The value to write to the register.
1102 */
1103static VBOXSTRICTRC gicDistWriteIntrClearPendingReg(PVMCC pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1104{
1105 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1106 Assert(pGicDev->fAffRoutingEnabled);
1107 if (idxReg > 0)
1108 {
1109 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1110 pGicDev->bmIntrPending[idxReg] &= ~uValue;
1111 return gicDistUpdateIrqState(pVM, pGicDev);
1112 }
1113 else
1114 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1115 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1116 return VINF_SUCCESS;
1117}
1118
1119
1120/**
1121 * Reads the distributor's interrupt config register (GICD_ICFGR).
1122 *
1123 * @returns Strict VBox status code.
1124 * @param pGicDev The GIC distributor state.
1125 * @param idxReg The index of the register in the GICD_ICFGR range.
1126 * @param puValue Where to store the register's value.
1127 */
1128static VBOXSTRICTRC gicDistReadIntrConfigReg(PCGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1129{
1130 /* When affinity routing is enabled, reads of SGIs and PPIs return 0. */
1131 Assert(pGicDev->fAffRoutingEnabled);
1132 if (idxReg >= 2)
1133 {
1134 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrConfig));
1135 *puValue = pGicDev->bmIntrConfig[idxReg];
1136 }
1137 else
1138 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1139 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrConfig[idxReg]));
1140 return VINF_SUCCESS;
1141}
1142
1143
1144/**
1145 * Writes the distributor's interrupt config register (GICD_ICFGR).
1146 *
1147 * @returns Strict VBox status code.
1148 * @param pGicDev The GIC distributor state.
1149 * @param idxReg The index of the register in the GICD_ICFGR range.
1150 * @param uValue The value to write to the register.
1151 */
1152static VBOXSTRICTRC gicDistWriteIntrConfigReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1153{
1154 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1155 Assert(pGicDev->fAffRoutingEnabled);
1156 if (idxReg >= 2)
1157 {
1158 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrConfig));
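        /* Only the upper bit of each 2-bit field is programmable (0 = level-sensitive,
           1 = edge-triggered); the 0xaaaaaaaa mask clears the reserved lower bits. */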
1159 pGicDev->bmIntrConfig[idxReg] = uValue & 0xaaaaaaaa;
1160 }
1161 else
1162 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1163 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrConfig[idxReg]));
1164 return VINF_SUCCESS;
1165}
1166
1167
1168/**
1169 * Reads the distributor's interrupt group register (GICD_IGROUPR).
1170 *
1171 * @returns Strict VBox status code.
1172 * @param pGicDev The GIC distributor state.
1173 * @param idxReg The index of the register in the GICD_IGROUPR range.
1174 * @param puValue Where to store the register's value.
1175 */
1176static VBOXSTRICTRC gicDistReadIntrGroupReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1177{
1178 /* When affinity routing is enabled, reads to SGIs and PPIs return 0. */
1179 Assert(pGicDev->fAffRoutingEnabled);
1180 if (idxReg > 0)
1181 {
1182 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrGroup));
1183 *puValue = pGicDev->bmIntrGroup[idxReg];
1184 }
1185 else
1186 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1187 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1188 return VINF_SUCCESS;
1189}
1190
1191
1192/**
1193 * Writes the distributor's interrupt group register (GICD_IGROUPR).
1194 *
1195 * @returns Strict VBox status code.
1196 * @param pVM The cross context VM structure.
1197 * @param pGicDev The GIC distributor state.
1198 * @param idxReg The index of the register in the GICD_IGROUPR range.
1199 * @param uValue The value to write to the register.
1200 */
1201static VBOXSTRICTRC gicDistWriteIntrGroupReg(PCVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1202{
1203 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1204 Assert(pGicDev->fAffRoutingEnabled);
1205 if (idxReg > 0)
1206 {
1207 pGicDev->bmIntrGroup[idxReg] = uValue;
1208 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrGroup[idxReg]));
1209 }
1210 else
1211 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1212 return gicDistUpdateIrqState(pVM, pGicDev);
1213}
1214
1215
1216/**
1217 * Reads the redistributor's interrupt priority register (GICR_IPRIORITYR).
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pGicDev The GIC distributor state.
1221 * @param pGicCpu The GIC redistributor and CPU interface state.
1222 * @param idxReg The index of the register in the GICR_IPRIORITY range.
1223 * @param puValue Where to store the register's value.
1224 */
1225static VBOXSTRICTRC gicReDistReadIntrPriorityReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1226{
1227 /* When affinity routing is disabled, reads return 0. */
1228 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1229 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1230 AssertReturn(idxPriority <= RT_ELEMENTS(pGicCpu->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1231 AssertCompile(sizeof(*puValue) == sizeof(uint32_t));
1232 *puValue = *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority];
1233 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1234 return VINF_SUCCESS;
1235}
1236
1237
1238/**
1239 * Writes the redistributor's interrupt priority register (GICR_IPRIORITYR).
1240 *
1241 * @returns Strict VBox status code.
1242 * @param pGicDev The GIC distributor state.
1243 * @param pVCpu The cross context virtual CPU structure.
1244 * @param idxReg The index of the register in the GICR_IPRIORITY range.
1245 * @param uValue The value to write to the register.
1246 */
1247static VBOXSTRICTRC gicReDistWriteIntrPriorityReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1248{
1249 /* When affinity routing is disabled, writes are ignored. */
1250 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1251 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1252 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1253 AssertReturn(idxPriority <= RT_ELEMENTS(pGicCpu->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1254 AssertCompile(sizeof(uValue) == sizeof(uint32_t));
1255 *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority] = uValue;
1256 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority]));
1257 return VINF_SUCCESS;
1258}
1259
1260
1261/**
1262 * Reads the redistributor's interrupt pending register (GICR_ISPENDR and
1263 * GICR_ICPENDR).
1264 *
1265 * @returns Strict VBox status code.
1266 * @param pGicDev The GIC distributor state.
1267 * @param pGicCpu The GIC redistributor and CPU interface state.
1268 * @param idxReg The index of the register in the GICR_ISPENDR and
1269 * GICR_ICPENDR range.
1270 * @param puValue Where to store the register's value.
1271 */
1272static VBOXSTRICTRC gicReDistReadIntrPendingReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1273{
1274 /* When affinity routing is disabled, reads return 0. */
1275 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1276 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1277 *puValue = pGicCpu->bmIntrPending[idxReg];
1278 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1279 return VINF_SUCCESS;
1280}
1281
1282
1283/**
1284 * Writes the redistributor's interrupt set-pending register (GICR_ISPENDR).
1285 *
1286 * @returns Strict VBox status code.
1287 * @param pGicDev The GIC distributor state.
1288 * @param pVCpu The cross context virtual CPU structure.
1289 * @param idxReg The index of the register in the GICR_ISPENDR range.
1290 * @param uValue The value to write to the register.
1291 */
1292static VBOXSTRICTRC gicReDistWriteIntrSetPendingReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1293{
1294 /* When affinity routing is disabled, writes are ignored. */
1295 Assert(pGicDev->fAffRoutingEnabled);
1296 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1297 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1298 pGicCpu->bmIntrPending[idxReg] |= uValue;
1299 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1300 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1301}
1302
1303
1304/**
1305 * Writes the redistributor's interrupt clear-pending register (GICR_ICPENDR).
1306 *
1307 * @returns Strict VBox status code.
1308 * @param pGicDev The GIC distributor state.
1309 * @param pVCpu The cross context virtual CPU structure.
1310 * @param idxReg The index of the register in the GICR_ICPENDR range.
1311 * @param uValue The value to write to the register.
1312 */
1313static VBOXSTRICTRC gicReDistWriteIntrClearPendingReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1314{
1315 /* When affinity routing is disabled, writes are ignored. */
1316 Assert(pGicDev->fAffRoutingEnabled);
1317 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1318 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1319 pGicCpu->bmIntrPending[idxReg] &= ~uValue;
1320 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1321 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1322}
1323
1324
1325/**
1326 * Reads the redistributor's interrupt enable register (GICR_ISENABLER and
1327 * GICR_ICENABLER).
1328 *
1329 * @returns Strict VBox status code.
1330 * @param pGicDev The GIC distributor state.
1331 * @param pGicCpu The GIC redistributor and CPU interface state.
1332 * @param idxReg The index of the register in the GICR_ISENABLER and
1333 * GICR_ICENABLER range.
1334 * @param puValue Where to store the register's value.
1335 */
1336static VBOXSTRICTRC gicReDistReadIntrEnableReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1337{
1338 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1339 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1340 *puValue = pGicCpu->bmIntrEnabled[idxReg];
1341 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1342 return VINF_SUCCESS;
1343}
1344
1345
1346/**
1347 * Writes the redistributor's interrupt set-enable register (GICR_ISENABLER).
1348 *
1349 * @returns Strict VBox status code.
1350 * @param pGicDev The GIC distributor state.
1351 * @param pVCpu The cross context virtual CPU structure.
1352 * @param idxReg The index of the register in the GICR_ISENABLER range.
1353 * @param uValue The value to write to the register.
1354 */
1355static VBOXSTRICTRC gicReDistWriteIntrSetEnableReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1356{
1357 Assert(pGicDev->fAffRoutingEnabled);
1358 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1359 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1360 pGicCpu->bmIntrEnabled[idxReg] |= uValue;
1361 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1362 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1363}
1364
1365
1366/**
1367 * Writes the redistributor's interrupt clear-enable register (GICR_ICENABLER).
1368 *
1369 * @returns Strict VBox status code.
1370 * @param pGicDev The GIC distributor state.
1371 * @param pVCpu The cross context virtual CPU structure.
1372 * @param idxReg The index of the register in the GICR_ICENABLER range.
1373 * @param uValue The value to write to the register.
1374 */
1375static VBOXSTRICTRC gicReDistWriteIntrClearEnableReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1376{
1377 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1378 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1379 pGicCpu->bmIntrEnabled[idxReg] &= ~uValue;
1380 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1381 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1382}
1383
1384
1385/**
1386 * Reads the redistributor's interrupt active register (GICR_ISACTIVER and
1387 * GICR_ICACTIVER).
1388 *
1389 * @returns Strict VBox status code.
1390 * @param pGicCpu The GIC redistributor and CPU interface state.
1391 * @param idxReg The index of the register in the GICR_ISACTIVER and
1392 * GICR_ICACTIVER range.
1393 * @param puValue Where to store the register's value.
1394 */
1395static VBOXSTRICTRC gicReDistReadIntrActiveReg(PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1396{
1397 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1398 *puValue = pGicCpu->bmIntrActive[idxReg];
1399 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1400 return VINF_SUCCESS;
1401}
1402
1403
1404/**
1405 * Writes the redistributor's interrupt set-active register (GICR_ISACTIVER).
1406 *
1407 * @returns Strict VBox status code.
1408 * @param pGicDev The GIC distributor state.
1409 * @param pVCpu The cross context virtual CPU structure.
1410 * @param idxReg The index of the register in the GICR_ISACTIVER range.
1411 * @param uValue The value to write to the register.
1412 */
1413static VBOXSTRICTRC gicReDistWriteIntrSetActiveReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1414{
1415 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1416 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1417 pGicCpu->bmIntrActive[idxReg] |= uValue;
1418 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1419 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1420}
1421
1422
1423/**
1424 * Writes the redistributor's interrupt clear-active register (GICR_ICACTIVER).
1425 *
1426 * @returns Strict VBox status code.
1427 * @param pGicDev The GIC distributor state.
1428 * @param pVCpu The cross context virtual CPU structure.
1429 * @param idxReg The index of the register in the GICR_ICACTIVER range.
1430 * @param uValue The value to write to the register.
1431 */
1432static VBOXSTRICTRC gicReDistWriteIntrClearActiveReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1433{
1434 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1435 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1436 pGicCpu->bmIntrActive[idxReg] &= ~uValue;
1437 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1438 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1439}
1440
1441
1442/**
1443 * Reads the redistributor's interrupt config register (GICR_ICFGR).
1444 *
1445 * @returns Strict VBox status code.
1446 * @param pGicDev The GIC distributor state.
1447 * @param pGicCpu The GIC redistributor and CPU interface state.
1448 * @param idxReg The index of the register in the GICR_ICFGR range.
1449 * @param puValue Where to store the register's value.
1450 */
1451static VBOXSTRICTRC gicReDistReadIntrConfigReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1452{
1453 /* When affinity routing is disabled, reads return 0. */
1454 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1455 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrConfig));
1456 *puValue = pGicCpu->bmIntrConfig[idxReg];
1457 /* Ensure SGIs are read-only and remain configured as edge-triggered. */
1458 Assert(idxReg > 0 || *puValue == 0xaaaaaaaa);
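    /* Each INTID has 2 configuration bits; the upper bit set (binary 10) marks it edge-triggered,
       so 16 edge-triggered SGIs per 32-bit register yield the pattern 0xaaaaaaaa. */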
1459 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1460 return VINF_SUCCESS;
1461}
1462
1463
1464/**
1465 * Writes the redistributor's interrupt config register (GICR_ICFGR).
1466 *
1467 * @returns Strict VBox status code.
1468 * @param pGicDev The GIC distributor state.
1469 * @param pVCpu The cross context virtual CPU structure.
1470 * @param idxReg The index of the register in the GICR_ICFGR range.
1471 * @param uValue The value to write to the register.
1472 */
1473static VBOXSTRICTRC gicReDistWriteIntrConfigReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1474{
1475 /* When affinity routing is disabled, writes are ignored. */
1476 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1477 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1478 if (idxReg > 0)
1479 {
1480 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrConfig));
1481 pGicCpu->bmIntrConfig[idxReg] = uValue & 0xaaaaaaaa;
1482 }
1483 else
1484 {
1485        /* SGIs are always edge-triggered; ignore writes. Windows 11 (24H2) arm64 guests write these. */
1486 Assert(uValue == 0xaaaaaaaa);
1487 Assert(pGicCpu->bmIntrConfig[0] == uValue);
1488 }
1489 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrConfig[idxReg]));
1490 return VINF_SUCCESS;
1491}
1492
1493
1494/**
1495 * Reads the redistributor's interrupt group register (GICR_IGROUPR).
1496 *
1497 * @returns Strict VBox status code.
1498 * @param pGicDev The GIC distributor state.
1499 * @param pGicCpu The GIC redistributor and CPU interface state.
1500 * @param idxReg The index of the register in the GICR_IGROUPR range.
1501 * @param puValue Where to store the register's value.
1502 */
1503static VBOXSTRICTRC gicReDistReadIntrGroupReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1504{
1505 /* When affinity routing is disabled, reads return 0. */
1506 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1507 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrGroup));
1508 *puValue = pGicCpu->bmIntrGroup[idxReg];
1509 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrGroup[idxReg]));
1510 return VINF_SUCCESS;
1511}
1512
1513
1514/**
1515 * Writes the redistributor's interrupt group register (GICR_IGROUPR).
1516 *
1517 * @returns Strict VBox status code.
1518 * @param pGicDev The GIC distributor state.
1519 * @param pVCpu The cross context virtual CPU structure.
1520 * @param idxReg The index of the register in the GICR_IGROUPR range.
1521 * @param uValue The value to write to the register.
1522 */
1523static VBOXSTRICTRC gicReDistWriteIntrGroupReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1524{
1525 /* When affinity routing is disabled, writes are ignored. */
1526 Assert(pGicDev->fAffRoutingEnabled);
1527 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1528 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrGroup));
1529 pGicCpu->bmIntrGroup[idxReg] = uValue;
1530 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrGroup[idxReg]));
1531 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1532}
1533
1534
1535/**
1536 * Gets the virtual CPUID given the affinity values.
1537 *
1538 * @returns The virtual CPUID.
1539 * @param idCpuInterface The virtual CPUID within the PE cluster (0..15).
1540 * @param uAff1 The affinity 1 value.
1541 * @param uAff2 The affinity 2 value.
1542 * @param uAff3 The affinity 3 value.
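 *
 * @note   Each Aff1 cluster holds 16 CPU interfaces, each Aff2 group holds 256 Aff1
 *         clusters, and each Aff3 group holds 256 Aff2 groups; hence the multipliers
 *         of 16, 4096 and 1048576 below. For example, Aff3=0, Aff2=0, Aff1=2 with CPU
 *         interface 3 maps to virtual CPU ID 35 (2 * 16 + 3).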
1543 */
1544DECL_FORCE_INLINE(VMCPUID) gicGetCpuIdFromAffinity(uint8_t idCpuInterface, uint8_t uAff1, uint8_t uAff2, uint8_t uAff3)
1545{
1546 AssertReturn(idCpuInterface < 16, 0);
1547 return (uAff3 * 1048576) + (uAff2 * 4096) + (uAff1 * 16) + idCpuInterface;
1548}
1549
1550
1551/**
1552 * Gets the highest priority pending interrupt that can be signalled to the PE.
1553 *
1554 * @returns The interrupt ID or GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT if no interrupt
1555 * is pending or not in a state to be signalled to the PE.
1556 * @param pGicDev The GIC distributor state.
1557 * @param pGicCpu The GIC redistributor and CPU interface state.
1558 * @param fGroup0 Whether to consider group 0 interrupts.
1559 * @param fGroup1 Whether to consider group 1 interrupts.
1560 * @param pidxIntr Where to store the distributor interrupt index for the
1561 * returned interrupt ID. UINT16_MAX if this function returns
1562 * GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT. Optional, can be
1563 * NULL.
1564 * @param pbPriority Where to store the priority of the returned interrupt ID.
1565 * GIC_IDLE_PRIORITY if this function returns
1566 * GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT.
1567 */
1568static uint16_t gicGetHighestPriorityPendingIntr(PCGICDEV pGicDev, PCGICCPU pGicCpu, bool fGroup0, bool fGroup1,
1569 uint16_t *pidxIntr, uint8_t *pbPriority)
1570{
1571#if 1
1572 uint16_t idxIntr = UINT16_MAX;
1573 uint16_t uIntId = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1574 uint8_t uPriority = GIC_IDLE_PRIORITY;
1575
1576 /* Redistributor. */
1577 {
1578 uint32_t bmReDistIntrs[RT_ELEMENTS(pGicCpu->bmIntrPending)];
1579 AssertCompile(sizeof(pGicCpu->bmIntrPending) == sizeof(bmReDistIntrs));
1580 for (uint16_t i = 0; i < RT_ELEMENTS(bmReDistIntrs); i++)
1581 {
1582 /* Collect interrupts that are pending, enabled and inactive. */
1583 bmReDistIntrs[i] = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
1584 /* Discard interrupts if the group they belong to is disabled. */
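            /* A set bit in bmIntrGroup marks the interrupt as Group 1; a clear bit marks it as Group 0. */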
1585 if (!fGroup1)
1586 bmReDistIntrs[i] &= ~pGicCpu->bmIntrGroup[i];
1587 if (!fGroup0)
1588 bmReDistIntrs[i] &= pGicCpu->bmIntrGroup[i];
1589 }
1590 /* Among the collected interrupts, pick the one with the highest, non-idle priority. */
1591 uint16_t idxHighest = UINT16_MAX;
1592 const void *pvIntrs = &bmReDistIntrs[0];
1593 uint32_t const cIntrs = sizeof(bmReDistIntrs) * 8; AssertCompile(!(cIntrs % 32));
1594 int16_t idxPending = ASMBitFirstSet(pvIntrs, cIntrs);
1595 if (idxPending >= 0)
1596 {
1597 do
1598 {
1599 if (pGicCpu->abIntrPriority[idxPending] < uPriority)
1600 {
1601 idxHighest = (uint16_t)idxPending;
1602 uPriority = pGicCpu->abIntrPriority[idxPending];
1603 }
1604 idxPending = ASMBitNextSet(pvIntrs, cIntrs, idxPending);
1605 } while (idxPending != -1);
1606 if (idxHighest != UINT16_MAX)
1607 {
1608 uIntId = gicReDistGetIntIdFromIndex(idxHighest);
1609 idxIntr = idxHighest;
1610 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
1611 || GIC_IS_INTR_EXT_PPI(uIntId));
1612 }
1613 }
1614 }
1615
1616 /* Distributor */
1617 {
1618 uint32_t bmDistIntrs[RT_ELEMENTS(pGicDev->bmIntrPending)];
1619 AssertCompile(sizeof(pGicDev->bmIntrPending) == sizeof(bmDistIntrs));
1620 for (uint16_t i = 0; i < RT_ELEMENTS(bmDistIntrs); i++)
1621 {
1622 /* Collect interrupts that are pending, enabled and inactive. */
1623 bmDistIntrs[i] = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
1624 /* Discard interrupts if the group they belong to is disabled. */
1625 if (!fGroup1)
1626 bmDistIntrs[i] &= ~pGicDev->bmIntrGroup[i];
1627 if (!fGroup0)
1628 bmDistIntrs[i] &= pGicDev->bmIntrGroup[i];
1629 }
1630 /* Among the collected interrupts, pick one with priority higher than what we picked from the redistributor. */
1631 {
1632 uint16_t idxHighest = UINT16_MAX;
1633 const void *pvIntrs = &bmDistIntrs[0];
1634 uint32_t const cIntrs = sizeof(bmDistIntrs) * 8; AssertCompile(!(cIntrs % 32));
1635 int16_t idxPending = ASMBitFirstSet(pvIntrs, cIntrs);
1636 if (idxPending >= 0)
1637 {
1638 do
1639 {
1640 if (pGicDev->abIntrPriority[idxPending] < uPriority)
1641 {
1642 idxHighest = (uint16_t)idxPending;
1643 uPriority = pGicDev->abIntrPriority[idxPending];
1644 }
1645 idxPending = ASMBitNextSet(pvIntrs, cIntrs, idxPending);
1646 } while (idxPending != -1);
1647 if (idxHighest != UINT16_MAX)
1648 {
1649 uIntId = gicDistGetIntIdFromIndex(idxHighest);
1650 idxIntr = idxHighest;
1651 Assert( GIC_IS_INTR_SPI(uIntId)
1652 || GIC_IS_INTR_EXT_SPI(uIntId));
1653 }
1654 }
1655 }
1656 }
1657#else /** @todo Measure and pick the faster version. */
1658 /*
1659 * Collect interrupts that are pending, enabled and inactive.
1660 * Discard interrupts if the group they belong to is disabled.
1661 * While collecting the interrupts, pick the one with the highest, non-idle priority.
1662 */
1663 uint16_t uIntId = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1664 uint16_t idxIntr = UINT16_MAX;
1665 uint8_t uPriority = GIC_IDLE_PRIORITY;
1666
1667 /* Redistributor. */
1668 {
1669 uint16_t idxHighest = UINT16_MAX;
1670 for (uint16_t i = 0; i < RT_ELEMENTS(pGicCpu->bmIntrPending); i++)
1671 {
1672 uint32_t uIntrPending = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
1673 if (!fGroup1)
1674 uIntrPending &= ~pGicCpu->bmIntrGroup[i];
1675 if (!fGroup0)
1676 uIntrPending &= pGicCpu->bmIntrGroup[i];
1677
1678 uint16_t const idxPending = ASMBitFirstSetU32(uIntrPending);
1679 if (idxPending > 0)
1680 {
1681 uint32_t const idxPriority = 32 * i + idxPending - 1;
1682 Assert(idxPriority < RT_ELEMENTS(pGicCpu->abIntrPriority));
1683 if (pGicCpu->abIntrPriority[idxPriority] < uPriority)
1684 {
1685 idxHighest = idxPriority;
1686 uPriority = pGicCpu->abIntrPriority[idxPriority];
1687 }
1688 }
1689 }
1690 if (idxHighest != UINT16_MAX)
1691 {
1692 idxIntr = idxHighest;
1693 uIntId = gicReDistGetIntIdFromIndex(idxHighest);
1694 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
1695 || GIC_IS_INTR_EXT_PPI(uIntId));
1696 Assert(uPriority != GIC_IDLE_PRIORITY);
1697 }
1698 }
1699
1700 /* Distributor. */
1701 {
1702 uint16_t idxHighest = UINT16_MAX;
1703 for (uint16_t i = 0; i < RT_ELEMENTS(pGicDev->bmIntrPending); i += 2)
1704 {
1705 uint32_t uLo = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
1706 uint32_t uHi = (pGicDev->bmIntrPending[i + 1] & pGicDev->bmIntrEnabled[i + 1]) & ~pGicDev->bmIntrActive[i + 1];
1707 if (!fGroup1)
1708 {
1709 uLo &= ~pGicDev->bmIntrGroup[i];
1710 uHi &= ~pGicDev->bmIntrGroup[i + 1];
1711 }
1712 if (!fGroup0)
1713 {
1714 uLo &= pGicDev->bmIntrGroup[i];
1715 uHi &= pGicDev->bmIntrGroup[i + 1];
1716 }
1717
1718 uint64_t const uIntrPending = RT_MAKE_U64(uLo, uHi);
1719 uint16_t const idxPending = ASMBitFirstSetU64(uIntrPending);
1720 if (idxPending > 0)
1721 {
1722                uint32_t const idxPriority = 32 * i + idxPending - 1;
1723 if (pGicDev->abIntrPriority[idxPriority] < uPriority)
1724 {
1725 idxHighest = idxPriority;
1726 uPriority = pGicDev->abIntrPriority[idxPriority];
1727 }
1728 }
1729 }
1730 if (idxHighest != UINT16_MAX)
1731 {
1732 idxIntr = idxHighest;
1733 uIntId = gicDistGetIntIdFromIndex(idxHighest);
1734 Assert( GIC_IS_INTR_SPI(uIntId)
1735 || GIC_IS_INTR_EXT_SPI(uIntId));
1736 Assert(uPriority != GIC_IDLE_PRIORITY);
1737 }
1738 }
1739#endif
1740
1741 /* Ensure that if no interrupt is pending, the idle priority is returned. */
1742 Assert(uIntId != GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT || uPriority == GIC_IDLE_PRIORITY);
1743 if (pbPriority)
1744 *pbPriority = uPriority;
1745 if (pidxIntr)
1746 *pidxIntr = idxIntr;
1747
1748 LogFlowFunc(("uIntId=%u [idxIntr=%u uPriority=%u]\n", uIntId, idxIntr, uPriority));
1749 return uIntId;
1750}
1751
1752
1753/**
1754 * Gets and acknowledges the interrupt ID of a signalled interrupt.
1755 *
1756 * @returns The interrupt ID or GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT if no interrupts
1757 * are pending or not in a state to be signalled.
1758 * @param pGicDev The GIC distributor state.
1759 * @param pVCpu The cross context virtual CPU structure.
1760 * @param fGroup0 Whether to consider group 0 interrupts.
1761 * @param fGroup1 Whether to consider group 1 interrupts.
1762 */
1763static uint16_t gicAckHighestPriorityPendingIntr(PGICDEV pGicDev, PVMCPUCC pVCpu, bool fGroup0, bool fGroup1)
1764{
1765 Assert(fGroup0 || fGroup1);
1766 LogFlowFunc(("fGroup0=%RTbool fGroup1=%RTbool\n", fGroup0, fGroup1));
1767
1768 /*
1769 * Get the pending interrupt with the highest priority for the given group.
1770 */
1771 uint8_t bIntrPriority;
1772 uint16_t idxIntr;
1773 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1774 STAM_PROFILE_START(&pGicCpu->StatProfIntrAck, x);
1775 uint16_t const uIntId = gicGetHighestPriorityPendingIntr(pGicDev, pGicCpu, fGroup0, fGroup1, &idxIntr, &bIntrPriority);
1776 if (uIntId != GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
1777 {
1778 /*
1779 * The interrupt priority must be higher than the priority mask of the CPU interface for the
1780 * interrupt to be signalled/acknowledged. Here, we must NOT use priority grouping when comparing
1781 * the priority of a pending interrupt with this priority mask (threshold).
1782 *
1783 * See ARM GIC spec. 4.8.6 "Priority masking".
1784 */
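        /* Note: lower numeric values mean higher priority, so '>=' filters out interrupts at or below the mask. */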
1785 if (bIntrPriority >= pGicCpu->bIntrPriorityMask)
1786 {
1787 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1788 return GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1789 }
1790
1791 /*
1792 * The group priority of the pending interrupt must be higher than that of the running priority.
1793         * The number of bits for the group priority depends on the binary point registers.
1794 * We mask the sub-priority bits and only compare the group priority.
1795 *
1796         * When the binary point registers indicate no preemption, we must allow interrupts that have
1797 * a higher priority than idle. Hence, the use of two different masks below.
1798 *
1799 * See ARM GIC spec. 4.8.3 "Priority grouping".
1800 * See ARM GIC spec. 4.8.5 "Preemption".
1801 */
1802 static uint8_t const s_afGroupPriorityMasks[8] = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00 };
1803 static uint8_t const s_afRunningPriorityMasks[8] = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0xff };
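        /* Illustrative example: with a binary point of 3 the group mask is 0xf0, so a pending priority of
           0x28 has group priority 0x20 and preempts a running priority of 0x30 (group 0x30), while a pending
           priority of 0x38 (group 0x30) does not. */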
1804 uint8_t const idxPriorityMask = (fGroup0 || (pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_CBPR))
1805 ? pGicCpu->bBinaryPtGroup0 & 7
1806 : pGicCpu->bBinaryPtGroup1 & 7;
1807 uint8_t const bRunningPriority = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority];
1808 uint8_t const bRunningGroupPriority = bRunningPriority & s_afRunningPriorityMasks[idxPriorityMask];
1809 uint8_t const bIntrGroupPriority = bIntrPriority & s_afGroupPriorityMasks[idxPriorityMask];
1810 if (bIntrGroupPriority >= bRunningGroupPriority)
1811 {
1812 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1813 return GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1814 }
1815
1816 /*
1817 * Acknowledge the interrupt.
1818 */
1819 bool const fIsRedistIntId = GIC_IS_INTR_SGI_OR_PPI(uIntId) || GIC_IS_INTR_EXT_PPI(uIntId);
1820 if (fIsRedistIntId)
1821 {
1822 /* Mark the interrupt as active. */
1823 AssertMsg(idxIntr < sizeof(pGicCpu->bmIntrActive) * 8, ("idxIntr=%u\n", idxIntr));
1824 ASMBitSet(&pGicCpu->bmIntrActive[0], idxIntr);
1825
1826 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (start). */
1827 /* Update the active priorities bitmap. */
1828 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup0) * 8 >= 128);
1829 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
1830 uint8_t const idxPreemptionLevel = bIntrPriority >> 1;
1831 if (fGroup0)
1832 ASMBitSet(&pGicCpu->bmActivePriorityGroup0[0], idxPreemptionLevel);
1833 if (fGroup1)
1834 ASMBitSet(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
1835
1836 /* Drop priority. */
1837 if (RT_LIKELY(pGicCpu->idxRunningPriority < RT_ELEMENTS(pGicCpu->abRunningPriorities) - 1))
1838 {
1839 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1840 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
1841 bIntrPriority,
1842 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority + 1));
1843 ++pGicCpu->idxRunningPriority;
1844 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] = bIntrPriority;
1845 }
1846 else
1847 AssertReleaseMsgFailed(("Index of running-interrupt priority out-of-bounds %u\n", pGicCpu->idxRunningPriority));
1848 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (end). */
1849
1850 /* If it is an edge-triggered interrupt, mark it as no longer pending. */
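            /* Each INTID has 2 bits in the config bitmap; the upper bit (2 * idxIntr + 1) is set for edge-triggered interrupts. */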
1851 AssertRelease(UINT32_C(2) * idxIntr + 1 < sizeof(pGicCpu->bmIntrConfig) * 8);
1852 bool const fEdgeTriggered = ASMBitTest(&pGicCpu->bmIntrConfig[0], 2 * idxIntr + 1);
1853 if (fEdgeTriggered)
1854 ASMBitClear(&pGicCpu->bmIntrPending[0], idxIntr);
1855
1856 /* Update the redistributor IRQ state to reflect change to the active interrupt. */
1857 gicReDistUpdateIrqState(pGicDev, pVCpu);
1858 }
1859 else
1860 {
1861 /* Sanity check if the interrupt ID belongs to the distributor. */
1862 Assert(GIC_IS_INTR_SPI(uIntId) || GIC_IS_INTR_EXT_SPI(uIntId));
1863
1864 /* Mark the interrupt as active. */
1865 Assert(idxIntr < sizeof(pGicDev->bmIntrActive) * 8);
1866 ASMBitSet(&pGicDev->bmIntrActive[0], idxIntr);
1867
1868 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (start). */
1869 /* Update the active priorities bitmap. */
1870 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup0) * 8 >= 128);
1871 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
1872 uint8_t const idxPreemptionLevel = bIntrPriority >> 1;
1873 if (fGroup0)
1874 ASMBitSet(&pGicCpu->bmActivePriorityGroup0[0], idxPreemptionLevel);
1875 if (fGroup1)
1876 ASMBitSet(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
1877
1878 /* Drop priority. */
1879 if (RT_LIKELY(pGicCpu->idxRunningPriority < RT_ELEMENTS(pGicCpu->abRunningPriorities) - 1))
1880 {
1881 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1882 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
1883 bIntrPriority,
1884 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority + 1));
1885 ++pGicCpu->idxRunningPriority;
1886 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] = bIntrPriority;
1887 }
1888 else
1889 AssertReleaseMsgFailed(("Index of running-interrupt priority out-of-bounds %u\n", pGicCpu->idxRunningPriority));
1890 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (end). */
1891
1892 /* If it is an edge-triggered interrupt, mark it as no longer pending. */
1893 AssertRelease(UINT32_C(2) * idxIntr + 1 < sizeof(pGicDev->bmIntrConfig) * 8);
1894 bool const fEdgeTriggered = ASMBitTest(&pGicDev->bmIntrConfig[0], 2 * idxIntr + 1);
1895 if (fEdgeTriggered)
1896 ASMBitClear(&pGicDev->bmIntrPending[0], idxIntr);
1897
1898 /* Update the distributor IRQ state to reflect change to the active interrupt. */
1899 gicDistUpdateIrqState(pVCpu->CTX_SUFF(pVM), pGicDev);
1900 }
1901 }
1902 else
1903 Assert(bIntrPriority == GIC_IDLE_PRIORITY);
1904
1905 LogFlowFunc(("uIntId=%u\n", uIntId));
1906 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1907 return uIntId;
1908}
1909
1910
1911/**
1912 * Reads a distributor register.
1913 *
1914 * @returns Strict VBox status code.
1915 * @param pDevIns The device instance.
1916 * @param pVCpu The cross context virtual CPU structure.
1917 * @param offReg The offset of the register being read.
1918 * @param puValue Where to store the register value.
1919 */
1920DECLINLINE(VBOXSTRICTRC) gicDistReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
1921{
1922 VMCPU_ASSERT_EMT(pVCpu); RT_NOREF(pVCpu);
1923 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
1924
1925 /*
1926 * 64-bit registers.
1927 */
1928 {
1929 /*
1930 * GICD_IROUTER<n> and GICD_IROUTER<n>E.
1931 */
1932 uint16_t const cbReg = sizeof(uint64_t);
1933 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE))
1934 {
1935 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
1936 uint16_t const idxExt = GIC_INTID_RANGE_SPI_START;
1937 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERn_OFF_START) / cbReg;
1938 return gicDistReadIntrRoutingReg(pGicDev, idxReg, puValue);
1939 }
1940 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE))
1941 {
1942 uint16_t const idxExt = RT_ELEMENTS(pGicDev->au32IntrRouting) / 2;
1943 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERnE_OFF_START) / cbReg;
1944 return gicDistReadIntrRoutingReg(pGicDev, idxReg, puValue);
1945 }
1946 }
1947
1948 /*
1949 * 32-bit registers.
1950 */
1951 {
1952 /*
1953 * GICD_IGROUPR<n> and GICD_IGROUPR<n>E.
1954 */
1955 uint16_t const cbReg = sizeof(uint32_t);
1956 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE))
1957 {
1958 uint16_t const idxReg = (offReg - GIC_DIST_REG_IGROUPRn_OFF_START) / cbReg;
1959 return gicDistReadIntrGroupReg(pGicDev, idxReg, puValue);
1960 }
1961 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE))
1962 {
1963 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrGroup) / 2;
1964 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IGROUPRnE_OFF_START) / cbReg;
1965 return gicDistReadIntrGroupReg(pGicDev, idxReg, puValue);
1966 }
1967
1968 /*
1969 * GICD_ISENABLER<n> and GICD_ISENABLER<n>E.
1970 * GICD_ICENABLER<n> and GICD_ICENABLER<n>E.
1971 */
1972 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE))
1973 {
1974 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISENABLERn_OFF_START) / cbReg;
1975 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1976 }
1977 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE))
1978 {
1979 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
1980 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISENABLERnE_OFF_START) / cbReg;
1981 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1982 }
1983 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE))
1984 {
1985 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICENABLERn_OFF_START) / cbReg;
1986 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1987 }
1988 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE))
1989 {
1990 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
1991 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICENABLERnE_OFF_START) / cbReg;
1992 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1993 }
1994
1995 /*
1996 * GICD_ISACTIVER<n> and GICD_ISACTIVER<n>E.
1997 * GICD_ICACTIVER<n> and GICD_ICACTIVER<n>E.
1998 */
1999 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE))
2000 {
2001 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISACTIVERn_OFF_START) / cbReg;
2002 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2003 }
2004 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE))
2005 {
2006 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2007 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISACTIVERnE_OFF_START) / cbReg;
2008 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2009 }
2010 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE))
2011 {
2012            uint16_t const idxReg = (offReg - GIC_DIST_REG_ICACTIVERn_OFF_START) / cbReg;
2013 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2014 }
2015 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE))
2016 {
2017 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2018 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICACTIVERnE_OFF_START) / cbReg;
2019 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2020 }
2021
2022 /*
2023 * GICD_IPRIORITYR<n> and GICD_IPRIORITYR<n>E.
2024 */
2025 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE))
2026 {
2027 uint16_t const idxReg = (offReg - GIC_DIST_REG_IPRIORITYRn_OFF_START) / cbReg;
2028 return gicDistReadIntrPriorityReg(pGicDev, idxReg, puValue);
2029 }
2030 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE))
2031 {
2032 uint16_t const idxExt = RT_ELEMENTS(pGicDev->abIntrPriority) / (2 * sizeof(uint32_t));
2033 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IPRIORITYRnE_OFF_START) / cbReg;
2034 return gicDistReadIntrPriorityReg(pGicDev, idxReg, puValue);
2035 }
2036
2037 /*
2038 * GICD_ISPENDR<n> and GICD_ISPENDR<n>E.
2039 * GICD_ICPENDR<n> and GICD_ICPENDR<n>E.
2040 */
2041 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE))
2042 {
2043 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISPENDRn_OFF_START) / cbReg;
2044 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2045 }
2046 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE))
2047 {
2048 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2049 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISPENDRnE_OFF_START) / cbReg;
2050 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2051 }
2052 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE))
2053 {
2054 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICPENDRn_OFF_START) / cbReg;
2055 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2056 }
2057 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE))
2058 {
2059 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2060 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICPENDRnE_OFF_START) / cbReg;
2061 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2062 }
2063
2064 /*
2065 * GICD_ICFGR<n> and GICD_ICFGR<n>E.
2066 */
2067 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE))
2068 {
2069 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICFGRn_OFF_START) / cbReg;
2070 return gicDistReadIntrConfigReg(pGicDev, idxReg, puValue);
2071 }
2072 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE))
2073 {
2074 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrConfig) / 2;
2075 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICFGRnE_OFF_START) / cbReg;
2076 return gicDistReadIntrConfigReg(pGicDev, idxReg, puValue);
2077 }
2078 }
2079
2080 switch (offReg)
2081 {
2082 case GIC_DIST_REG_CTLR_OFF:
2083 Assert(pGicDev->fAffRoutingEnabled);
2084 *puValue = (pGicDev->fIntrGroup0Enabled ? GIC_DIST_REG_CTRL_ENABLE_GRP0 : 0)
2085 | (pGicDev->fIntrGroup1Enabled ? GIC_DIST_REG_CTRL_ENABLE_GRP1_NS : 0)
2086 | GIC_DIST_REG_CTRL_DS /* We don't support multiple security states. */
2087 | GIC_DIST_REG_CTRL_ARE_S; /* We don't support GICv2 backwards compatibility, ARE is always enabled. */
2088 break;
2089 case GIC_DIST_REG_TYPER_OFF:
2090 {
2091 Assert(pGicDev->uMaxSpi > 0 && pGicDev->uMaxSpi <= GIC_DIST_REG_TYPER_NUM_ITLINES);
2092 Assert(pGicDev->fAffRoutingEnabled);
2093 *puValue = GIC_DIST_REG_TYPER_NUM_ITLINES_SET(pGicDev->uMaxSpi)
2094 | GIC_DIST_REG_TYPER_NUM_PES_SET(0) /* Affinity routing is always enabled, hence this MBZ. */
2095 /*| GIC_DIST_REG_TYPER_NMI*/ /** @todo Support non-maskable interrupts */
2096 /*| GIC_DIST_REG_TYPER_SECURITY_EXTN*/ /** @todo Support dual security states. */
2097 | (pGicDev->fMbi ? GIC_DIST_REG_TYPER_MBIS : 0)
2098 | (pGicDev->fRangeSel ? GIC_DIST_REG_TYPER_RSS : 0)
2099 | GIC_DIST_REG_TYPER_IDBITS_SET(15) /* We only support 16-bit interrupt IDs. */
2100 | (pGicDev->fAff3Levels ? GIC_DIST_REG_TYPER_A3V : 0);
2101 if (pGicDev->fExtSpi)
2102 *puValue |= GIC_DIST_REG_TYPER_ESPI
2103 | GIC_DIST_REG_TYPER_ESPI_RANGE_SET(pGicDev->uMaxExtSpi);
2104 if (pGicDev->fLpi)
2105 {
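                /* GICD_TYPER.num_LPIs advertises 2^(num_LPIs + 1) LPIs; the asserts below ensure the highest
                   LPI INTID (LPIs start at GIC_INTID_RANGE_LPI_START) still fits our 16-bit interrupt ID space. */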
2106 Assert(pGicDev->uMaxLpi - 2 < 13);
2107 Assert(GIC_INTID_RANGE_LPI_START + (UINT32_C(2) << pGicDev->uMaxLpi) <= UINT16_MAX);
2108 *puValue |= GIC_DIST_REG_TYPER_LPIS
2109 | GIC_DIST_REG_TYPER_NUM_LPIS_SET(pGicDev->uMaxLpi);
2110 }
2111 break;
2112 }
2113 case GIC_DIST_REG_PIDR2_OFF:
2114 Assert(pGicDev->uArchRev <= GIC_DIST_REG_PIDR2_ARCHREV_GICV4);
2115 *puValue = GIC_DIST_REG_PIDR2_ARCHREV_SET(pGicDev->uArchRev);
2116 break;
2117 case GIC_DIST_REG_IIDR_OFF:
2118 *puValue = GIC_DIST_REG_IIDR_IMPL_SET(GIC_JEDEC_JEP106_IDENTIFICATION_CODE, GIC_JEDEC_JEP106_CONTINUATION_CODE);
2119 break;
2120 case GIC_DIST_REG_TYPER2_OFF:
2121 *puValue = 0;
2122 break;
2123 default:
2124 AssertReleaseMsgFailed(("offReg=%#x\n", offReg));
2125 *puValue = 0;
2126 break;
2127 }
2128 return VINF_SUCCESS;
2129}
2130
2131
2132/**
2133 * Writes a distributor register.
2134 *
2135 * @returns Strict VBox status code.
2136 * @param pDevIns The device instance.
2137 * @param pVCpu The cross context virtual CPU structure.
2138 * @param offReg The offset of the register being written.
2139 * @param uValue The register value.
2140 */
2141DECLINLINE(VBOXSTRICTRC) gicDistWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2142{
2143 VMCPU_ASSERT_EMT(pVCpu); RT_NOREF(pVCpu);
2144 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2145 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
2146
2147 /*
2148 * 64-bit registers.
2149 */
2150 {
2151 /*
2152 * GICD_IROUTER<n> and GICD_IROUTER<n>E.
2153 */
2154 uint16_t const cbReg = sizeof(uint64_t);
2155 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE))
2156 {
2157 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
2158 uint16_t const idxExt = GIC_INTID_RANGE_SPI_START;
2159 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERn_OFF_START) / cbReg;
2160 return gicDistWriteIntrRoutingReg(pGicDev, idxReg, uValue);
2161 }
2162 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE))
2163 {
2164 uint16_t const idxExt = RT_ELEMENTS(pGicDev->au32IntrRouting) / 2;
2165 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERnE_OFF_START) / cbReg;
2166 return gicDistWriteIntrRoutingReg(pGicDev, idxReg, uValue);
2167 }
2168
2169 }
2170
2171 /*
2172 * 32-bit registers.
2173 */
2174 {
2175 /*
2176 * GICD_IGROUPR<n> and GICD_IGROUPR<n>E.
2177 */
2178 uint16_t const cbReg = sizeof(uint32_t);
2179 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE))
2180 {
2181 uint16_t const idxReg = (offReg - GIC_DIST_REG_IGROUPRn_OFF_START) / cbReg;
2182 return gicDistWriteIntrGroupReg(pVM, pGicDev, idxReg, uValue);
2183 }
2184 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE))
2185 {
2186 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrGroup) / 2;
2187 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IGROUPRnE_OFF_START) / cbReg;
2188 return gicDistWriteIntrGroupReg(pVM, pGicDev, idxReg, uValue);
2189 }
2190
2191 /*
2192 * GICD_ISENABLER<n> and GICD_ISENABLER<n>E.
2193 * GICD_ICENABLER<n> and GICD_ICENABLER<n>E.
2194 */
2195 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE))
2196 {
2197 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISENABLERn_OFF_START) / cbReg;
2198 return gicDistWriteIntrSetEnableReg(pVM, pGicDev, idxReg, uValue);
2199 }
2200 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE))
2201 {
2202 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
2203 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISENABLERnE_OFF_START) / cbReg;
2204 return gicDistWriteIntrSetEnableReg(pVM, pGicDev, idxReg, uValue);
2205 }
2206 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE))
2207 {
2208 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICENABLERn_OFF_START) / cbReg;
2209 return gicDistWriteIntrClearEnableReg(pVM, pGicDev, idxReg, uValue);
2210 }
2211 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE))
2212 {
2213 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
2214 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICENABLERnE_OFF_START) / cbReg;
2215 return gicDistWriteIntrClearEnableReg(pVM, pGicDev, idxReg, uValue);
2216 }
2217
2218 /*
2219 * GICD_ISACTIVER<n> and GICD_ISACTIVER<n>E.
2220 * GICD_ICACTIVER<n> and GICD_ICACTIVER<n>E.
2221 */
2222 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE))
2223 {
2224 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISACTIVERn_OFF_START) / cbReg;
2225 return gicDistWriteIntrSetActiveReg(pVM, pGicDev, idxReg, uValue);
2226 }
2227 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE))
2228 {
2229 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2230 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISACTIVERnE_OFF_START) / cbReg;
2231 return gicDistWriteIntrSetActiveReg(pVM, pGicDev, idxReg, uValue);
2232 }
2233 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE))
2234 {
2235 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICACTIVERn_OFF_START) / cbReg;
2236 return gicDistWriteIntrClearActiveReg(pVM, pGicDev, idxReg, uValue);
2237 }
2238 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE))
2239 {
2240 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2241 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICACTIVERnE_OFF_START) / cbReg;
2242 return gicDistWriteIntrClearActiveReg(pVM, pGicDev, idxReg, uValue);
2243 }
2244
2245 /*
2246 * GICD_IPRIORITYR<n> and GICD_IPRIORITYR<n>E.
2247 */
2248 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE))
2249 {
2250 uint16_t const idxReg = (offReg - GIC_DIST_REG_IPRIORITYRn_OFF_START) / cbReg;
2251 return gicDistWriteIntrPriorityReg(pGicDev, idxReg, uValue);
2252 }
2253 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE))
2254 {
2255 uint16_t const idxExt = RT_ELEMENTS(pGicDev->abIntrPriority) / (2 * sizeof(uint32_t));
2256 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IPRIORITYRnE_OFF_START) / cbReg;
2257 return gicDistWriteIntrPriorityReg(pGicDev, idxReg, uValue);
2258 }
2259
2260 /*
2261 * GICD_ISPENDR<n> and GICD_ISPENDR<n>E.
2262 * GICD_ICPENDR<n> and GICD_ICPENDR<n>E.
2263 */
2264 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE))
2265 {
2266 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISPENDRn_OFF_START) / cbReg;
2267 return gicDistWriteIntrSetPendingReg(pVM, pGicDev, idxReg, uValue);
2268 }
2269 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE))
2270 {
2271 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2272 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISPENDRnE_OFF_START) / cbReg;
2273 return gicDistWriteIntrSetPendingReg(pVM, pGicDev, idxReg, uValue);
2274 }
2275 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE))
2276 {
2277 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICPENDRn_OFF_START) / cbReg;
2278 return gicDistWriteIntrClearPendingReg(pVM, pGicDev, idxReg, uValue);
2279 }
2280 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE))
2281 {
2282 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2283 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICPENDRnE_OFF_START) / cbReg;
2284 return gicDistWriteIntrClearPendingReg(pVM, pGicDev, idxReg, uValue);
2285 }
2286
2287 /*
2288 * GICD_ICFGR<n> and GICD_ICFGR<n>E.
2289 */
2290 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE))
2291 {
2292 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICFGRn_OFF_START) / cbReg;
2293 return gicDistWriteIntrConfigReg(pGicDev, idxReg, uValue);
2294 }
2295 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE))
2296 {
2297 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrConfig) / 2;
2298 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICFGRnE_OFF_START) / cbReg;
2299 return gicDistWriteIntrConfigReg(pGicDev, idxReg, uValue);
2300 }
2301 }
2302
2303 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2304 switch (offReg)
2305 {
2306 case GIC_DIST_REG_CTLR_OFF:
2307 Assert(!(uValue & GIC_DIST_REG_CTRL_ARE_NS));
2308 pGicDev->fIntrGroup0Enabled = RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP0);
2309 pGicDev->fIntrGroup1Enabled = RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP1_NS);
2310 rcStrict = gicDistUpdateIrqState(pVM, pGicDev);
2311 break;
2312 default:
2313 {
2314 /* Windows 11 arm64 (24H2) writes zeroes into these reserved registers. We ignore them. */
2315 if (offReg >= 0x7fe0 && offReg <= 0x7ffc)
2316 LogFlowFunc(("Bad guest writing to reserved GIC distributor register space [0x7fe0..0x7ffc] -- ignoring!"));
2317 else
2318 AssertReleaseMsgFailed(("offReg=%#x uValue=%#RX32\n", offReg, uValue));
2319 break;
2320 }
2321 }
2322
2323 return rcStrict;
2324}
2325
2326
2327/**
2328 * Reads a GIC redistributor register.
2329 *
2330 * @returns Strict VBox status code.
2331 * @param pDevIns The device instance.
2332 * @param pVCpu The cross context virtual CPU structure.
2333 * @param idRedist The redistributor ID.
2334 * @param offReg The offset of the register being read.
2335 * @param puValue Where to store the register value.
2336 */
2337DECLINLINE(VBOXSTRICTRC) gicReDistReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint32_t idRedist, uint16_t offReg, uint32_t *puValue)
2338{
2339 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2340 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2341 Assert(idRedist == pVCpu->idCpu);
2342
2343 switch (offReg)
2344 {
2345 case GIC_REDIST_REG_TYPER_OFF:
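            /* We expose one redistributor per VCPU; the LAST bit marks the final redistributor in the contiguous range. */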
2346 *puValue = (pVCpu->idCpu == pVM->cCpus - 1 ? GIC_REDIST_REG_TYPER_LAST : 0)
2347 | GIC_REDIST_REG_TYPER_CPU_NUMBER_SET(idRedist)
2348 | GIC_REDIST_REG_TYPER_CMN_LPI_AFF_SET(GIC_REDIST_REG_TYPER_CMN_LPI_AFF_ALL)
2349 | (pGicDev->fExtPpi ? GIC_REDIST_REG_TYPER_PPI_NUM_SET(pGicDev->uMaxExtPpi) : 0)
2350 | (pGicDev->fLpi ? GIC_REDIST_REG_TYPER_PLPIS : 0);
2351 Assert(!pGicDev->fExtPpi || pGicDev->uMaxExtPpi > 0);
2352 break;
2353 case GIC_REDIST_REG_WAKER_OFF:
2354 *puValue = 0;
2355 break;
2356 case GIC_REDIST_REG_IIDR_OFF:
2357 *puValue = GIC_REDIST_REG_IIDR_IMPL_SET(GIC_JEDEC_JEP106_IDENTIFICATION_CODE, GIC_JEDEC_JEP106_CONTINUATION_CODE);
2358 break;
2359 case GIC_REDIST_REG_TYPER_AFFINITY_OFF:
2360 *puValue = idRedist;
2361 break;
2362 case GIC_REDIST_REG_PIDR2_OFF:
2363 Assert(pGicDev->uArchRev <= GIC_DIST_REG_PIDR2_ARCHREV_GICV4);
2364 *puValue = GIC_REDIST_REG_PIDR2_ARCHREV_SET(pGicDev->uArchRev);
2365 break;
2366 case GIC_REDIST_REG_CTLR_OFF:
2367 *puValue = (pGicDev->fEnableLpis ? GIC_REDIST_REG_CTLR_ENABLE_LPI : 0)
2368 | GIC_REDIST_REG_CTLR_CES_SET(1);
2369 break;
2370 case GIC_REDIST_REG_PROPBASER_OFF:
2371 *puValue = pGicDev->uLpiConfigBaseReg.s.Lo;
2372 break;
2373 case GIC_REDIST_REG_PROPBASER_OFF + 4:
2374 *puValue = pGicDev->uLpiConfigBaseReg.s.Hi;
2375 break;
2376 case GIC_REDIST_REG_PENDBASER_OFF:
2377 *puValue = pGicDev->uLpiPendingBaseReg.s.Lo;
2378 break;
2379 case GIC_REDIST_REG_PENDBASER_OFF + 4:
2380 *puValue = pGicDev->uLpiPendingBaseReg.s.Hi;
2381 break;
2382 default:
2383 AssertReleaseMsgFailed(("offReg=%#x\n", offReg));
2384 *puValue = 0;
2385 break;
2386 }
2387 return VINF_SUCCESS;
2388}
2389
2390
2391/**
2392 * Reads a GIC redistributor SGI/PPI frame register.
2393 *
2394 * @returns Strict VBox status code.
2395 * @param pDevIns The device instance.
2396 * @param pVCpu The cross context virtual CPU structure.
2397 * @param offReg The offset of the register being read.
2398 * @param puValue Where to store the register value.
2399 */
2400DECLINLINE(VBOXSTRICTRC) gicReDistReadSgiPpiRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
2401{
2402 VMCPU_ASSERT_EMT(pVCpu);
2403 RT_NOREF(pDevIns);
2404
2405 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2406 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2407 uint16_t const cbReg = sizeof(uint32_t);
2408
2409 /*
2410 * GICR_IGROUPR0 and GICR_IGROUPR<n>E.
2411 */
2412 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE))
2413 {
2414 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF) / cbReg;
2415 return gicReDistReadIntrGroupReg(pGicDev, pGicCpu, idxReg, puValue);
2416 }
2417
2418 /*
2419 * GICR_ISENABLER0 and GICR_ISENABLER<n>E.
2420 * GICR_ICENABLER0 and GICR_ICENABLER<n>E.
2421 */
2422 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE))
2423 {
2424 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF) / cbReg;
2425 return gicReDistReadIntrEnableReg(pGicDev, pGicCpu, idxReg, puValue);
2426 }
2427 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE))
2428 {
2429        uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF) / cbReg;
2430 return gicReDistReadIntrEnableReg(pGicDev, pGicCpu, idxReg, puValue);
2431 }
2432
2433 /*
2434 * GICR_ISACTIVER0 and GICR_ISACTIVER<n>E.
2435 * GICR_ICACTIVER0 and GICR_ICACTIVER<n>E.
2436 */
2437 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE))
2438 {
2439 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF) / cbReg;
2440 return gicReDistReadIntrActiveReg(pGicCpu, idxReg, puValue);
2441 }
2442 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE))
2443 {
2444 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF) / cbReg;
2445 return gicReDistReadIntrActiveReg(pGicCpu, idxReg, puValue);
2446 }
2447
2448 /*
2449 * GICR_ISPENDR0 and GICR_ISPENDR<n>E.
2450 * GICR_ICPENDR0 and GICR_ICPENDR<n>E.
2451 */
2452 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE))
2453 {
2454 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF) / cbReg;
2455 return gicReDistReadIntrPendingReg(pGicDev, pGicCpu, idxReg, puValue);
2456 }
2457 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE))
2458 {
2459 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF) / cbReg;
2460 return gicReDistReadIntrPendingReg(pGicDev, pGicCpu, idxReg, puValue);
2461 }
2462
2463 /*
2464 * GICR_IPRIORITYR<n> and GICR_IPRIORITYR<n>E.
2465 */
2466 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE))
2467 {
2468 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START) / cbReg;
2469 return gicReDistReadIntrPriorityReg(pGicDev, pGicCpu, idxReg, puValue);
2470 }
2471
2472 /*
2473 * GICR_ICFGR0, GICR_ICFGR1 and GICR_ICFGR<n>E.
2474 */
2475 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE))
2476 {
2477 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF) / cbReg;
2478 return gicReDistReadIntrConfigReg(pGicDev, pGicCpu, idxReg, puValue);
2479 }
2480
2481 AssertReleaseMsgFailed(("offReg=%#x (%s)\n", offReg, gicReDistGetSgiPpiRegDescription(offReg)));
2482 *puValue = 0;
2483 return VINF_SUCCESS;
2484}
2485
2486
2487/**
2488 * Writes a GIC redistributor frame register.
2489 *
2490 * @returns Strict VBox status code.
2491 * @param pDevIns The device instance.
2492 * @param pVCpu The cross context virtual CPU structure.
2493 * @param offReg The offset of the register being written.
2494 * @param uValue The register value.
2495 */
2496DECLINLINE(VBOXSTRICTRC) gicReDistWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2497{
2498 VMCPU_ASSERT_EMT(pVCpu);
2499 RT_NOREF(pVCpu, uValue);
2500
2501 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2502 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2503 switch (offReg)
2504 {
2505 case GIC_REDIST_REG_WAKER_OFF:
2506 Assert(uValue == 0);
2507 break;
2508 case GIC_REDIST_REG_CTLR_OFF:
2509 {
2510 /* Check if LPIs are supported and whether the enable LPI bit changed. */
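            /* Enabling LPIs (re)loads the LPI configuration table and this redistributor's pending table from
               guest memory as programmed via GICR_PROPBASER/GICR_PENDBASER; disabling drops the cached pending bits. */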
2511 uint32_t const uOldCtlr = pGicDev->fEnableLpis ? GIC_REDIST_REG_CTLR_ENABLE_LPI : 0;
2512 uint32_t const uNewCtlr = uValue & GIC_REDIST_REG_CTLR_ENABLE_LPI;
2513 if ( pGicDev->fLpi
2514 && ((uNewCtlr ^ uOldCtlr) & GIC_REDIST_REG_CTLR_ENABLE_LPI))
2515 {
2516 pGicDev->fEnableLpis = RT_BOOL(uNewCtlr & GIC_REDIST_REG_CTLR_ENABLE_LPI);
2517 if (pGicDev->fEnableLpis)
2518 {
2519 gicDistReadLpiConfigTableFromMem(pDevIns, pGicDev);
2520 gicReDistReadLpiPendingBitmapFromMem(pDevIns, pVCpu, pGicDev);
2521 }
2522 else
2523 {
2524 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2525 RT_ZERO(pGicCpu->bmLpiPending);
2526 }
2527 }
2528 break;
2529 }
2530 case GIC_REDIST_REG_PROPBASER_OFF:
2531 pGicDev->uLpiConfigBaseReg.s.Lo = uValue & RT_LO_U32(GIC_REDIST_REG_PROPBASER_RW_MASK);
2532 break;
2533 case GIC_REDIST_REG_PROPBASER_OFF + 4:
2534 pGicDev->uLpiConfigBaseReg.s.Hi = uValue & RT_HI_U32(GIC_REDIST_REG_PROPBASER_RW_MASK);
2535 break;
2536 case GIC_REDIST_REG_PENDBASER_OFF:
2537 pGicDev->uLpiPendingBaseReg.s.Lo = uValue & RT_LO_U32(GIC_REDIST_REG_PENDBASER_RW_MASK);
2538 break;
2539 case GIC_REDIST_REG_PENDBASER_OFF + 4:
2540 pGicDev->uLpiPendingBaseReg.s.Hi = uValue & RT_HI_U32(GIC_REDIST_REG_PENDBASER_RW_MASK);
2541 break;
2542 default:
2543 AssertReleaseMsgFailed(("offReg=%#x (%s) uValue=%#RX32\n", offReg, gicReDistGetRegDescription(offReg), uValue));
2544 break;
2545 }
2546
2547 return rcStrict;
2548}
2549
2550
2551/**
2552 * Writes a GIC redistributor SGI/PPI frame register.
2553 *
2554 * @returns Strict VBox status code.
2555 * @param pDevIns The device instance.
2556 * @param pVCpu The cross context virtual CPU structure.
2557 * @param offReg The offset of the register being written.
2558 * @param uValue The register value.
2559 */
2560DECLINLINE(VBOXSTRICTRC) gicReDistWriteSgiPpiRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2561{
2562 VMCPU_ASSERT_EMT(pVCpu);
2563 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
2564 uint16_t const cbReg = sizeof(uint32_t);
2565
2566 /*
2567 * GICR_IGROUPR0 and GICR_IGROUPR<n>E.
2568 */
2569 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE))
2570 {
2571 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF) / cbReg;
2572 return gicReDistWriteIntrGroupReg(pGicDev, pVCpu, idxReg, uValue);
2573 }
2574
2575 /*
2576 * GICR_ISENABLER0 and GICR_ISENABLER<n>E.
2577 * GICR_ICENABLER0 and GICR_ICENABLER<n>E.
2578 */
2579 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE))
2580 {
2581 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF) / cbReg;
2582 return gicReDistWriteIntrSetEnableReg(pGicDev, pVCpu, idxReg, uValue);
2583 }
2584 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE))
2585 {
2586 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF) / cbReg;
2587 return gicReDistWriteIntrClearEnableReg(pGicDev, pVCpu, idxReg, uValue);
2588 }
2589
2590 /*
2591 * GICR_ISACTIVER0 and GICR_ISACTIVER<n>E.
2592 * GICR_ICACTIVER0 and GICR_ICACTIVER<n>E.
2593 */
2594 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE))
2595 {
2596 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF) / cbReg;
2597 return gicReDistWriteIntrSetActiveReg(pGicDev, pVCpu, idxReg, uValue);
2598 }
2599 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE))
2600 {
2601 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF) / cbReg;
2602 return gicReDistWriteIntrClearActiveReg(pGicDev, pVCpu, idxReg, uValue);
2603 }
2604
2605 /*
2606 * GICR_ISPENDR0 and GICR_ISPENDR<n>E.
2607 * GICR_ICPENDR0 and GICR_ICPENDR<n>E.
2608 */
2609 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE))
2610 {
2611 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF) / cbReg;
2612 return gicReDistWriteIntrSetPendingReg(pGicDev, pVCpu, idxReg, uValue);
2613 }
2614 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE))
2615 {
2616 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF) / cbReg;
2617 return gicReDistWriteIntrClearPendingReg(pGicDev, pVCpu, idxReg, uValue);
2618 }
2619
2620 /*
2621 * GICR_IPRIORITYR<n> and GICR_IPRIORITYR<n>E.
2622 */
2623 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE))
2624 {
2625 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START) / cbReg;
2626 return gicReDistWriteIntrPriorityReg(pGicDev, pVCpu, idxReg, uValue);
2627 }
2628
2629 /*
2630     * GICR_ICFGR0, GICR_ICFGR1 and GICR_ICFGR<n>E.
2631 */
2632 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE))
2633 {
2634 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF) / cbReg;
2635 return gicReDistWriteIntrConfigReg(pGicDev, pVCpu, idxReg, uValue);
2636 }
2637
2638 AssertReleaseMsgFailed(("offReg=%#RX16 (%s)\n", offReg, gicReDistGetSgiPpiRegDescription(offReg)));
2639 return VERR_INTERNAL_ERROR_2;
2640}
2641
2642
2643/**
2644 * @interface_method_impl{PDMGICBACKEND,pfnSetSpi}
2645 */
2646static DECLCALLBACK(int) gicSetSpi(PVMCC pVM, uint32_t uSpiIntId, bool fAsserted)
2647{
2648 LogFlowFunc(("pVM=%p uSpiIntId=%u fAsserted=%RTbool\n",
2649 pVM, uSpiIntId, fAsserted));
2650
2651 PGIC pGic = VM_TO_GIC(pVM);
2652 PPDMDEVINS pDevIns = pGic->CTX_SUFF(pDevIns);
2653 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2654
2655#ifdef VBOX_WITH_STATISTICS
2656 PVMCPU pVCpu = VMMGetCpuById(pVM, 0);
2657 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetSpi);
2658 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2659#endif
2660 STAM_PROFILE_START(&pGicCpu->StatProfSetSpi, a);
2661
2662 uint16_t const uIntId = GIC_INTID_RANGE_SPI_START + uSpiIntId;
2663 uint16_t const idxIntr = gicDistGetIndexFromIntId(uIntId);
2664
2665 Assert(idxIntr >= GIC_INTID_RANGE_SPI_START);
2666 AssertMsgReturn(idxIntr < sizeof(pGicDev->bmIntrPending) * 8,
2667 ("out-of-range SPI interrupt ID %RU32 (%RU32)\n", uIntId, uSpiIntId),
2668 VERR_INVALID_PARAMETER);
2669
2670 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
2671 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
2672
2673 /* Update the interrupt pending state. */
2674 if (fAsserted)
2675 ASMBitSet(&pGicDev->bmIntrPending[0], idxIntr);
2676 else
2677 ASMBitClear(&pGicDev->bmIntrPending[0], idxIntr);
2678
2679 int const rc = VBOXSTRICTRC_VAL(gicDistUpdateIrqState(pVM, pGicDev));
2680 STAM_PROFILE_STOP(&pGicCpu->StatProfSetSpi, a);
2681
2682 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
2683 return rc;
2684}
2685
2686
2687/**
2688 * @interface_method_impl{PDMGICBACKEND,pfnSetPpi}
2689 */
2690static DECLCALLBACK(int) gicSetPpi(PVMCPUCC pVCpu, uint32_t uPpiIntId, bool fAsserted)
2691{
2692 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uPpiIntId=%u fAsserted=%RTbool\n", pVCpu, pVCpu->idCpu, uPpiIntId, fAsserted));
2693
2694 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2695 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
2696 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2697
2698 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetPpi);
2699 STAM_PROFILE_START(&pGicCpu->StatProfSetPpi, b);
2700
2701 uint32_t const uIntId = GIC_INTID_RANGE_PPI_START + uPpiIntId;
2702 uint16_t const idxIntr = gicReDistGetIndexFromIntId(uIntId);
2703
2704 Assert(idxIntr >= GIC_INTID_RANGE_PPI_START);
2705 AssertMsgReturn(idxIntr < sizeof(pGicCpu->bmIntrPending) * 8,
2706 ("out-of-range PPI interrupt ID %RU32 (%RU32)\n", uIntId, uPpiIntId),
2707 VERR_INVALID_PARAMETER);
2708
2709 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
2710 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
2711
2712 /* Update the interrupt pending state. */
2713 if (fAsserted)
2714 ASMBitSet(&pGicCpu->bmIntrPending[0], idxIntr);
2715 else
2716 ASMBitClear(&pGicCpu->bmIntrPending[0], idxIntr);
2717
2718 int const rc = VBOXSTRICTRC_VAL(gicReDistUpdateIrqState(pGicDev, pVCpu));
2719 STAM_PROFILE_STOP(&pGicCpu->StatProfSetPpi, b);
2720
2721 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
2722 return rc;
2723}
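/*
 * Illustrative usage sketch (not part of the device code): PPIs are private to
 * a PE, so callers pass the target VCPU rather than the VM. For instance, a
 * per-VCPU timer model asserting PPI line 11 (INTID 27, given the standard
 * GICv3 private INTID base of 16) might do:
 *
 *     rc = g_GicBackend.pfnSetPpi(pVCpu, 11, true);
 *
 * Which PPI numbers are wired up for timers and the like is a platform
 * decision made outside this file.
 */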
2724
2725
2726/**
2727 * Sets the specified software generated interrupt (SGI).
2728 *
2729 * @returns Strict VBox status code.
2730 * @param pGicDev The GIC distributor state.
2731 * @param pVCpu The cross context virtual CPU structure.
2732 * @param pDestCpuSet Which CPUs to deliver the SGI to.
2733 * @param uIntId The SGI interrupt ID.
2734 */
2735static VBOXSTRICTRC gicSetSgi(PCGICDEV pGicDev, PVMCPUCC pVCpu, PCVMCPUSET pDestCpuSet, uint8_t uIntId)
2736{
2737 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uIntId=%u\n", pVCpu, pVCpu->idCpu, uIntId));
2738
2739 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2740 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2741 uint32_t const cCpus = pVM->cCpus;
2742 AssertReturn(uIntId <= GIC_INTID_RANGE_SGI_LAST, VERR_INVALID_PARAMETER);
2743 Assert(PDMDevHlpCritSectIsOwner(pDevIns, pDevIns->pCritSectRoR3)); RT_NOREF_PV(pDevIns);
2744
2745 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
2746 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
2747 {
2748 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVM->CTX_SUFF(apCpus)[idCpu]);
2749 pGicCpu->bmIntrPending[0] |= RT_BIT_32(uIntId);
2750 }
2751
2752 return gicDistUpdateIrqState(pVM, pGicDev);
2753}
2754
2755
2756/**
2757 * Writes to the redistributor's SGI group 1 register (ICC_SGI1R_EL1).
2758 *
2759 * @returns Strict VBox status code.
2760 * @param pGicDev The GIC distributor state.
2761 * @param pVCpu The cross context virtual CPU structure.
2762 * @param uValue The value being written to the ICC_SGI1R_EL1 register.
2763 */
2764static VBOXSTRICTRC gicReDistWriteSgiReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint64_t uValue)
2765{
2766#ifdef VBOX_WITH_STATISTICS
2767 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2768 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetSgi);
2769 STAM_PROFILE_START(&pGicCpu->StatProfSetSgi, c);
2770#else
2771 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2772#endif
2773
2774 VMCPUSET DestCpuSet;
2775 if (uValue & ARMV8_ICC_SGI1R_EL1_AARCH64_IRM)
2776 {
2777 /*
2778 * Deliver to all VCPUs but this one.
2779 */
2780 VMCPUSET_FILL(&DestCpuSet);
2781 VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
2782 }
2783 else
2784 {
2785 /*
2786 * Target specific VCPUs.
2787 * See ARM GICv3 and GICv4 Software Overview spec 3.3 "Affinity routing".
2788 */
2789 VMCPUSET_EMPTY(&DestCpuSet);
2790 bool const fRangeSelSupport = RT_BOOL(pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_RSS);
2791 uint8_t const idRangeStart = ARMV8_ICC_SGI1R_EL1_AARCH64_RS_GET(uValue) * 16;
2792 uint16_t const bmCpuInterfaces = ARMV8_ICC_SGI1R_EL1_AARCH64_TARGET_LIST_GET(uValue);
2793 uint8_t const uAff1 = ARMV8_ICC_SGI1R_EL1_AARCH64_AFF1_GET(uValue);
2794 uint8_t const uAff2 = ARMV8_ICC_SGI1R_EL1_AARCH64_AFF2_GET(uValue);
2795 uint8_t const uAff3 = (pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_A3V)
2796 ? ARMV8_ICC_SGI1R_EL1_AARCH64_AFF3_GET(uValue)
2797 : 0;
2798 uint32_t const cCpus = pVCpu->CTX_SUFF(pVM)->cCpus;
2799 for (uint8_t idCpuInterface = 0; idCpuInterface < 16; idCpuInterface++)
2800 {
2801 if (bmCpuInterfaces & RT_BIT(idCpuInterface))
2802 {
2803 VMCPUID idCpuTarget;
2804 if (fRangeSelSupport)
2805 idCpuTarget = RT_MAKE_U32_FROM_U8(idRangeStart + idCpuInterface, uAff1, uAff2, uAff3);
2806 else
2807 idCpuTarget = gicGetCpuIdFromAffinity(idCpuInterface, uAff1, uAff2, uAff3);
2808 if (RT_LIKELY(idCpuTarget < cCpus))
2809 VMCPUSET_ADD(&DestCpuSet, idCpuTarget);
2810 else
2811 AssertReleaseMsgFailed(("VCPU ID out-of-bounds %RU32, must be < %u\n", idCpuTarget, cCpus));
2812 }
2813 }
2814 }
2815
2816 if (!VMCPUSET_IS_EMPTY(&DestCpuSet))
2817 {
2818 uint8_t const uSgiIntId = ARMV8_ICC_SGI1R_EL1_AARCH64_INTID_GET(uValue);
2819 Assert(GIC_IS_INTR_SGI(uSgiIntId));
2820 VBOXSTRICTRC const rcStrict = gicSetSgi(pGicDev, pVCpu, &DestCpuSet, uSgiIntId);
2821 Assert(RT_SUCCESS(rcStrict)); RT_NOREF_PV(rcStrict);
2822 }
2823
2824 STAM_PROFILE_STOP(&pGicCpu->StatProfSetSgi, c);
2825 return VINF_SUCCESS;
2826}
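/*
 * Worked decoding example for ICC_SGI1R_EL1 (illustrative only; the fields are
 * extracted with the ARMV8_ICC_SGI1R_EL1_AARCH64_*_GET macros used above):
 *
 *   IRM = 0, Aff3 = 0, Aff2 = 0, Aff1 = 1, RS = 0, TargetList = 0b0101, INTID = 3
 *
 * requests SGI 3 for the PEs with affinities 0.0.1.0 and 0.0.1.2 (target-list
 * bit n selects the PE with Aff0 = RS * 16 + n). Without range selection (RSS
 * clear in ICC_CTLR) those affinities are translated to VCPU IDs via
 * gicGetCpuIdFromAffinity(); with RSS the VCPU ID is composed directly from
 * the affinity bytes. Out-of-range targets trip the release assertion above.
 */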
2827
2828
2829/**
2830 * @interface_method_impl{PDMGICBACKEND,pfnReadSysReg}
2831 */
2832static DECLCALLBACK(VBOXSTRICTRC) gicReadSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
2833{
2834 /*
2835 * Validate.
2836 */
2837 VMCPU_ASSERT_EMT(pVCpu);
2838 Assert(pu64Value);
2839
2840 STAM_COUNTER_INC(&pVCpu->gic.s.StatSysRegRead);
2841
2842 *pu64Value = 0;
2843 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2844 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2845 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2846
2847 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
2848 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
2849
2850 switch (u32Reg)
2851 {
2852 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
2853 *pu64Value = pGicCpu->bIntrPriorityMask;
2854 break;
2855 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
2856 AssertReleaseFailed();
2857 break;
2858 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
2859 AssertReleaseFailed();
2860 break;
2861 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
2862 AssertReleaseFailed();
2863 break;
2864 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
2865 *pu64Value = ARMV8_ICC_BPR0_EL1_AARCH64_BINARYPOINT_SET(pGicCpu->bBinaryPtGroup0);
2866 break;
2867 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
2868 AssertReleaseFailed();
2869 *pu64Value = pGicCpu->bmActivePriorityGroup0[0];
2870 break;
2871 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
2872 AssertReleaseFailed();
2873 *pu64Value = pGicCpu->bmActivePriorityGroup0[1];
2874 break;
2875 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
2876 AssertReleaseFailed();
2877 *pu64Value = pGicCpu->bmActivePriorityGroup0[2];
2878 break;
2879 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
2880 AssertReleaseFailed();
2881 *pu64Value = pGicCpu->bmActivePriorityGroup0[3];
2882 break;
2883 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
2884 AssertReleaseFailed();
2885 *pu64Value = pGicCpu->bmActivePriorityGroup1[0];
2886 break;
2887 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
2888 AssertReleaseFailed();
2889 *pu64Value = pGicCpu->bmActivePriorityGroup1[1];
2890 break;
2891 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
2892 AssertReleaseFailed();
2893 *pu64Value = pGicCpu->bmActivePriorityGroup1[2];
2894 break;
2895 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
2896 AssertReleaseFailed();
2897 *pu64Value = pGicCpu->bmActivePriorityGroup1[3];
2898 break;
2899 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
2900 AssertReleaseFailed();
2901 break;
2902 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
2903 AssertReleaseFailed();
2904 break;
2905 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
2906 *pu64Value = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority];
2907 break;
2908 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
2909 AssertReleaseFailed();
2910 break;
2911 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
2912 AssertReleaseFailed();
2913 break;
2914 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
2915 AssertReleaseFailed();
2916 break;
2917 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
2918 *pu64Value = gicAckHighestPriorityPendingIntr(pGicDev, pVCpu, false /*fGroup0*/, true /*fGroup1*/);
2919 break;
2920 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
2921 AssertReleaseFailed();
2922 break;
2923 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
2924 {
2925 AssertReleaseFailed();
2926 *pu64Value = gicGetHighestPriorityPendingIntr(pGicDev, pGicCpu, false /*fGroup0*/, true /*fGroup1*/,
2927 NULL /*pidxIntr*/, NULL /*pbPriority*/);
2928 break;
2929 }
2930 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
2931 *pu64Value = ARMV8_ICC_BPR1_EL1_AARCH64_BINARYPOINT_SET(pGicCpu->bBinaryPtGroup1);
2932 break;
2933 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
2934 *pu64Value = pGicCpu->uIccCtlr;
2935 break;
2936 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
2937 AssertReleaseFailed();
2938 break;
2939 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
2940 *pu64Value = pGicCpu->fIntrGroup0Enabled ? ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE : 0;
2941 break;
2942 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
2943 *pu64Value = pGicCpu->fIntrGroup1Enabled ? ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE : 0;
2944 break;
2945 default:
2946 AssertReleaseMsgFailed(("u32Reg=%#RX32\n", u32Reg));
2947 break;
2948 }
2949
2950 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
2951
2952 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} pu64Value=%RX64\n", pVCpu, u32Reg, gicIccGetRegDescription(u32Reg), *pu64Value));
2953 return VINF_SUCCESS;
2954}
2955
2956
2957/**
2958 * @interface_method_impl{PDMGICBACKEND,pfnWriteSysReg}
2959 */
2960static DECLCALLBACK(VBOXSTRICTRC) gicWriteSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t u64Value)
2961{
2962 /*
2963 * Validate.
2964 */
2965 VMCPU_ASSERT_EMT(pVCpu);
2966 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} u64Value=%RX64\n", pVCpu, u32Reg, gicIccGetRegDescription(u32Reg), u64Value));
2967
2968 STAM_COUNTER_INC(&pVCpu->gic.s.StatSysRegWrite);
2969
2970 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2971 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2972 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2973
2974 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
2975 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
2976
2977 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2978 switch (u32Reg)
2979 {
2980 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
2981 LogFlowFunc(("ICC_PMR_EL1: Interrupt priority now %u\n", (uint8_t)u64Value));
2982 pGicCpu->bIntrPriorityMask = (uint8_t)u64Value;
2983 rcStrict = gicReDistUpdateIrqState(pGicDev, pVCpu);
2984 break;
2985 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
2986 AssertReleaseFailed();
2987 break;
2988 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
2989 AssertReleaseFailed();
2990 break;
2991 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
2992 AssertReleaseFailed();
2993 break;
2994 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
2995 pGicCpu->bBinaryPtGroup0 = (uint8_t)ARMV8_ICC_BPR0_EL1_AARCH64_BINARYPOINT_GET(u64Value);
2996 break;
2997 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
2998 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
2999 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
3000 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
3001 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
3002 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
3003 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
3004 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
3005            /* Writes are ignored; a well-behaved guest would write all zeroes or the last value read from the register. */
3006 break;
3007 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
3008 AssertReleaseFailed();
3009 break;
3010 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
3011 AssertReleaseFailed();
3012 break;
3013 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
3014 AssertReleaseFailed();
3015 break;
3016 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
3017 {
3018 gicReDistWriteSgiReg(pGicDev, pVCpu, u64Value);
3019 break;
3020 }
3021 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
3022 AssertReleaseFailed();
3023 break;
3024 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
3025 AssertReleaseFailed();
3026 break;
3027 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
3028 AssertReleaseFailed();
3029 break;
3030 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
3031 {
3032 /*
3033             * We only support combined priority drop + interrupt deactivation on writes to this register (EOImode = 0).
3034             * This spares the guest the separate ICC_DIR_EL1 access it would otherwise need for deactivation.
3035 */
3036 Assert(!(pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_EOIMODE));
3037
3038 /*
3039 * Mark the interrupt as inactive, though it might still be pending.
3040 * It is up to the guest to ensure the interrupt ID belongs to the right group as
3041 * failure to do so results in unpredictable behavior.
3042 *
3043 * See ARM GIC spec. 12.2.10 "ICC_EOIR1_EL1, Interrupt Controller End Of Interrupt Register 1".
3044             * NOTE! The order of the 'if' checks below is crucial.
3045 */
3046 uint16_t const uIntId = (uint16_t)u64Value;
3047 if (uIntId <= GIC_INTID_RANGE_PPI_LAST)
3048 {
3049 /* SGIs and PPIs. */
3050 AssertCompile(GIC_INTID_RANGE_PPI_LAST < 8 * sizeof(pGicDev->bmIntrActive[0]));
3051 Assert(pGicDev->fAffRoutingEnabled);
3052 pGicCpu->bmIntrActive[0] &= ~RT_BIT_32(uIntId);
3053 }
3054 else if (uIntId <= GIC_INTID_RANGE_SPI_LAST)
3055 {
3056 /* SPIs. */
3057 uint16_t const idxIntr = /*gicDistGetIndexFromIntId*/(uIntId);
3058 AssertReturn(idxIntr < sizeof(pGicDev->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3059 ASMBitClear(&pGicDev->bmIntrActive[0], idxIntr);
3060 }
3061 else if (uIntId <= GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
3062 {
3063 /* Special interrupt IDs, ignored. */
3064 Log(("Ignoring write to EOI with special interrupt ID.\n"));
3065 break;
3066 }
3067 else if (uIntId <= GIC_INTID_RANGE_EXT_PPI_LAST)
3068 {
3069 /* Extended PPIs. */
3070 uint16_t const idxIntr = gicReDistGetIndexFromIntId(uIntId);
3071 AssertReturn(idxIntr < sizeof(pGicCpu->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3072 ASMBitClear(&pGicCpu->bmIntrActive[0], idxIntr);
3073 }
3074 else if (uIntId <= GIC_INTID_RANGE_EXT_SPI_LAST)
3075 {
3076 /* Extended SPIs. */
3077 uint16_t const idxIntr = gicDistGetIndexFromIntId(uIntId);
3078 AssertReturn(idxIntr < sizeof(pGicDev->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3079 ASMBitClear(&pGicDev->bmIntrActive[0], idxIntr);
3080 }
3081 else
3082 {
3083 AssertMsgFailed(("Invalid INTID %u\n", uIntId));
3084 break;
3085 }
3086
3087 /*
3088             * Priority drop: restore the previous running priority.
3089 */
3090 if (RT_LIKELY(pGicCpu->idxRunningPriority))
3091 {
3092 LogFlowFunc(("Restoring interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
3093 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
3094 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority - 1],
3095 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority - 1));
3096
3097 /*
3098 * Clear the interrupt priority from the active priorities bitmap.
3099 * It is up to the guest to ensure that writes to EOI registers are done in the exact
3100 * reverse order of the reads from the IAR registers.
3101 *
3102 * See ARM GIC spec 4.1.1 "Physical CPU interface".
3103 */
3104 uint8_t const idxPreemptionLevel = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] >> 1;
3105 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
3106 ASMBitClear(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
3107
3108 pGicCpu->idxRunningPriority--;
3109 Assert(pGicCpu->abRunningPriorities[0] == GIC_IDLE_PRIORITY);
3110 }
3111 else
3112 AssertReleaseMsgFailed(("Index of running-priority interrupt out-of-bounds %u\n", pGicCpu->idxRunningPriority));
3113 rcStrict = gicReDistUpdateIrqState(pGicDev, pVCpu);
3114 break;
3115 }
3116 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
3117 AssertReleaseFailed();
3118 break;
3119 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
3120 pGicCpu->bBinaryPtGroup1 = (uint8_t)ARMV8_ICC_BPR1_EL1_AARCH64_BINARYPOINT_GET(u64Value);
3121 break;
3122 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
3123 pGicCpu->uIccCtlr &= ARMV8_ICC_CTLR_EL1_RW;
3124 /** @todo */
3125 break;
3126 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
3127 AssertReleaseFailed();
3128 break;
3129 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
3130 pGicCpu->fIntrGroup0Enabled = RT_BOOL(u64Value & ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE);
3131 break;
3132 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
3133 pGicCpu->fIntrGroup1Enabled = RT_BOOL(u64Value & ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE);
3134 break;
3135 default:
3136 AssertReleaseMsgFailed(("u32Reg=%#RX32\n", u32Reg));
3137 break;
3138 }
3139
3140 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
3141 return rcStrict;
3142}
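/*
 * Guest-side sketch of the acknowledge / end-of-interrupt flow handled by the
 * two functions above (illustrative only; the register names are the
 * architectural system register names):
 *
 *     mrs  x0, ICC_IAR1_EL1      // acknowledge: gicReadSysReg() pushes the
 *                                //   running priority and returns the INTID
 *     ... service the interrupt identified by x0 ...
 *     msr  ICC_EOIR1_EL1, x0     // gicWriteSysReg() drops the priority and
 *                                //   deactivates the interrupt (EOImode = 0)
 *
 * With EOImode = 1 the EOI write would only drop the priority and a separate
 * ICC_DIR_EL1 write would be required to deactivate; this implementation
 * asserts that mode is not in use.
 */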
3143
3144
3145/**
3146 * Initializes the GIC distributor state.
3147 *
3148 * @param pDevIns The device instance.
3149 * @remarks This is also called during VM reset, so do NOT remove values that are
3150 * cleared to zero!
3151 */
3152static void gicInit(PPDMDEVINS pDevIns)
3153{
3154 LogFlowFunc(("\n"));
3155 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3156
3157 /* Distributor. */
3158 RT_ZERO(pGicDev->bmIntrGroup);
3159 RT_ZERO(pGicDev->bmIntrConfig);
3160 RT_ZERO(pGicDev->bmIntrEnabled);
3161 RT_ZERO(pGicDev->bmIntrPending);
3162 RT_ZERO(pGicDev->bmIntrActive);
3163 RT_ZERO(pGicDev->abIntrPriority);
3164 RT_ZERO(pGicDev->au32IntrRouting);
3165 RT_ZERO(pGicDev->bmIntrRoutingMode);
3166 pGicDev->fIntrGroup0Enabled = false;
3167 pGicDev->fIntrGroup1Enabled = false;
3168 pGicDev->fAffRoutingEnabled = true; /* GICv2 backwards compatibility is not implemented, so this is RA1/WI. */
3169
3170 /* GITS. */
3171 PGITSDEV pGitsDev = &pGicDev->Gits;
3172 gitsInit(pGitsDev);
3173
3174 /* LPIs. */
3175 RT_ZERO(pGicDev->abLpiConfig);
3176 pGicDev->uLpiConfigBaseReg.u = 0;
3177 pGicDev->uLpiPendingBaseReg.u = 0;
3178 pGicDev->fEnableLpis = false;
3179}
3180
3181
3182/**
3183 * Initializes the GIC redistributor and CPU interface state.
3184 *
3185 * @param pDevIns The device instance.
3186 * @param pVCpu The cross context virtual CPU structure.
3187 * @remarks This is also called during VM reset, so do NOT remove values that are
3188 * cleared to zero!
3189 */
3190static void gicInitCpu(PPDMDEVINS pDevIns, PVMCPUCC pVCpu)
3191{
3192 LogFlowFunc(("[%u]\n", pVCpu->idCpu));
3193 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3194 PGICCPU pGicCpu = &pVCpu->gic.s;
3195
3196 RT_ZERO(pGicCpu->bmIntrGroup);
3197 RT_ZERO(pGicCpu->bmIntrConfig);
3198    /* SGIs are always edge-triggered; writes to GICR_ICFGR0 are ignored. */
3199 pGicCpu->bmIntrConfig[0] = 0xaaaaaaaa;
3200 RT_ZERO(pGicCpu->bmIntrEnabled);
3201 RT_ZERO(pGicCpu->bmIntrPending);
3202 RT_ZERO(pGicCpu->bmIntrActive);
3203 RT_ZERO(pGicCpu->abIntrPriority);
3204
3205 pGicCpu->uIccCtlr = ARMV8_ICC_CTLR_EL1_AARCH64_PMHE
3206 | ARMV8_ICC_CTLR_EL1_AARCH64_PRIBITS_SET(4)
3207 | ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_SET(ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_16BITS)
3208 | (pGicDev->fRangeSel ? ARMV8_ICC_CTLR_EL1_AARCH64_RSS : 0)
3209 | (pGicDev->fAff3Levels ? ARMV8_ICC_CTLR_EL1_AARCH64_A3V : 0)
3210 | (pGicDev->fExtPpi || pGicDev->fExtSpi ? ARMV8_ICC_CTLR_EL1_AARCH64_EXTRANGE : 0);
3211
3212 pGicCpu->bIntrPriorityMask = 0; /* Means no interrupt gets through to the PE. */
3213 pGicCpu->idxRunningPriority = 0;
3214 memset((void *)&pGicCpu->abRunningPriorities[0], 0xff, sizeof(pGicCpu->abRunningPriorities));
3215 RT_ZERO(pGicCpu->bmActivePriorityGroup0);
3216 RT_ZERO(pGicCpu->bmActivePriorityGroup1);
3217 pGicCpu->bBinaryPtGroup0 = 0;
3218 pGicCpu->bBinaryPtGroup1 = 0;
3219 pGicCpu->fIntrGroup0Enabled = false;
3220 pGicCpu->fIntrGroup1Enabled = false;
3221 RT_ZERO(pGicCpu->bmLpiPending);
3222}
3223
3224
3225/**
3226 * Initializes per-VM GIC to the state following a power-up or hardware
3227 * reset.
3228 *
3229 * @param pDevIns The device instance.
3230 */
3231DECLHIDDEN(void) gicReset(PPDMDEVINS pDevIns)
3232{
3233 LogFlowFunc(("\n"));
3234 gicInit(pDevIns);
3235}
3236
3237
3238/**
3239 * Initializes per-VCPU GIC to the state following a power-up or hardware
3240 * reset.
3241 *
3242 * @param pDevIns The device instance.
3243 * @param pVCpu The cross context virtual CPU structure.
3244 */
3245DECLHIDDEN(void) gicResetCpu(PPDMDEVINS pDevIns, PVMCPUCC pVCpu)
3246{
3247 LogFlowFunc(("[%u]\n", pVCpu->idCpu));
3248 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3249 gicInitCpu(pDevIns, pVCpu);
3250}
3251
3252
3253/**
3254 * @callback_method_impl{FNIOMMMIONEWREAD}
3255 */
3256DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3257{
3258 NOREF(pvUser);
3259 Assert(!(off & 0x3));
3260 Assert(cb == 4); RT_NOREF_PV(cb);
3261
3262 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
3263 uint16_t offReg = off & 0xfffc;
3264 uint32_t uValue = 0;
3265
3266 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioRead);
3267
3268 VBOXSTRICTRC rc = VBOXSTRICTRC_VAL(gicDistReadRegister(pDevIns, pVCpu, offReg, &uValue));
3269 *(uint32_t *)pv = uValue;
3270
3271 LogFlowFunc(("[%u]: offReg=%#RX16 (%s) uValue=%#RX32\n", pVCpu->idCpu, offReg, gicDistGetRegDescription(offReg), uValue));
3272 return rc;
3273}
3274
3275
3276/**
3277 * @callback_method_impl{FNIOMMMIONEWWRITE}
3278 */
3279DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3280{
3281 NOREF(pvUser);
3282 Assert(!(off & 0x3));
3283 Assert(cb == 4); RT_NOREF_PV(cb);
3284
3285 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
3286 uint16_t offReg = off & 0xfffc;
3287 uint32_t uValue = *(uint32_t *)pv;
3288
3289 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioWrite);
3290 LogFlowFunc(("[%u]: offReg=%#RX16 (%s) uValue=%#RX32\n", pVCpu->idCpu, offReg, gicDistGetRegDescription(offReg), uValue));
3291
3292 return gicDistWriteRegister(pDevIns, pVCpu, offReg, uValue);
3293}
3294
3295
3296/**
3297 * @callback_method_impl{FNIOMMMIONEWREAD}
3298 */
3299DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3300{
3301 NOREF(pvUser);
3302 Assert(!(off & 0x3));
3303 Assert(cb == 4); RT_NOREF_PV(cb);
3304
3305 /*
3306 * Determine the redistributor being targeted. Each redistributor takes
3307 * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
3308 * and the redistributors are adjacent.
3309 */
3310 uint32_t const idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3311 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3312
3313 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
3314 Assert(idReDist < pVM->cCpus);
3315 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[idReDist];
3316
3317 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioRead);
3318
3319 /* Redistributor or SGI/PPI frame? */
3320 uint16_t const offReg = off & 0xfffc;
3321 uint32_t uValue = 0;
3322 VBOXSTRICTRC rcStrict;
3323 if (off < GIC_REDIST_REG_FRAME_SIZE)
3324 rcStrict = gicReDistReadRegister(pDevIns, pVCpu, idReDist, offReg, &uValue);
3325 else
3326 rcStrict = gicReDistReadSgiPpiRegister(pDevIns, pVCpu, offReg, &uValue);
3327
3328 *(uint32_t *)pv = uValue;
3329 LogFlowFunc(("[%u]: off=%RGp idReDist=%u offReg=%#RX16 (%s) uValue=%#RX32 -> %Rrc\n", pVCpu->idCpu, off, idReDist, offReg,
3330 gicReDistGetRegDescription(offReg), uValue, VBOXSTRICTRC_VAL(rcStrict)));
3331 return rcStrict;
3332}
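/*
 * Worked addressing example (illustrative; assumes the usual GICv3 layout of a
 * 64 KiB redistributor frame followed by a 64 KiB SGI/PPI frame, i.e. a
 * 0x20000 byte stride per VCPU):
 *
 *   A 4-byte access at off = 0x30080 gives idReDist = 0x30080 / 0x20000 = 1
 *   and off %= 0x20000 -> 0x10080. Since 0x10080 >= GIC_REDIST_REG_FRAME_SIZE
 *   the access lands in VCPU 1's SGI/PPI frame, and offReg = 0x10080 & 0xfffc
 *   = 0x80 selects the register at offset 0x80 of that frame.
 */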
3333
3334
3335/**
3336 * @callback_method_impl{FNIOMMMIONEWWRITE}
3337 */
3338DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3339{
3340 NOREF(pvUser);
3341 Assert(!(off & 0x3));
3342 Assert(cb == 4); RT_NOREF_PV(cb);
3343
3344 uint32_t uValue = *(uint32_t *)pv;
3345
3346 /*
3347 * Determine the redistributor being targeted. Each redistributor takes
3348 * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
3349 * and the redistributors are adjacent.
3350 */
3351 uint32_t const idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3352 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3353
3354 PCVMCC pVM = PDMDevHlpGetVM(pDevIns);
3355 Assert(idReDist < pVM->cCpus);
3356 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[idReDist];
3357
3358 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioWrite);
3359
3360 /* Redistributor or SGI/PPI frame? */
3361 uint16_t const offReg = off & 0xfffc;
3362 VBOXSTRICTRC rcStrict;
3363 if (off < GIC_REDIST_REG_FRAME_SIZE)
3364 rcStrict = gicReDistWriteRegister(pDevIns, pVCpu, offReg, uValue);
3365 else
3366 rcStrict = gicReDistWriteSgiPpiRegister(pDevIns, pVCpu, offReg, uValue);
3367
3368 LogFlowFunc(("[%u]: off=%RGp idReDist=%u offReg=%#RX16 (%s) uValue=%#RX32 -> %Rrc\n", pVCpu->idCpu, off, idReDist, offReg,
3369 gicReDistGetRegDescription(offReg), uValue, VBOXSTRICTRC_VAL(rcStrict)));
3370 return rcStrict;
3371}
3372
3373
3374/**
3375 * @callback_method_impl{FNIOMMMIONEWREAD}
3376 */
3377DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicItsMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3378{
3379 RT_NOREF_PV(pvUser);
3380 Assert(!(off & 0x3));
3381 Assert(cb == 8 || cb == 4);
3382
3383 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
3384 PCGITSDEV pGitsDev = &pGicDev->Gits;
3385 uint64_t uReg;
3386 if (off < GITS_REG_FRAME_SIZE)
3387 {
3388 /* Control registers space. */
3389 uint16_t const offReg = off & 0xfffc;
3390 uReg = gitsMmioReadCtrl(pGitsDev, offReg, cb);
3391 LogFlowFunc(("offReg=%#RX16 (%s) read %#RX64\n", offReg, gitsGetCtrlRegDescription(offReg), uReg));
3392 }
3393 else
3394 {
3395 /* Translation registers space. */
3396 uint16_t const offReg = (off - GITS_REG_FRAME_SIZE) & 0xfffc;
3397 uReg = gitsMmioReadTranslate(pGitsDev, offReg, cb);
3398 LogFlowFunc(("offReg=%#RX16 (%s) read %#RX64\n", offReg, gitsGetTranslationRegDescription(offReg), uReg));
3399 }
3400
3401 if (cb == 8)
3402 *(uint64_t *)pv = uReg;
3403 else
3404 *(uint32_t *)pv = uReg;
3405 return VINF_SUCCESS;
3406}
3407
3408
3409/**
3410 * @callback_method_impl{FNIOMMMIONEWWRITE}
3411 */
3412DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicItsMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3413{
3414 RT_NOREF_PV(pvUser);
3415 Assert(!(off & 0x3));
3416 Assert(cb == 8 || cb == 4);
3417
3418 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3419 PGITSDEV pGitsDev = &pGicDev->Gits;
3420
3421 uint64_t const uValue = cb == 8 ? *(uint64_t *)pv : *(uint32_t *)pv;
3422 if (off < GITS_REG_FRAME_SIZE)
3423 {
3424 /* Control registers space. */
3425 uint16_t const offReg = off & 0xfffc;
3426 gitsMmioWriteCtrl(pDevIns, pGitsDev, offReg, uValue, cb);
3427 LogFlowFunc(("offReg=%#RX16 (%s) written %#RX64\n", offReg, gitsGetCtrlRegDescription(offReg), uValue));
3428 }
3429 else
3430 {
3431 /* Translation registers space. */
3432 uint16_t const offReg = (off - GITS_REG_FRAME_SIZE) & 0xfffc;
3433 gitsMmioWriteTranslate(pGitsDev, offReg, uValue, cb);
3434 LogFlowFunc(("offReg=%#RX16 (%s) written %#RX64\n", offReg, gitsGetTranslationRegDescription(offReg), uValue));
3435 }
3436 return VINF_SUCCESS;
3437}
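/*
 * Note on the ITS frame split handled above (illustrative summary): the first
 * GITS_REG_FRAME_SIZE bytes hold the control registers (GITS_CTLR, the
 * GITS_BASER<n> table registers, the command queue registers, ...), while the
 * following frame holds the translation registers, most notably
 * GITS_TRANSLATER, which devices target with MSI writes (see gitsSendMsi in
 * the backend table below). The register handling itself is implemented by the
 * gitsMmio* functions called above.
 */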
3438
3439
3440/**
3441 * GIC device registration structure.
3442 */
3443const PDMDEVREG g_DeviceGIC =
3444{
3445 /* .u32Version = */ PDM_DEVREG_VERSION,
3446 /* .uReserved0 = */ 0,
3447 /* .szName = */ "gic",
3448 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
3449 /* .fClass = */ PDM_DEVREG_CLASS_PIC,
3450 /* .cMaxInstances = */ 1,
3451 /* .uSharedVersion = */ 42,
3452 /* .cbInstanceShared = */ sizeof(GICDEV),
3453 /* .cbInstanceCC = */ 0,
3454 /* .cbInstanceRC = */ 0,
3455 /* .cMaxPciDevices = */ 0,
3456 /* .cMaxMsixVectors = */ 0,
3457 /* .pszDescription = */ "Generic Interrupt Controller",
3458#if defined(IN_RING3)
3459 /* .szRCMod = */ "VMMRC.rc",
3460 /* .szR0Mod = */ "VMMR0.r0",
3461 /* .pfnConstruct = */ gicR3Construct,
3462 /* .pfnDestruct = */ gicR3Destruct,
3463 /* .pfnRelocate = */ NULL,
3464 /* .pfnMemSetup = */ NULL,
3465 /* .pfnPowerOn = */ NULL,
3466 /* .pfnReset = */ gicR3Reset,
3467 /* .pfnSuspend = */ NULL,
3468 /* .pfnResume = */ NULL,
3469 /* .pfnAttach = */ NULL,
3470 /* .pfnDetach = */ NULL,
3471 /* .pfnQueryInterface = */ NULL,
3472 /* .pfnInitComplete = */ NULL,
3473 /* .pfnPowerOff = */ NULL,
3474 /* .pfnSoftReset = */ NULL,
3475 /* .pfnReserved0 = */ NULL,
3476 /* .pfnReserved1 = */ NULL,
3477 /* .pfnReserved2 = */ NULL,
3478 /* .pfnReserved3 = */ NULL,
3479 /* .pfnReserved4 = */ NULL,
3480 /* .pfnReserved5 = */ NULL,
3481 /* .pfnReserved6 = */ NULL,
3482 /* .pfnReserved7 = */ NULL,
3483#elif defined(IN_RING0)
3484 /* .pfnEarlyConstruct = */ NULL,
3485 /* .pfnConstruct = */ NULL,
3486 /* .pfnDestruct = */ NULL,
3487 /* .pfnFinalDestruct = */ NULL,
3488 /* .pfnRequest = */ NULL,
3489 /* .pfnReserved0 = */ NULL,
3490 /* .pfnReserved1 = */ NULL,
3491 /* .pfnReserved2 = */ NULL,
3492 /* .pfnReserved3 = */ NULL,
3493 /* .pfnReserved4 = */ NULL,
3494 /* .pfnReserved5 = */ NULL,
3495 /* .pfnReserved6 = */ NULL,
3496 /* .pfnReserved7 = */ NULL,
3497#elif defined(IN_RC)
3498 /* .pfnConstruct = */ NULL,
3499 /* .pfnReserved0 = */ NULL,
3500 /* .pfnReserved1 = */ NULL,
3501 /* .pfnReserved2 = */ NULL,
3502 /* .pfnReserved3 = */ NULL,
3503 /* .pfnReserved4 = */ NULL,
3504 /* .pfnReserved5 = */ NULL,
3505 /* .pfnReserved6 = */ NULL,
3506 /* .pfnReserved7 = */ NULL,
3507#else
3508# error "Not in IN_RING3, IN_RING0 or IN_RC!"
3509#endif
3510 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
3511};
3512
3513
3514/**
3515 * The VirtualBox GIC backend.
3516 */
3517const PDMGICBACKEND g_GicBackend =
3518{
3519 /* .pfnReadSysReg = */ gicReadSysReg,
3520 /* .pfnWriteSysReg = */ gicWriteSysReg,
3521 /* .pfnSetSpi = */ gicSetSpi,
3522 /* .pfnSetPpi = */ gicSetPpi,
3523 /* .pfnSendMsi = */ gitsSendMsi,
3524};
3525