VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp@ 88544

Last change on this file since 88544 was 88537, checked in by vboxsync, 4 years ago

Intel IOMMU: bugref:9967 nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 60.3 KB
Line 
1/* $Id: DevIommuIntel.cpp 88537 2021-04-15 12:45:18Z vboxsync $ */
2/** @file
3 * IOMMU - Input/Output Memory Management Unit - Intel implementation.
4 */
5
6/*
7 * Copyright (C) 2021 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_IOMMU
23#include "VBoxDD.h"
24#include "DevIommuIntel.h"
25
26#include <iprt/string.h>
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
/** Gets the low uint32_t of a uint64_t or something equivalent.
 *
 * This is suitable for casting constants outside code (since RT_LO_U32 can't be
 * used as it asserts for correctness when compiling on certain compilers).
 * The expansion is fully parenthesized so the macro composes safely inside
 * larger expressions. */
#define DMAR_LO_U32(a)      ((uint32_t)(UINT32_MAX & (a)))

/** Gets the high uint32_t of a uint64_t or something equivalent.
 *
 * This is suitable for casting constants outside code (since RT_HI_U32 can't be
 * used as it asserts for correctness when compiling on certain compilers). */
#define DMAR_HI_U32(a)      ((uint32_t)((a) >> 32))
43
/** Asserts MMIO access' offset and size are valid or returns appropriate error
 * code suitable for returning from MMIO access handlers.
 * Only naturally aligned 4 and 8 byte accesses are accepted.
 * Note: no trailing semicolon after "while (0)" -- the caller supplies it, and
 * a stray one here would break use inside if/else without braces. */
#define DMAR_ASSERT_MMIO_ACCESS_RET(a_off, a_cb) \
    do { \
        AssertReturn((a_cb) == 4 || (a_cb) == 8, VINF_IOM_MMIO_UNUSED_FF); \
        AssertReturn(!((a_off) & ((a_cb) - 1)), VINF_IOM_MMIO_UNUSED_FF); \
    } while (0)
51
/** Checks whether the MMIO offset is valid (i.e. falls within group 0 or group 1).
 * The group-1 check relies on unsigned arithmetic: offsets below
 * DMAR_MMIO_GROUP_1_OFF_FIRST wrap around to huge values and therefore fail the
 * size comparison. */
#define DMAR_IS_MMIO_OFF_VALID(a_off)   (    (a_off) < DMAR_MMIO_GROUP_0_OFF_END \
                                          || (a_off) - DMAR_MMIO_GROUP_1_OFF_FIRST < DMAR_MMIO_GROUP_1_SIZE)
55
/** @name DMAR implementation specifics.
 * @{ */
/** The number of fault recording registers our implementation supports.
 * Normal guest operation shouldn't trigger faults anyway, so we only support the
 * minimum number of registers (which is 1).
 *
 * See Intel VT-d spec. 10.4.2 "Capability Register" (CAP_REG.NFR). */
#define DMAR_FRCD_REG_COUNT             UINT32_C(1)

/** Offset of first register in group 0. */
#define DMAR_MMIO_GROUP_0_OFF_FIRST     VTD_MMIO_OFF_VER_REG
/** Offset of last register in group 0 (inclusive). */
#define DMAR_MMIO_GROUP_0_OFF_LAST      VTD_MMIO_OFF_MTRR_PHYSMASK9_REG
/** Last valid offset in group 0 (exclusive). */
#define DMAR_MMIO_GROUP_0_OFF_END       (DMAR_MMIO_GROUP_0_OFF_LAST + 8 /* sizeof MTRR_PHYSMASK9_REG */)
/** Size of the group 0 (in bytes). */
#define DMAR_MMIO_GROUP_0_SIZE          (DMAR_MMIO_GROUP_0_OFF_END - DMAR_MMIO_GROUP_0_OFF_FIRST)
/** Implementation-specific MMIO offset of IVA_REG. */
#define DMAR_MMIO_OFF_IVA_REG           0xe50
/** Implementation-specific MMIO offset of IOTLB_REG. */
#define DMAR_MMIO_OFF_IOTLB_REG         0xe58
/** Implementation-specific MMIO offset of FRCD_LO_REG. */
#define DMAR_MMIO_OFF_FRCD_LO_REG       0xe70
/** Implementation-specific MMIO offset of FRCD_HI_REG. */
#define DMAR_MMIO_OFF_FRCD_HI_REG       0xe78
AssertCompile(!(DMAR_MMIO_OFF_FRCD_LO_REG & 0xf)); /* FRCD register pairs must be 128-bit aligned. */
82
/** Offset of first register in group 1. */
#define DMAR_MMIO_GROUP_1_OFF_FIRST     VTD_MMIO_OFF_VCCAP_REG
/** Offset of last register in group 1 (inclusive).
 * With DMAR_FRCD_REG_COUNT = 1 this evaluates to FRCD_HI_REG (0xe78).  The
 * whole expansion is parenthesized so the macro is safe to embed in larger
 * expressions. */
#define DMAR_MMIO_GROUP_1_OFF_LAST      ((DMAR_MMIO_OFF_FRCD_LO_REG + 8) * DMAR_FRCD_REG_COUNT)
/** Last valid offset in group 1 (exclusive). */
#define DMAR_MMIO_GROUP_1_OFF_END       (DMAR_MMIO_GROUP_1_OFF_LAST + 8 /* sizeof FRCD_HI_REG */)
/** Size of the group 1 (in bytes). */
#define DMAR_MMIO_GROUP_1_SIZE          (DMAR_MMIO_GROUP_1_OFF_END - DMAR_MMIO_GROUP_1_OFF_FIRST)
91
/** DMAR implementation's major version number (exposed to software).
 * We report 6 as the major version since we support queued invalidations as
 * software may make assumptions based on that.
 *
 * See Intel VT-d spec. 10.4.7 "Context Command Register" (CCMD_REG.CAIG). */
#define DMAR_VER_MAJOR                  6
/** DMAR implementation's minor version number (exposed to software). */
#define DMAR_VER_MINOR                  0
/** @} */

/** Release log prefix string (prepended to release-log messages of this device). */
#define DMAR_LOG_PFX                    "Intel-IOMMU"
/** The current saved state version (bump when the saved-state layout changes). */
#define DMAR_SAVED_STATE_VERSION        1
107
108/*********************************************************************************************************************************
109* Structures and Typedefs *
110*********************************************************************************************************************************/
/**
 * The shared DMAR device state.
 * This is the part of the state shared between ring-3, ring-0 and raw-mode.
 */
typedef struct DMAR
{
    /** IOMMU device index. */
    uint32_t                    idxIommu;
    /** DMAR magic. */
    uint32_t                    u32Magic;

    /** The MMIO handle. */
    IOMMMIOHANDLE               hMmio;

    /** Registers (group 0).  Byte-addressable backing store for the MMIO
     *  register file; accessed via the dmarReg*/dmarRegGetGroup helpers. */
    uint8_t                     abRegs0[DMAR_MMIO_GROUP_0_SIZE];
    /** Registers (group 1). */
    uint8_t                     abRegs1[DMAR_MMIO_GROUP_1_SIZE];

    /** @name Register copies for a tiny bit faster and more convenient access.
     * @{ */
    /** Copy of VER_REG. */
    uint8_t                     uVerReg;
    /** Alignment. */
    uint8_t                     abPadding[7];
    /** Copy of CAP_REG. */
    uint64_t                    fCap;
    /** Copy of ECAP_REG. */
    uint64_t                    fExtCap;
    /** @} */

#ifdef VBOX_WITH_STATISTICS
    STAMCOUNTER                 StatMmioReadR3;         /**< Number of MMIO reads in R3. */
    STAMCOUNTER                 StatMmioReadRZ;         /**< Number of MMIO reads in RZ. */
    STAMCOUNTER                 StatMmioWriteR3;        /**< Number of MMIO writes in R3. */
    STAMCOUNTER                 StatMmioWriteRZ;        /**< Number of MMIO writes in RZ. */

    STAMCOUNTER                 StatMsiRemapR3;         /**< Number of MSI remap requests in R3. */
    STAMCOUNTER                 StatMsiRemapRZ;         /**< Number of MSI remap requests in RZ. */

    STAMCOUNTER                 StatMemReadR3;          /**< Number of memory read translation requests in R3. */
    STAMCOUNTER                 StatMemReadRZ;          /**< Number of memory read translation requests in RZ. */
    STAMCOUNTER                 StatMemWriteR3;         /**< Number of memory write translation requests in R3. */
    STAMCOUNTER                 StatMemWriteRZ;         /**< Number of memory write translation requests in RZ. */

    STAMCOUNTER                 StatMemBulkReadR3;      /**< Number of memory read bulk translation requests in R3. */
    STAMCOUNTER                 StatMemBulkReadRZ;      /**< Number of memory read bulk translation requests in RZ. */
    STAMCOUNTER                 StatMemBulkWriteR3;     /**< Number of memory write bulk translation requests in R3. */
    STAMCOUNTER                 StatMemBulkWriteRZ;     /**< Number of memory write bulk translation requests in RZ. */
#endif
} DMAR;
/** Pointer to the DMAR device state. */
typedef DMAR *PDMAR;
/** Pointer to the const DMAR device state. */
typedef const DMAR *PCDMAR;
165
/**
 * The ring-3 DMAR device state.
 */
typedef struct DMARR3
{
    /** Device instance. */
    PPDMDEVINSR3                pDevInsR3;
    /** The IOMMU helper. */
    R3PTRTYPE(PCPDMIOMMUHLPR3)  pIommuHlpR3;
} DMARR3;
/** Pointer to the ring-3 DMAR device state. */
typedef DMARR3 *PDMARR3;
/** Pointer to the const ring-3 DMAR device state. */
typedef const DMARR3 *PCDMARR3;
180
/**
 * The ring-0 DMAR device state.
 */
typedef struct DMARR0
{
    /** Device instance. */
    PPDMDEVINSR0                pDevInsR0;
    /** The IOMMU helper. */
    R0PTRTYPE(PCPDMIOMMUHLPR0)  pIommuHlpR0;
} DMARR0;
/** Pointer to the ring-0 DMAR device state. */
typedef DMARR0 *PDMARR0;
/** Pointer to the const ring-0 DMAR device state. */
typedef const DMARR0 *PCDMARR0;
195
196/**
197 * The raw-mode DMAR device state.
198 */
199typedef struct DMARRC
200{
201 /** Device instance. */
202 PPDMDEVINSRC pDevInsRC;
203 /** The IOMMU helper. */
204 RCPTRTYPE(PCPDMIOMMUHLPRC) pIommuHlpRC;
205} DMARRC;
206/** Pointer to the raw-mode DMAR device state. */
207typedef DMARRC *PDMARRC;
208/** Pointer to the const raw-mode DMAR device state. */
209typedef const DMARRC *PCIDMARRC;
210
211/** The DMAR device state for the current context. */
212typedef CTX_SUFF(DMAR) DMARCC;
213/** Pointer to the DMAR device state for the current context. */
214typedef CTX_SUFF(PDMAR) PDMARCC;
215
216
217/*********************************************************************************************************************************
218* Global Variables *
219*********************************************************************************************************************************/
/**
 * Read-write masks for DMAR registers (group 0).
 *
 * The table mirrors the MMIO layout of group 0: each 32-bit register
 * contributes one uint32_t entry, each 64-bit register contributes two
 * (low dword first, then high dword).
 */
static uint32_t const g_au32RwMasks0[] =
{
    /* Offset  Register                   Low                                 High */
    /* 0x000   VER_REG               */   VTD_VER_REG_RW_MASK,
    /* 0x004   Reserved              */   0,
    /* 0x008   CAP_REG               */   DMAR_LO_U32(VTD_CAP_REG_RW_MASK),   DMAR_HI_U32(VTD_CAP_REG_RW_MASK),
    /* 0x010   ECAP_REG              */   DMAR_LO_U32(VTD_ECAP_REG_RW_MASK),  DMAR_HI_U32(VTD_ECAP_REG_RW_MASK),
    /* 0x018   GCMD_REG              */   VTD_GCMD_REG_RW_MASK,
    /* 0x01c   GSTS_REG              */   VTD_GSTS_REG_RW_MASK,
    /* 0x020   RTADDR_REG            */   DMAR_LO_U32(VTD_RTADDR_REG_RW_MASK), DMAR_HI_U32(VTD_RTADDR_REG_RW_MASK),
    /* 0x028   CCMD_REG              */   DMAR_LO_U32(VTD_CCMD_REG_RW_MASK),  DMAR_HI_U32(VTD_CCMD_REG_RW_MASK),
    /* 0x030   Reserved              */   0,
    /* 0x034   FSTS_REG              */   VTD_FSTS_REG_RW_MASK,
    /* 0x038   FECTL_REG             */   VTD_FECTL_REG_RW_MASK,
    /* 0x03c   FEDATA_REG            */   VTD_FEDATA_REG_RW_MASK,
    /* 0x040   FEADDR_REG            */   VTD_FEADDR_REG_RW_MASK,
    /* 0x044   FEUADDR_REG           */   VTD_FEUADDR_REG_RW_MASK,
    /* 0x048   Reserved              */   0, 0,
    /* 0x050   Reserved              */   0, 0,
    /* 0x058   AFLOG_REG             */   DMAR_LO_U32(VTD_AFLOG_REG_RW_MASK), DMAR_HI_U32(VTD_AFLOG_REG_RW_MASK),
    /* 0x060   Reserved              */   0,
    /* 0x064   PMEN_REG              */   0, /* RO as we don't support PLMR and PHMR. */
    /* 0x068   PLMBASE_REG           */   0, /* RO as we don't support PLMR. */
    /* 0x06c   PLMLIMIT_REG          */   0, /* RO as we don't support PLMR. */
    /* 0x070   PHMBASE_REG           */   0, 0, /* RO as we don't support PHMR. */
    /* 0x078   PHMLIMIT_REG          */   0, 0, /* RO as we don't support PHMR. */
    /* 0x080   IQH_REG               */   DMAR_LO_U32(VTD_IQH_REG_RW_MASK),   DMAR_HI_U32(VTD_IQH_REG_RW_MASK),
    /* 0x088   IQT_REG               */   DMAR_LO_U32(VTD_IQT_REG_RW_MASK),   DMAR_HI_U32(VTD_IQT_REG_RW_MASK),
    /* 0x090   IQA_REG               */   DMAR_LO_U32(VTD_IQA_REG_RW_MASK),   DMAR_HI_U32(VTD_IQA_REG_RW_MASK),
    /* 0x098   Reserved              */   0,
    /* 0x09c   ICS_REG               */   VTD_ICS_REG_RW_MASK,
    /* 0x0a0   IECTL_REG             */   VTD_IECTL_REG_RW_MASK,
    /* 0x0a4   IEDATA_REG            */   VTD_IEDATA_REG_RW_MASK,
    /* 0x0a8   IEADDR_REG            */   VTD_IEADDR_REG_RW_MASK,
    /* 0x0ac   IEUADDR_REG           */   VTD_IEUADDR_REG_RW_MASK,
    /* 0x0b0   IQERCD_REG            */   DMAR_LO_U32(VTD_IQERCD_REG_RW_MASK), DMAR_HI_U32(VTD_IQERCD_REG_RW_MASK),
    /* 0x0b8   IRTA_REG              */   DMAR_LO_U32(VTD_IRTA_REG_RW_MASK),  DMAR_HI_U32(VTD_IRTA_REG_RW_MASK),
    /* 0x0c0   PQH_REG               */   DMAR_LO_U32(VTD_PQH_REG_RW_MASK),   DMAR_HI_U32(VTD_PQH_REG_RW_MASK),
    /* 0x0c8   PQT_REG               */   DMAR_LO_U32(VTD_PQT_REG_RW_MASK),   DMAR_HI_U32(VTD_PQT_REG_RW_MASK),
    /* 0x0d0   PQA_REG               */   DMAR_LO_U32(VTD_PQA_REG_RW_MASK),   DMAR_HI_U32(VTD_PQA_REG_RW_MASK),
    /* 0x0d8   Reserved              */   0,
    /* 0x0dc   PRS_REG               */   VTD_PRS_REG_RW_MASK,
    /* 0x0e0   PECTL_REG             */   VTD_PECTL_REG_RW_MASK,
    /* 0x0e4   PEDATA_REG            */   VTD_PEDATA_REG_RW_MASK,
    /* 0x0e8   PEADDR_REG            */   VTD_PEADDR_REG_RW_MASK,
    /* 0x0ec   PEUADDR_REG           */   VTD_PEUADDR_REG_RW_MASK,
    /* 0x0f0   Reserved              */   0, 0,
    /* 0x0f8   Reserved              */   0, 0,
    /* 0x100   MTRRCAP_REG           */   DMAR_LO_U32(VTD_MTRRCAP_REG_RW_MASK), DMAR_HI_U32(VTD_MTRRCAP_REG_RW_MASK),
    /* 0x108   MTRRDEF_REG           */   0, 0, /* RO as we don't support MTS. */
    /* 0x110   Reserved              */   0, 0,
    /* 0x118   Reserved              */   0, 0,
    /* 0x120   MTRR_FIX64_00000_REG  */   0, 0, /* RO as we don't support MTS. */
    /* 0x128   MTRR_FIX16K_80000_REG */   0, 0,
    /* 0x130   MTRR_FIX16K_A0000_REG */   0, 0,
    /* 0x138   MTRR_FIX4K_C0000_REG  */   0, 0,
    /* 0x140   MTRR_FIX4K_C8000_REG  */   0, 0,
    /* 0x148   MTRR_FIX4K_D0000_REG  */   0, 0,
    /* 0x150   MTRR_FIX4K_D8000_REG  */   0, 0,
    /* 0x158   MTRR_FIX4K_E0000_REG  */   0, 0,
    /* 0x160   MTRR_FIX4K_E8000_REG  */   0, 0,
    /* 0x168   MTRR_FIX4K_F0000_REG  */   0, 0,
    /* 0x170   MTRR_FIX4K_F8000_REG  */   0, 0,
    /* 0x178   Reserved              */   0, 0,
    /* 0x180   MTRR_PHYSBASE0_REG    */   0, 0, /* RO as we don't support MTS. */
    /* 0x188   MTRR_PHYSMASK0_REG    */   0, 0,
    /* 0x190   MTRR_PHYSBASE1_REG    */   0, 0,
    /* 0x198   MTRR_PHYSMASK1_REG    */   0, 0,
    /* 0x1a0   MTRR_PHYSBASE2_REG    */   0, 0,
    /* 0x1a8   MTRR_PHYSMASK2_REG    */   0, 0,
    /* 0x1b0   MTRR_PHYSBASE3_REG    */   0, 0,
    /* 0x1b8   MTRR_PHYSMASK3_REG    */   0, 0,
    /* 0x1c0   MTRR_PHYSBASE4_REG    */   0, 0,
    /* 0x1c8   MTRR_PHYSMASK4_REG    */   0, 0,
    /* 0x1d0   MTRR_PHYSBASE5_REG    */   0, 0,
    /* 0x1d8   MTRR_PHYSMASK5_REG    */   0, 0,
    /* 0x1e0   MTRR_PHYSBASE6_REG    */   0, 0,
    /* 0x1e8   MTRR_PHYSMASK6_REG    */   0, 0,
    /* 0x1f0   MTRR_PHYSBASE7_REG    */   0, 0,
    /* 0x1f8   MTRR_PHYSMASK7_REG    */   0, 0,
    /* 0x200   MTRR_PHYSBASE8_REG    */   0, 0,
    /* 0x208   MTRR_PHYSMASK8_REG    */   0, 0,
    /* 0x210   MTRR_PHYSBASE9_REG    */   0, 0,
    /* 0x218   MTRR_PHYSMASK9_REG    */   0, 0,
};
AssertCompile(sizeof(g_au32RwMasks0) == DMAR_MMIO_GROUP_0_SIZE);
309
/**
 * Read-only Status, Write-1-to-clear masks for DMAR registers (group 0).
 *
 * Layout is identical to g_au32RwMasks0; only FSTS_REG and ICS_REG have RW1C
 * bits in this implementation.
 */
static uint32_t const g_au32Rw1cMasks0[] =
{
    /* Offset  Register                   Low                      High */
    /* 0x000   VER_REG               */   0,
    /* 0x004   Reserved              */   0,
    /* 0x008   CAP_REG               */   0, 0,
    /* 0x010   ECAP_REG              */   0, 0,
    /* 0x018   GCMD_REG              */   0,
    /* 0x01c   GSTS_REG              */   0,
    /* 0x020   RTADDR_REG            */   0, 0,
    /* 0x028   CCMD_REG              */   0, 0,
    /* 0x030   Reserved              */   0,
    /* 0x034   FSTS_REG              */   VTD_FSTS_REG_RW1C_MASK,
    /* 0x038   FECTL_REG             */   0,
    /* 0x03c   FEDATA_REG            */   0,
    /* 0x040   FEADDR_REG            */   0,
    /* 0x044   FEUADDR_REG           */   0,
    /* 0x048   Reserved              */   0, 0,
    /* 0x050   Reserved              */   0, 0,
    /* 0x058   AFLOG_REG             */   0, 0,
    /* 0x060   Reserved              */   0,
    /* 0x064   PMEN_REG              */   0,
    /* 0x068   PLMBASE_REG           */   0,
    /* 0x06c   PLMLIMIT_REG          */   0,
    /* 0x070   PHMBASE_REG           */   0, 0,
    /* 0x078   PHMLIMIT_REG          */   0, 0,
    /* 0x080   IQH_REG               */   0, 0,
    /* 0x088   IQT_REG               */   0, 0,
    /* 0x090   IQA_REG               */   0, 0,
    /* 0x098   Reserved              */   0,
    /* 0x09c   ICS_REG               */   VTD_ICS_REG_RW1C_MASK,
    /* 0x0a0   IECTL_REG             */   0,
    /* 0x0a4   IEDATA_REG            */   0,
    /* 0x0a8   IEADDR_REG            */   0,
    /* 0x0ac   IEUADDR_REG           */   0,
    /* 0x0b0   IQERCD_REG            */   0, 0,
    /* 0x0b8   IRTA_REG              */   0, 0,
    /* 0x0c0   PQH_REG               */   0, 0,
    /* 0x0c8   PQT_REG               */   0, 0,
    /* 0x0d0   PQA_REG               */   0, 0,
    /* 0x0d8   Reserved              */   0,
    /* 0x0dc   PRS_REG               */   0,
    /* 0x0e0   PECTL_REG             */   0,
    /* 0x0e4   PEDATA_REG            */   0,
    /* 0x0e8   PEADDR_REG            */   0,
    /* 0x0ec   PEUADDR_REG           */   0,
    /* 0x0f0   Reserved              */   0, 0,
    /* 0x0f8   Reserved              */   0, 0,
    /* 0x100   MTRRCAP_REG           */   0, 0,
    /* 0x108   MTRRDEF_REG           */   0, 0,
    /* 0x110   Reserved              */   0, 0,
    /* 0x118   Reserved              */   0, 0,
    /* 0x120   MTRR_FIX64_00000_REG  */   0, 0,
    /* 0x128   MTRR_FIX16K_80000_REG */   0, 0,
    /* 0x130   MTRR_FIX16K_A0000_REG */   0, 0,
    /* 0x138   MTRR_FIX4K_C0000_REG  */   0, 0,
    /* 0x140   MTRR_FIX4K_C8000_REG  */   0, 0,
    /* 0x148   MTRR_FIX4K_D0000_REG  */   0, 0,
    /* 0x150   MTRR_FIX4K_D8000_REG  */   0, 0,
    /* 0x158   MTRR_FIX4K_E0000_REG  */   0, 0,
    /* 0x160   MTRR_FIX4K_E8000_REG  */   0, 0,
    /* 0x168   MTRR_FIX4K_F0000_REG  */   0, 0,
    /* 0x170   MTRR_FIX4K_F8000_REG  */   0, 0,
    /* 0x178   Reserved              */   0, 0,
    /* 0x180   MTRR_PHYSBASE0_REG    */   0, 0,
    /* 0x188   MTRR_PHYSMASK0_REG    */   0, 0,
    /* 0x190   MTRR_PHYSBASE1_REG    */   0, 0,
    /* 0x198   MTRR_PHYSMASK1_REG    */   0, 0,
    /* 0x1a0   MTRR_PHYSBASE2_REG    */   0, 0,
    /* 0x1a8   MTRR_PHYSMASK2_REG    */   0, 0,
    /* 0x1b0   MTRR_PHYSBASE3_REG    */   0, 0,
    /* 0x1b8   MTRR_PHYSMASK3_REG    */   0, 0,
    /* 0x1c0   MTRR_PHYSBASE4_REG    */   0, 0,
    /* 0x1c8   MTRR_PHYSMASK4_REG    */   0, 0,
    /* 0x1d0   MTRR_PHYSBASE5_REG    */   0, 0,
    /* 0x1d8   MTRR_PHYSMASK5_REG    */   0, 0,
    /* 0x1e0   MTRR_PHYSBASE6_REG    */   0, 0,
    /* 0x1e8   MTRR_PHYSMASK6_REG    */   0, 0,
    /* 0x1f0   MTRR_PHYSBASE7_REG    */   0, 0,
    /* 0x1f8   MTRR_PHYSMASK7_REG    */   0, 0,
    /* 0x200   MTRR_PHYSBASE8_REG    */   0, 0,
    /* 0x208   MTRR_PHYSMASK8_REG    */   0, 0,
    /* 0x210   MTRR_PHYSBASE9_REG    */   0, 0,
    /* 0x218   MTRR_PHYSMASK9_REG    */   0, 0,
};
AssertCompile(sizeof(g_au32Rw1cMasks0) == DMAR_MMIO_GROUP_0_SIZE);
399
/**
 * Read-write masks for DMAR registers (group 1).
 *
 * Each 64-bit register contributes two uint32_t entries (low dword, high
 * dword), mirroring the MMIO layout starting at DMAR_MMIO_GROUP_1_OFF_FIRST.
 */
static uint32_t const g_au32RwMasks1[] =
{
    /* Offset  Register         Low                                         High */
    /* 0xe00   VCCAP_REG    */  DMAR_LO_U32(VTD_VCCAP_REG_RW_MASK),         DMAR_HI_U32(VTD_VCCAP_REG_RW_MASK),
    /* 0xe08   VCMD_EO_REG  */  DMAR_LO_U32(VTD_VCMD_EO_REG_RW_MASK),       DMAR_HI_U32(VTD_VCMD_EO_REG_RW_MASK),
    /* 0xe10   VCMD_REG     */  0, 0, /* RO: VCS not supported. */
    /* 0xe18   VCMDRSVD_REG */  0, 0,
    /* 0xe20   VCRSP_REG    */  0, 0, /* RO: VCS not supported. */
    /* 0xe28   VCRSPRSVD_REG */ 0, 0,
    /* 0xe30   Reserved     */  0, 0,
    /* 0xe38   Reserved     */  0, 0,
    /* 0xe40   Reserved     */  0, 0,
    /* 0xe48   Reserved     */  0, 0,
    /* 0xe50   IVA_REG      */  DMAR_LO_U32(VTD_IVA_REG_RW_MASK),           DMAR_HI_U32(VTD_IVA_REG_RW_MASK),
    /* 0xe58   IOTLB_REG    */  DMAR_LO_U32(VTD_IOTLB_REG_RW_MASK),         DMAR_HI_U32(VTD_IOTLB_REG_RW_MASK),
    /* 0xe60   Reserved     */  0, 0,
    /* 0xe68   Reserved     */  0, 0,
    /* 0xe70   FRCD_REG_LO  */  DMAR_LO_U32(VTD_FRCD_REG_LO_RW_MASK),       DMAR_HI_U32(VTD_FRCD_REG_LO_RW_MASK),
    /* 0xe78   FRCD_REG_HI  */  DMAR_LO_U32(VTD_FRCD_REG_HI_RW_MASK),       DMAR_HI_U32(VTD_FRCD_REG_HI_RW_MASK),
};
AssertCompile(sizeof(g_au32RwMasks1) == DMAR_MMIO_GROUP_1_SIZE);
424AssertCompile((DMAR_MMIO_OFF_FRCD_LO_REG - DMAR_MMIO_GROUP_1_OFF_FIRST) + DMAR_FRCD_REG_COUNT * 2 * sizeof(uint64_t) );
425
/**
 * Read-only Status, Write-1-to-clear masks for DMAR registers (group 1).
 *
 * Layout is identical to g_au32RwMasks1; only the FRCD registers have RW1C
 * bits in this implementation.
 */
static uint32_t const g_au32Rw1cMasks1[] =
{
    /* Offset  Register         Low                                         High */
    /* 0xe00   VCCAP_REG    */  0, 0,
    /* 0xe08   VCMD_EO_REG  */  0, 0,
    /* 0xe10   VCMD_REG     */  0, 0,
    /* 0xe18   VCMDRSVD_REG */  0, 0,
    /* 0xe20   VCRSP_REG    */  0, 0,
    /* 0xe28   VCRSPRSVD_REG */ 0, 0,
    /* 0xe30   Reserved     */  0, 0,
    /* 0xe38   Reserved     */  0, 0,
    /* 0xe40   Reserved     */  0, 0,
    /* 0xe48   Reserved     */  0, 0,
    /* 0xe50   IVA_REG      */  0, 0,
    /* 0xe58   IOTLB_REG    */  0, 0,
    /* 0xe60   Reserved     */  0, 0,
    /* 0xe68   Reserved     */  0, 0,
    /* 0xe70   FRCD_REG_LO  */  DMAR_LO_U32(VTD_FRCD_REG_LO_RW1C_MASK),     DMAR_HI_U32(VTD_FRCD_REG_LO_RW1C_MASK),
    /* 0xe78   FRCD_REG_HI  */  DMAR_LO_U32(VTD_FRCD_REG_HI_RW1C_MASK),     DMAR_HI_U32(VTD_FRCD_REG_HI_RW1C_MASK),
};
AssertCompile(sizeof(g_au32Rw1cMasks1) == DMAR_MMIO_GROUP_1_SIZE);
450
451/** Array of RW masks for each register group. */
452static uint8_t const *g_apbRwMasks[] = { (uint8_t *)&g_au32RwMasks0[0], (uint8_t *)&g_au32RwMasks1[0] };
453
454/** Array of RW1C masks for each register group. */
455static uint8_t const *g_apbRw1cMasks[] = { (uint8_t *)&g_au32Rw1cMasks0[0], (uint8_t *)&g_au32Rw1cMasks1[0] };
456
457/* Masks arrays must be identical in size (even bounds checking code assumes this). */
458AssertCompile(sizeof(g_apbRw1cMasks) == sizeof(g_apbRwMasks));
459
460
461#ifndef VBOX_DEVICE_STRUCT_TESTCASE
/**
 * Gets the number of supported adjusted guest-address width (SAGAW) in bits given a
 * CAP_REG.SAGAW value.
 *
 * @returns Number of SAGAW bits (39/48/57), or 0 for unsupported encodings.
 * @param   uSagaw  The CAP_REG.SAGAW value.
 */
static uint8_t vtdCapRegGetSagawBits(uint8_t uSagaw)
{
    switch (uSagaw)
    {
        case 1:  return 39;     /* 3-level page tables. */
        case 2:  return 48;     /* 4-level page tables. */
        case 3:  return 57;     /* 5-level page tables. */
        default: return 0;      /* Unsupported/reserved encoding. */
    }
}
475
476
477/**
478 * Gets the supported adjusted guest-address width (SAGAW) given the maximum guest
479 * address width (MGAW).
480 *
481 * @returns The CAP_REG.SAGAW value.
482 * @param uMgaw The CAP_REG.MGAW value.
483 */
484static uint8_t vtdCapRegGetSagaw(uint8_t uMgaw)
485{
486 switch (uMgaw + 1)
487 {
488 case 39: return 1;
489 case 48: return 2;
490 case 57: return 3;
491 }
492 return 0;
493}
494
495
496/**
497 * Gets the group the register belongs to given its MMIO offset.
498 *
499 * @returns Pointer to the first element of the register group.
500 * @param pThis The shared DMAR device state.
501 * @param offReg The MMIO offset of the register.
502 * @param cbReg The size of the access being made (for bounds checking on
503 * debug builds).
504 * @param pIdxGroup Where to store the index of the register group the register
505 * belongs to.
506 */
507DECLINLINE(uint8_t *) dmarRegGetGroup(PDMAR pThis, uint16_t offReg, uint8_t cbReg, uint8_t *pIdxGroup)
508{
509 uint16_t const offLast = offReg + cbReg - 1;
510 AssertCompile(DMAR_MMIO_GROUP_0_OFF_FIRST == 0);
511 AssertMsg(DMAR_IS_MMIO_OFF_VALID(offLast), ("off=%#x cb=%u\n", offReg, cbReg));
512
513 uint8_t *const apbRegs[] = { &pThis->abRegs0[0], &pThis->abRegs1[0] };
514 *pIdxGroup = !(offLast < DMAR_MMIO_GROUP_0_OFF_END);
515 return apbRegs[*pIdxGroup];
516}
517
518
519/**
520 * Writes a 64-bit register with the exactly the supplied value.
521 *
522 * @param pThis The shared DMAR device state.
523 * @param offReg The MMIO offset of the register.
524 * @param uReg The 64-bit value to write.
525 */
526DECLINLINE(void) dmarRegWriteRaw64(PDMAR pThis, uint16_t offReg, uint64_t uReg)
527{
528 uint8_t idxGroup;
529 uint8_t *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint64_t), &idxGroup);
530 NOREF(idxGroup);
531 *(uint64_t *)(pabRegs + offReg) = uReg;
532}
533
534
535/**
536 * Writes a 32-bit register with the exactly the supplied value.
537 *
538 * @param pThis The shared DMAR device state.
539 * @param offReg The MMIO offset of the register.
540 * @param uReg The 32-bit value to write.
541 */
542DECLINLINE(void) dmarRegWriteRaw32(PDMAR pThis, uint16_t offReg, uint32_t uReg)
543{
544 uint8_t idxGroup;
545 uint8_t *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint32_t), &idxGroup);
546 NOREF(idxGroup);
547 *(uint32_t *)(pabRegs + offReg) = uReg;
548}
549
550
551/**
552 * Reads a 64-bit register with exactly the value it contains.
553 *
554 * @param pThis The shared DMAR device state.
555 * @param offReg The MMIO offset of the register.
556 * @param puReg Where to store the raw 64-bit register value.
557 * @param pfRwMask Where to store the RW mask corresponding to this register.
558 * @param pfRw1cMask Where to store the RW1C mask corresponding to this register.
559 */
560DECLINLINE(void) dmarRegReadRaw64(PDMAR pThis, uint16_t offReg, uint64_t *puReg, uint64_t *pfRwMask, uint64_t *pfRw1cMask)
561{
562 uint8_t idxGroup;
563 uint8_t const *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint64_t), &idxGroup);
564 Assert(idxGroup < RT_ELEMENTS(g_apbRwMasks));
565 uint8_t const *pabRwMasks = g_apbRwMasks[idxGroup];
566 uint8_t const *pabRw1cMasks = g_apbRw1cMasks[idxGroup];
567 *puReg = *(uint64_t *)(pabRegs + offReg);
568 *pfRwMask = *(uint64_t *)(pabRwMasks + offReg);
569 *pfRw1cMask = *(uint64_t *)(pabRw1cMasks + offReg);
570}
571
572
573/**
574 * Reads a 32-bit register with exactly the value it contains.
575 *
576 * @param pThis The shared DMAR device state.
577 * @param offReg The MMIO offset of the register.
578 * @param puReg Where to store the raw 32-bit register value.
579 * @param pfRwMask Where to store the RW mask corresponding to this register.
580 * @param pfRw1cMask Where to store the RW1C mask corresponding to this register.
581 */
582DECLINLINE(void) dmarRegReadRaw32(PDMAR pThis, uint16_t offReg, uint32_t *puReg, uint32_t *pfRwMask, uint32_t *pfRw1cMask)
583{
584 uint8_t idxGroup;
585 uint8_t const *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint32_t), &idxGroup);
586 Assert(idxGroup < RT_ELEMENTS(g_apbRwMasks));
587 uint8_t const *pabRwMasks = g_apbRwMasks[idxGroup];
588 uint8_t const *pabRw1cMasks = g_apbRw1cMasks[idxGroup];
589 *puReg = *(uint32_t *)(pabRegs + offReg);
590 *pfRwMask = *(uint32_t *)(pabRwMasks + offReg);
591 *pfRw1cMask = *(uint32_t *)(pabRw1cMasks + offReg);
592}
593
594
595/**
596 * Writes a 64-bit register as it would be when written by software.
597 * This will preserve read-only bits, mask off reserved bits and clear RW1C bits.
598 *
599 * @returns The value that's actually written to the register.
600 * @param pThis The shared DMAR device state.
601 * @param offReg The MMIO offset of the register.
602 * @param uReg The 64-bit value to write.
603 */
604static uint64_t dmarRegWrite64(PDMAR pThis, uint16_t offReg, uint64_t uReg)
605{
606 /* Read current value from the 64-bit register. */
607 uint64_t uCurReg;
608 uint64_t fRwMask;
609 uint64_t fRw1cMask;
610 dmarRegReadRaw64(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
611
612 uint64_t const fRoBits = uCurReg & ~fRwMask; /* Preserve current read-only and reserved bits. */
613 uint64_t const fRwBits = uReg & fRwMask; /* Merge newly written read/write bits. */
614 uint64_t const fRw1cBits = uReg & fRw1cMask; /* Clear 1s written to RW1C bits. */
615 uint64_t const uNewReg = (fRoBits | fRwBits) & ~fRw1cBits;
616
617 /* Write new value to the 64-bit register. */
618 dmarRegWriteRaw64(pThis, offReg, uNewReg);
619 return uNewReg;
620}
621
622
623/**
624 * Writes a 32-bit register as it would be when written by software.
625 * This will preserve read-only bits, mask off reserved bits and clear RW1C bits.
626 *
627 * @returns The value that's actually written to the register.
628 * @param pThis The shared DMAR device state.
629 * @param offReg The MMIO offset of the register.
630 * @param uReg The 32-bit value to write.
631 */
632static uint32_t dmarRegWrite32(PDMAR pThis, uint16_t offReg, uint32_t uReg)
633{
634 /* Read current value from the 32-bit register. */
635 uint32_t uCurReg;
636 uint32_t fRwMask;
637 uint32_t fRw1cMask;
638 dmarRegReadRaw32(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
639
640 uint32_t const fRoBits = uCurReg & ~fRwMask; /* Preserve current read-only and reserved bits. */
641 uint32_t const fRwBits = uReg & fRwMask; /* Merge newly written read/write bits. */
642 uint32_t const fRw1cBits = uReg & fRw1cMask; /* Clear 1s written to RW1C bits. */
643 uint32_t const uNewReg = (fRoBits | fRwBits) & ~fRw1cBits;
644
645 /* Write new value to the 32-bit register. */
646 dmarRegWriteRaw32(pThis, offReg, uNewReg);
647 return uNewReg;
648}
649
650
651/**
652 * Reads a 64-bit register as it would be when read by software.
653 *
654 * @returns The 64-bit register value.
655 * @param pThis The shared DMAR device state.
656 * @param offReg The MMIO offset of the register.
657 */
658static uint64_t dmarRegRead64(PDMAR pThis, uint16_t offReg)
659{
660 uint64_t uCurReg;
661 uint64_t fRwMask;
662 uint64_t fRw1cMask;
663 dmarRegReadRaw64(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
664 NOREF(fRwMask); NOREF(fRw1cMask);
665 return uCurReg;
666}
667
668
669/**
670 * Reads a 32-bit register as it would be when read by software.
671 *
672 * @returns The 32-bit register value.
673 * @param pThis The shared DMAR device state.
674 * @param offReg The MMIO offset of the register.
675 */
676static uint32_t dmarRegRead32(PDMAR pThis, uint16_t offReg)
677{
678 uint32_t uCurReg;
679 uint32_t fRwMask;
680 uint32_t fRw1cMask;
681 dmarRegReadRaw32(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
682 NOREF(fRwMask); NOREF(fRw1cMask);
683 return uCurReg;
684}
685
686
687/**
688 * Handles writes to IQT_REG.
689 *
690 * @returns Strict VBox status code.
691 * @param pDevIns The IOMMU device instance.
692 * @param off The MMIO register offset.
693 * @param uIqtReg The value written to IQT_REG.
694 */
695static VBOXSTRICTRC dmarIqtRegWrite(PPDMDEVINS pDevIns, uint16_t off, uint64_t uIqtReg)
696{
697 /* We only care about the low dword of VTD_MMIO_OFF_IQT_REG. */
698 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
699 if (off == VTD_MMIO_OFF_IQT_REG)
700 {
701 /* Verify if the queue tail offset is aligned according to the descriptor width in IQA_REG. */
702 uint16_t const offQueueTail = VTD_IQT_REG_GET_QT(uIqtReg);
703 uint64_t const uIqaReg = dmarRegRead64(pThis, VTD_MMIO_OFF_IQA_REG);
704 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
705 if ( fDw != VTD_IQA_REG_DW_256_BIT
706 || !(offQueueTail & 0x1f))
707 {
708 /** @todo IOMMU: Figure out what to do here, like waking up worker thread or
709 * something. */
710 }
711 else
712 {
713 /* Raise invalidation queue error as queue tail not aligned to 256-bits. */
714 /** @todo IOMMU: Raise error. */
715 }
716 }
717 return VINF_SUCCESS;
718}
719
720
721/**
722 * Memory access bulk (one or more 4K pages) request from a device.
723 *
724 * @returns VBox status code.
725 * @param pDevIns The IOMMU device instance.
726 * @param idDevice The device ID (bus, device, function).
727 * @param cIovas The number of addresses being accessed.
728 * @param pauIovas The I/O virtual addresses for each page being accessed.
729 * @param fFlags The access flags, see PDMIOMMU_MEM_F_XXX.
730 * @param paGCPhysSpa Where to store the translated physical addresses.
731 *
732 * @thread Any.
733 */
734static DECLCALLBACK(int) iommuIntelMemBulkAccess(PPDMDEVINS pDevIns, uint16_t idDevice, size_t cIovas, uint64_t const *pauIovas,
735 uint32_t fFlags, PRTGCPHYS paGCPhysSpa)
736{
737 RT_NOREF6(pDevIns, idDevice, cIovas, pauIovas, fFlags, paGCPhysSpa);
738 return VERR_NOT_IMPLEMENTED;
739}
740
741
742/**
743 * Memory access transaction from a device.
744 *
745 * @returns VBox status code.
746 * @param pDevIns The IOMMU device instance.
747 * @param idDevice The device ID (bus, device, function).
748 * @param uIova The I/O virtual address being accessed.
749 * @param cbIova The size of the access.
750 * @param fFlags The access flags, see PDMIOMMU_MEM_F_XXX.
751 * @param pGCPhysSpa Where to store the translated system physical address.
752 * @param pcbContiguous Where to store the number of contiguous bytes translated
753 * and permission-checked.
754 *
755 * @thread Any.
756 */
757static DECLCALLBACK(int) iommuIntelMemAccess(PPDMDEVINS pDevIns, uint16_t idDevice, uint64_t uIova, size_t cbIova,
758 uint32_t fFlags, PRTGCPHYS pGCPhysSpa, size_t *pcbContiguous)
759{
760 RT_NOREF7(pDevIns, idDevice, uIova, cbIova, fFlags, pGCPhysSpa, pcbContiguous);
761 return VERR_NOT_IMPLEMENTED;
762}
763
764
765/**
766 * Interrupt remap request from a device.
767 *
768 * @returns VBox status code.
769 * @param pDevIns The IOMMU device instance.
770 * @param idDevice The device ID (bus, device, function).
771 * @param pMsiIn The source MSI.
772 * @param pMsiOut Where to store the remapped MSI.
773 */
774static DECLCALLBACK(int) iommuIntelMsiRemap(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
775{
776 RT_NOREF3(idDevice, pMsiIn, pMsiOut);
777 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
778 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMsiRemap)); NOREF(pThis);
779
780 return VERR_NOT_IMPLEMENTED;
781}
782
783
784/**
785 * @callback_method_impl{FNIOMMMIONEWWRITE}
786 */
787static DECLCALLBACK(VBOXSTRICTRC) dmarMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
788{
789 RT_NOREF1(pvUser);
790 DMAR_ASSERT_MMIO_ACCESS_RET(off, cb);
791
792 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
793 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioWrite));
794
795 uint16_t const offReg = off;
796 uint16_t const offLast = offReg + cb - 1;
797 if (DMAR_IS_MMIO_OFF_VALID(offLast))
798 {
799 uint64_t const uRegWritten = cb == 8 ? dmarRegWrite64(pThis, offReg, *(uint64_t *)pv)
800 : dmarRegWrite32(pThis, offReg, *(uint32_t *)pv);
801 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
802 switch (off)
803 {
804 case VTD_MMIO_OFF_IQT_REG:
805 case VTD_MMIO_OFF_IQT_REG + 4:
806 {
807 rcStrict = dmarIqtRegWrite(pDevIns, offReg, uRegWritten);
808 break;
809 }
810 }
811
812 LogFlowFunc(("offReg=%#x rc=%Rrc\n", offReg, VBOXSTRICTRC_VAL(rcStrict)));
813 return rcStrict;
814 }
815
816 return VINF_IOM_MMIO_UNUSED_FF;
817}
818
819
820/**
821 * @callback_method_impl{FNIOMMMIONEWREAD}
822 */
823static DECLCALLBACK(VBOXSTRICTRC) dmarMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
824{
825 RT_NOREF1(pvUser);
826 DMAR_ASSERT_MMIO_ACCESS_RET(off, cb);
827
828 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
829 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioRead));
830
831 uint16_t const offReg = off;
832 uint16_t const offLast = offReg + cb - 1;
833 if (DMAR_IS_MMIO_OFF_VALID(offLast))
834 {
835 if (cb == 8)
836 {
837 *(uint64_t *)pv = dmarRegRead64(pThis, offReg);
838 LogFlowFunc(("offReg=%#x pv=%#RX64\n", offReg, *(uint64_t *)pv));
839 }
840 else
841 {
842 *(uint32_t *)pv = dmarRegRead32(pThis, offReg);
843 LogFlowFunc(("offReg=%#x pv=%#RX32\n", offReg, *(uint32_t *)pv));
844 }
845
846 return VINF_SUCCESS;
847 }
848
849 return VINF_IOM_MMIO_UNUSED_FF;
850}
851
852
853#ifdef IN_RING3
854/**
855 * Initializes all registers in the DMAR unit.
856 *
857 * @param pDevIns The IOMMU device instance.
858 */
859static void dmarR3RegsInit(PPDMDEVINS pDevIns)
860{
861 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
862 RT_ZERO(pThis->abRegs0);
863 RT_ZERO(pThis->abRegs1);
864
865 /*
866 * Initialize registers not mutable by software prior to initializing other registers.
867 */
868 /* VER_REG */
869 {
870 pThis->uVerReg = RT_BF_MAKE(VTD_BF_VER_REG_MIN, DMAR_VER_MINOR)
871 | RT_BF_MAKE(VTD_BF_VER_REG_MAX, DMAR_VER_MAJOR);
872 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_VER_REG, pThis->uVerReg);
873 }
874
875 uint8_t const fFlts = 1; /* First-Level translation support. */
876 uint8_t const fSlts = 1; /* Second-Level translation support. */
877 uint8_t const fPt = 1; /* Pass-Through support. */
878 uint8_t const fSmts = fFlts & fSlts & fPt; /* Scalable mode translation support.*/
879 uint8_t const fNest = 0; /* Nested translation support. */
880
881 /* CAP_REG */
882 {
883 uint8_t cGstPhysAddrBits;
884 uint8_t cGstLinearAddrBits;
885 PDMDevHlpCpuGetGuestAddrWidths(pDevIns, &cGstPhysAddrBits, &cGstLinearAddrBits);
886
887 uint8_t const fFl1gp = 1; /* First-Level 1GB pages support. */
888 uint8_t const fFl5lp = 1; /* First-level 5-level paging support (PML5E). */
889 uint8_t const fSl2mp = fSlts & 1; /* Second-Level 2MB pages support. */
890 uint8_t const fSl2gp = fSlts & 1; /* Second-Level 1GB pages support. */
891 uint8_t const fSllps = fSl2mp /* Second-Level large page Support. */
892 | ((fSl2mp & fFl1gp) & RT_BIT(1));
893 uint8_t const fMamv = (fSl2gp ? /* Maximum address mask value (for second-level invalidations). */
894 X86_PAGE_1G_SHIFT : X86_PAGE_2M_SHIFT) - X86_PAGE_4K_SHIFT;
895 uint8_t const fNd = 2; /* Number of domains (0=16, 1=64, 2=256, 3=1K, 4=4K, 5=16K, 6=64K,
896 7=Reserved). */
897 uint8_t const fPsi = 1; /* Page selective invalidation. */
898 uint8_t const uMgaw = cGstPhysAddrBits - 1; /* Maximum guest address width. */
899 uint8_t const uSagaw = vtdCapRegGetSagaw(uMgaw); /* Supported adjust guest address width. */
900 uint16_t const offFro = DMAR_MMIO_OFF_FRCD_LO_REG >> 4; /* MMIO offset of FRCD registers. */
901
902 pThis->fCap = RT_BF_MAKE(VTD_BF_CAP_REG_ND, fNd)
903 | RT_BF_MAKE(VTD_BF_CAP_REG_AFL, 0) /* Advanced fault logging not supported. */
904 | RT_BF_MAKE(VTD_BF_CAP_REG_RWBF, 0) /* Software need not flush write-buffers. */
905 | RT_BF_MAKE(VTD_BF_CAP_REG_PLMR, 0) /* Protected Low-Memory Region not supported. */
906 | RT_BF_MAKE(VTD_BF_CAP_REG_PHMR, 0) /* Protected High-Memory Region not supported. */
907 | RT_BF_MAKE(VTD_BF_CAP_REG_CM, 1) /** @todo Figure out if required when we impl. caching. */
908 | RT_BF_MAKE(VTD_BF_CAP_REG_SAGAW, fSlts & uSagaw)
909 | RT_BF_MAKE(VTD_BF_CAP_REG_MGAW, uMgaw)
910 | RT_BF_MAKE(VTD_BF_CAP_REG_ZLR, 1) /** @todo Figure out if/how to support zero-length reads. */
911 | RT_BF_MAKE(VTD_BF_CAP_REG_FRO, offFro)
912 | RT_BF_MAKE(VTD_BF_CAP_REG_SLLPS, fSlts & fSllps)
913 | RT_BF_MAKE(VTD_BF_CAP_REG_PSI, fPsi)
914 | RT_BF_MAKE(VTD_BF_CAP_REG_NFR, DMAR_FRCD_REG_COUNT - 1)
915 | RT_BF_MAKE(VTD_BF_CAP_REG_MAMV, fPsi & fMamv)
916 | RT_BF_MAKE(VTD_BF_CAP_REG_DWD, 1)
917 | RT_BF_MAKE(VTD_BF_CAP_REG_DRD, 1)
918 | RT_BF_MAKE(VTD_BF_CAP_REG_FL1GP, fFlts & fFl1gp)
919 | RT_BF_MAKE(VTD_BF_CAP_REG_PI, 0) /* Posted Interrupts not supported. */
920 | RT_BF_MAKE(VTD_BF_CAP_REG_FL5LP, fFlts & fFl5lp)
921 | RT_BF_MAKE(VTD_BF_CAP_REG_ESIRTPS, 0) /* Whether we invalidate interrupt cache on SIRTP flow. */
922 | RT_BF_MAKE(VTD_BF_CAP_REG_ESRTPS, 0); /* Whether we invalidate translation cache on SRTP flow. */
923 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_CAP_REG, pThis->fCap);
924 }
925
926 /* ECAP_REG */
927 {
928 uint8_t const fQi = 1; /* Queued invalidations. */
929 uint8_t const fIr = !!(DMAR_ACPI_DMAR_FLAGS & ACPI_DMAR_F_INTR_REMAP); /* Interrupt remapping support. */
930 uint8_t const fMhmv = 0xf; /* Maximum handle mask value. */
931 uint16_t const offIro = DMAR_MMIO_OFF_IVA_REG >> 4; /* MMIO offset of IOTLB registers. */
932 uint8_t const fSrs = 1; /* Supervisor request support. */
933 uint8_t const fEim = 1; /* Extended interrupt mode.*/
934 uint8_t const fAdms = 1; /* Abort DMA mode support. */
935
936 pThis->fExtCap = RT_BF_MAKE(VTD_BF_ECAP_REG_C, 0) /* Accesses don't snoop CPU cache. */
937 | RT_BF_MAKE(VTD_BF_ECAP_REG_QI, 1)
938 | RT_BF_MAKE(VTD_BF_ECAP_REG_DT, 0) /* Device-TLBs not supported. */
939 | RT_BF_MAKE(VTD_BF_ECAP_REG_IR, fQi & fIr)
940 | RT_BF_MAKE(VTD_BF_ECAP_REG_EIM, fIr & fEim)
941 | RT_BF_MAKE(VTD_BF_ECAP_REG_PT, fPt)
942 | RT_BF_MAKE(VTD_BF_ECAP_REG_SC, 0) /* Snoop control not supported. */
943 | RT_BF_MAKE(VTD_BF_ECAP_REG_IRO, offIro)
944 | RT_BF_MAKE(VTD_BF_ECAP_REG_MHMV, fIr & fMhmv)
945 | RT_BF_MAKE(VTD_BF_ECAP_REG_MTS, 0) /* Memory type not supported. */
946 | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST, fNest)
947 | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS, 0) /* 0 as DT not supported. */
948 | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS, 0) /* Execute request not supported. */
949 | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS, fSmts & fSrs)
950 | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS, 0) /* 0 as DT not supported. */
951 | RT_BF_MAKE(VTD_BF_ECAP_REG_EAFS, 0) /** @todo figure out if EAFS is required? */
952 | RT_BF_MAKE(VTD_BF_ECAP_REG_PSS, 0) /* 0 as PASID not supported. */
953 | RT_BF_MAKE(VTD_BF_ECAP_REG_PASID, 0) /* PASID support. */
954 | RT_BF_MAKE(VTD_BF_ECAP_REG_DIT, 0) /* 0 as DT not supported. */
955 | RT_BF_MAKE(VTD_BF_ECAP_REG_PDS, 0) /* 0 as DT not supported. */
956 | RT_BF_MAKE(VTD_BF_ECAP_REG_SMTS, fSmts)
957 | RT_BF_MAKE(VTD_BF_ECAP_REG_VCS, 0) /* 0 as PASID not supported (commands seem PASID specific). */
958 | RT_BF_MAKE(VTD_BF_ECAP_REG_SLADS, 0) /* Second-level accessed/dirty not supported. */
959 | RT_BF_MAKE(VTD_BF_ECAP_REG_SLTS, fSlts)
960 | RT_BF_MAKE(VTD_BF_ECAP_REG_FLTS, fFlts)
961 | RT_BF_MAKE(VTD_BF_ECAP_REG_SMPWCS, 0) /* 0 as PASID not supported. */
962 | RT_BF_MAKE(VTD_BF_ECAP_REG_RPS, 0) /* We don't support RID_PASID field in SM context entry. */
963 | RT_BF_MAKE(VTD_BF_ECAP_REG_ADMS, fAdms)
964 | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0); /** @todo figure out if we should/can support this? */
965 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCap);
966 }
967
968 /*
969 * Initialize registers mutable by software.
970 */
971 /* FECTL_REG */
972 {
973 uint32_t const uCtl = RT_BF_MAKE(VTD_BF_FECTL_REG_IM, 1);
974 dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_FECTL_REG, uCtl);
975 }
976
977 /* ICETL_REG */
978 {
979 uint32_t const uCtl = RT_BF_MAKE(VTD_BF_IECTL_REG_IM, 1);
980 dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_IECTL_REG, uCtl);
981 }
982
983#ifdef VBOX_STRICT
984 Assert(!RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_PRS)); /* PECTL_REG - Reserved if don't support PRS. */
985 Assert(!RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_MTS)); /* MTRRCAP_REG - Reserved if we don't support MTS. */
986#endif
987}
988
989
990/**
991 * @interface_method_impl{PDMDEVREG,pfnReset}
992 */
993static DECLCALLBACK(void) iommuIntelR3Reset(PPDMDEVINS pDevIns)
994{
995 RT_NOREF1(pDevIns);
996 LogFlowFunc(("\n"));
997
998 dmarR3RegsInit(pDevIns);
999}
1000
1001
1002/**
1003 * @interface_method_impl{PDMDEVREG,pfnDestruct}
1004 */
1005static DECLCALLBACK(int) iommuIntelR3Destruct(PPDMDEVINS pDevIns)
1006{
1007 RT_NOREF(pDevIns);
1008 LogFlowFunc(("\n"));
1009 return VINF_SUCCESS;
1010}
1011
1012
1013/**
1014 * @interface_method_impl{PDMDEVREG,pfnConstruct}
1015 */
1016static DECLCALLBACK(int) iommuIntelR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
1017{
1018 RT_NOREF(pCfg);
1019
1020 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1021 PDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PDMARR3);
1022 pThisR3->pDevInsR3 = pDevIns;
1023
1024 LogFlowFunc(("iInstance=%d\n", iInstance));
1025 NOREF(iInstance);
1026
1027 /*
1028 * Register the IOMMU with PDM.
1029 */
1030 PDMIOMMUREGR3 IommuReg;
1031 RT_ZERO(IommuReg);
1032 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
1033 IommuReg.pfnMemAccess = iommuIntelMemAccess;
1034 IommuReg.pfnMemBulkAccess = iommuIntelMemBulkAccess;
1035 IommuReg.pfnMsiRemap = iommuIntelMsiRemap;
1036 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
1037 int rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisR3->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
1038 if (RT_FAILURE(rc))
1039 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to register ourselves as an IOMMU device"));
1040 if (pThisR3->CTX_SUFF(pIommuHlp)->u32Version != PDM_IOMMUHLPR3_VERSION)
1041 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
1042 N_("IOMMU helper version mismatch; got %#x expected %#x"),
1043 pThisR3->CTX_SUFF(pIommuHlp)->u32Version, PDM_IOMMUHLPR3_VERSION);
1044 if (pThisR3->CTX_SUFF(pIommuHlp)->u32TheEnd != PDM_IOMMUHLPR3_VERSION)
1045 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
1046 N_("IOMMU helper end-version mismatch; got %#x expected %#x"),
1047 pThisR3->CTX_SUFF(pIommuHlp)->u32TheEnd, PDM_IOMMUHLPR3_VERSION);
1048 /*
1049 * Use PDM's critical section (via helpers) for the IOMMU device.
1050 */
1051 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
1052 AssertRCReturn(rc, rc);
1053
1054 /*
1055 * Initialize PCI configuration registers.
1056 */
1057 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
1058 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
1059
1060 /* Header. */
1061 PDMPciDevSetVendorId(pPciDev, DMAR_PCI_VENDOR_ID); /* Intel */
1062 PDMPciDevSetDeviceId(pPciDev, DMAR_PCI_DEVICE_ID); /* VirtualBox DMAR device */
1063 PDMPciDevSetRevisionId(pPciDev, DMAR_PCI_REVISION_ID); /* VirtualBox specific device implementation revision */
1064 PDMPciDevSetClassBase(pPciDev, VBOX_PCI_CLASS_SYSTEM); /* System Base Peripheral */
1065 PDMPciDevSetClassSub(pPciDev, VBOX_PCI_SUB_SYSTEM_OTHER); /* Other */
1066 PDMPciDevSetHeaderType(pPciDev, 0); /* Single function, type 0 */
1067 PDMPciDevSetSubSystemId(pPciDev, DMAR_PCI_DEVICE_ID); /* VirtualBox DMAR device */
1068 PDMPciDevSetSubSystemVendorId(pPciDev, DMAR_PCI_VENDOR_ID); /* Intel */
1069
1070 /** @todo VTD: Chipset spec says PCI Express Capability Id. Relevant for us? */
1071 PDMPciDevSetStatus(pPciDev, 0);
1072 PDMPciDevSetCapabilityList(pPciDev, 0);
1073
1074 /** @todo VTD: VTBAR at 0x180? */
1075
1076 /*
1077 * Register the PCI function with PDM.
1078 */
1079 rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
1080 AssertLogRelRCReturn(rc, rc);
1081
1082 /** @todo VTD: Register MSI but what's the MSI capability offset? */
1083#if 0
1084 /*
1085 * Register MSI support for the PCI device.
1086 * This must be done -after- registering it as a PCI device!
1087 */
1088#endif
1089
1090 /*
1091 * Register MMIO region.
1092 */
1093 AssertCompile(!(DMAR_MMIO_BASE_PHYSADDR & X86_PAGE_4K_OFFSET_MASK));
1094 rc = PDMDevHlpMmioCreateAndMap(pDevIns, DMAR_MMIO_BASE_PHYSADDR, DMAR_MMIO_SIZE, dmarMmioWrite, dmarMmioRead,
1095 IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_DWORD_QWORD_ZEROED,
1096 "Intel-IOMMU", &pThis->hMmio);
1097 AssertRCReturn(rc, rc);
1098
1099#ifdef VBOX_WITH_STATISTICS
1100 /*
1101 * Statistics.
1102 */
1103 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadR3, STAMTYPE_COUNTER, "R3/MmioRead", STAMUNIT_OCCURENCES, "Number of MMIO reads in R3");
1104 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadRZ, STAMTYPE_COUNTER, "RZ/MmioRead", STAMUNIT_OCCURENCES, "Number of MMIO reads in RZ.");
1105
1106 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteR3, STAMTYPE_COUNTER, "R3/MmioWrite", STAMUNIT_OCCURENCES, "Number of MMIO writes in R3.");
1107 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteRZ, STAMTYPE_COUNTER, "RZ/MmioWrite", STAMUNIT_OCCURENCES, "Number of MMIO writes in RZ.");
1108
1109 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapR3, STAMTYPE_COUNTER, "R3/MsiRemap", STAMUNIT_OCCURENCES, "Number of interrupt remap requests in R3.");
1110 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapRZ, STAMTYPE_COUNTER, "RZ/MsiRemap", STAMUNIT_OCCURENCES, "Number of interrupt remap requests in RZ.");
1111
1112 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemReadR3, STAMTYPE_COUNTER, "R3/MemRead", STAMUNIT_OCCURENCES, "Number of memory read translation requests in R3.");
1113 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemReadRZ, STAMTYPE_COUNTER, "RZ/MemRead", STAMUNIT_OCCURENCES, "Number of memory read translation requests in RZ.");
1114
1115 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemWriteR3, STAMTYPE_COUNTER, "R3/MemWrite", STAMUNIT_OCCURENCES, "Number of memory write translation requests in R3.");
1116 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemWriteRZ, STAMTYPE_COUNTER, "RZ/MemWrite", STAMUNIT_OCCURENCES, "Number of memory write translation requests in RZ.");
1117
1118 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkReadR3, STAMTYPE_COUNTER, "R3/MemBulkRead", STAMUNIT_OCCURENCES, "Number of memory bulk read translation requests in R3.");
1119 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkReadRZ, STAMTYPE_COUNTER, "RZ/MemBulkRead", STAMUNIT_OCCURENCES, "Number of memory bulk read translation requests in RZ.");
1120
1121 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkWriteR3, STAMTYPE_COUNTER, "R3/MemBulkWrite", STAMUNIT_OCCURENCES, "Number of memory bulk write translation requests in R3.");
1122 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkWriteRZ, STAMTYPE_COUNTER, "RZ/MemBulkWrite", STAMUNIT_OCCURENCES, "Number of memory bulk write translation requests in RZ.");
1123#endif
1124
1125 /*
1126 * Initialize registers.
1127 */
1128 dmarR3RegsInit(pDevIns);
1129
1130 /*
1131 * Log some of the features exposed to software.
1132 */
1133 uint32_t const uVerReg = pThis->uVerReg;
1134 uint8_t const cMaxGstAddrBits = RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_MGAW) + 1;
1135 uint8_t const cSupGstAddrBits = vtdCapRegGetSagawBits(RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_SAGAW));
1136 uint16_t const offFrcd = RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_FRO);
1137 uint16_t const offIva = RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_IRO);
1138 LogRel(("%s: VER=%u.%u CAP=%#RX64 ECAP=%#RX64 (MGAW=%u bits, SAGAW=%u bits, FRO=%#x, IRO=%#x) mapped at %#RGp\n", DMAR_LOG_PFX,
1139 RT_BF_GET(uVerReg, VTD_BF_VER_REG_MAX), RT_BF_GET(uVerReg, VTD_BF_VER_REG_MIN),
1140 pThis->fCap, pThis->fExtCap, cMaxGstAddrBits, cSupGstAddrBits, offFrcd, offIva, DMAR_MMIO_BASE_PHYSADDR));
1141 return VINF_SUCCESS;
1142}
1143
1144#else
1145
1146/**
1147 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
1148 */
1149static DECLCALLBACK(int) iommuIntelRZConstruct(PPDMDEVINS pDevIns)
1150{
1151 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
1152 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1153 PDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PDMARCC);
1154 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
1155
1156 /* We will use PDM's critical section (via helpers) for the IOMMU device. */
1157 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
1158 AssertRCReturn(rc, rc);
1159
1160 /* Set up the MMIO RZ handlers. */
1161 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, dmarMmioWrite, dmarMmioRead, NULL /* pvUser */);
1162 AssertRCReturn(rc, rc);
1163
1164 /* Set up the IOMMU RZ callbacks. */
1165 PDMIOMMUREGCC IommuReg;
1166 RT_ZERO(IommuReg);
1167 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
1168 IommuReg.idxIommu = pThis->idxIommu;
1169 IommuReg.pfnMemAccess = iommuIntelMemAccess;
1170 IommuReg.pfnMemBulkAccess = iommuIntelMemBulkAccess;
1171 IommuReg.pfnMsiRemap = iommuIntelMsiRemap;
1172 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
1173
1174 rc = PDMDevHlpIommuSetUpContext(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp));
1175 AssertRCReturn(rc, rc);
1176 AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp), VERR_IOMMU_IPE_1);
1177 AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32Version == CTX_SUFF(PDM_IOMMUHLP)_VERSION, VERR_VERSION_MISMATCH);
1178 AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd == CTX_SUFF(PDM_IOMMUHLP)_VERSION, VERR_VERSION_MISMATCH);
1179 AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp)->pfnLock, VERR_INVALID_POINTER);
1180 AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp)->pfnUnlock, VERR_INVALID_POINTER);
1181
1182 return VINF_SUCCESS;
1183}
1184
1185#endif
1186
1187
1188/**
1189 * The device registration structure.
1190 */
1191PDMDEVREG const g_DeviceIommuIntel =
1192{
1193 /* .u32Version = */ PDM_DEVREG_VERSION,
1194 /* .uReserved0 = */ 0,
1195 /* .szName = */ "iommu-intel",
1196 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
1197 /* .fClass = */ PDM_DEVREG_CLASS_PCI_BUILTIN,
1198 /* .cMaxInstances = */ 1,
1199 /* .uSharedVersion = */ 42,
1200 /* .cbInstanceShared = */ sizeof(DMAR),
1201 /* .cbInstanceCC = */ sizeof(DMARCC),
1202 /* .cbInstanceRC = */ sizeof(DMARRC),
1203 /* .cMaxPciDevices = */ 1,
1204 /* .cMaxMsixVectors = */ 0,
1205 /* .pszDescription = */ "IOMMU (Intel)",
1206#if defined(IN_RING3)
1207 /* .pszRCMod = */ "VBoxDDRC.rc",
1208 /* .pszR0Mod = */ "VBoxDDR0.r0",
1209 /* .pfnConstruct = */ iommuIntelR3Construct,
1210 /* .pfnDestruct = */ iommuIntelR3Destruct,
1211 /* .pfnRelocate = */ NULL,
1212 /* .pfnMemSetup = */ NULL,
1213 /* .pfnPowerOn = */ NULL,
1214 /* .pfnReset = */ iommuIntelR3Reset,
1215 /* .pfnSuspend = */ NULL,
1216 /* .pfnResume = */ NULL,
1217 /* .pfnAttach = */ NULL,
1218 /* .pfnDetach = */ NULL,
1219 /* .pfnQueryInterface = */ NULL,
1220 /* .pfnInitComplete = */ NULL,
1221 /* .pfnPowerOff = */ NULL,
1222 /* .pfnSoftReset = */ NULL,
1223 /* .pfnReserved0 = */ NULL,
1224 /* .pfnReserved1 = */ NULL,
1225 /* .pfnReserved2 = */ NULL,
1226 /* .pfnReserved3 = */ NULL,
1227 /* .pfnReserved4 = */ NULL,
1228 /* .pfnReserved5 = */ NULL,
1229 /* .pfnReserved6 = */ NULL,
1230 /* .pfnReserved7 = */ NULL,
1231#elif defined(IN_RING0)
1232 /* .pfnEarlyConstruct = */ NULL,
1233 /* .pfnConstruct = */ iommuIntelRZConstruct,
1234 /* .pfnDestruct = */ NULL,
1235 /* .pfnFinalDestruct = */ NULL,
1236 /* .pfnRequest = */ NULL,
1237 /* .pfnReserved0 = */ NULL,
1238 /* .pfnReserved1 = */ NULL,
1239 /* .pfnReserved2 = */ NULL,
1240 /* .pfnReserved3 = */ NULL,
1241 /* .pfnReserved4 = */ NULL,
1242 /* .pfnReserved5 = */ NULL,
1243 /* .pfnReserved6 = */ NULL,
1244 /* .pfnReserved7 = */ NULL,
1245#elif defined(IN_RC)
1246 /* .pfnConstruct = */ iommuIntelRZConstruct,
1247 /* .pfnReserved0 = */ NULL,
1248 /* .pfnReserved1 = */ NULL,
1249 /* .pfnReserved2 = */ NULL,
1250 /* .pfnReserved3 = */ NULL,
1251 /* .pfnReserved4 = */ NULL,
1252 /* .pfnReserved5 = */ NULL,
1253 /* .pfnReserved6 = */ NULL,
1254 /* .pfnReserved7 = */ NULL,
1255#else
1256# error "Not in IN_RING3, IN_RING0 or IN_RC!"
1257#endif
1258 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
1259};
1260
1261#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
1262
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette