VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp@ 88621

Last change on this file since 88621 was 88605, checked in by vboxsync, 4 years ago

Intel IOMMU: bugref:9967 doxygen and whitespace.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 72.8 KB
Line 
1/* $Id: DevIommuIntel.cpp 88605 2021-04-20 15:43:24Z vboxsync $ */
2/** @file
3 * IOMMU - Input/Output Memory Management Unit - Intel implementation.
4 */
5
6/*
7 * Copyright (C) 2021 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_IOMMU
23#include "VBoxDD.h"
24#include "DevIommuIntel.h"
25
26#include <iprt/string.h>
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32/** Gets the low uint32_t of a uint64_t or something equivalent.
33 *
34 * This is suitable for casting constants outside code (since RT_LO_U32 can't be
35 * used as it asserts for correctness when compiling on certain compilers). */
36#define DMAR_LO_U32(a) (uint32_t)(UINT32_MAX & (a))
37
38/** Gets the high uint32_t of a uint64_t or something equivalent.
39 *
40 * This is suitable for casting constants outside code (since RT_HI_U32 can't be
41 * used as it asserts for correctness when compiling on certain compilers). */
42#define DMAR_HI_U32(a) (uint32_t)((a) >> 32)
43
44/** Asserts MMIO access' offset and size are valid or returns appropriate error
45 * code suitable for returning from MMIO access handlers. */
46#define DMAR_ASSERT_MMIO_ACCESS_RET(a_off, a_cb) \
47 do { \
48 AssertReturn((a_cb) == 4 || (a_cb) == 8, VINF_IOM_MMIO_UNUSED_FF); \
49 AssertReturn(!((a_off) & ((a_cb) - 1)), VINF_IOM_MMIO_UNUSED_FF); \
50 } while (0);
51
52/** Checks whether the MMIO offset is valid. */
53#define DMAR_IS_MMIO_OFF_VALID(a_off) ( (a_off) < DMAR_MMIO_GROUP_0_OFF_END \
54 || (a_off) - DMAR_MMIO_GROUP_1_OFF_FIRST < DMAR_MMIO_GROUP_1_SIZE)
55
56/** Acquires the DMAR lock but returns with the given error code on failure. */
57#define DMAR_LOCK_RET(a_pDevIns, a_pThisCC, a_rcBusy) \
58 do { \
59 if ((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), (a_rcBusy)) == VINF_SUCCESS) \
60 { /* likely */ } \
61 else \
62 return (a_rcBusy); \
63 } while (0)
64
65/** Release the DMAR lock. */
66#define DMAR_UNLOCK(a_pDevIns, a_pThisCC) (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnUnlock(a_pDevIns)
67
68/** Checks whether the calling thread is the owner of the DMAR lock. */
69#define DMAR_LOCK_IS_OWNER(a_pDevIns, a_pThisCC) (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns)
70
71/** Asserts that the calling thread owns the DMAR lock. */
72#define DMAR_ASSERT_LOCK_IS_OWNER(a_pDevIns, a_pThisCC) \
73 do { \
74 Assert((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns)); \
75 RT_NOREF1(a_pThisCC); \
76 } while (0)
77
78/** The number of fault recording registers our implementation supports.
79 * Normal guest operation shouldn't trigger faults anyway, so we only support the
80 * minimum number of registers (which is 1).
81 *
82 * See Intel VT-d spec. 10.4.2 "Capability Register" (CAP_REG.NFR). */
83#define DMAR_FRCD_REG_COUNT UINT32_C(1)
84
85/** Offset of first register in group 0. */
86#define DMAR_MMIO_GROUP_0_OFF_FIRST VTD_MMIO_OFF_VER_REG
87/** Offset of last register in group 0 (inclusive). */
88#define DMAR_MMIO_GROUP_0_OFF_LAST VTD_MMIO_OFF_MTRR_PHYSMASK9_REG
89/** Last valid offset in group 0 (exclusive). */
90#define DMAR_MMIO_GROUP_0_OFF_END (DMAR_MMIO_GROUP_0_OFF_LAST + 8 /* sizeof MTRR_PHYSMASK9_REG */)
91/** Size of the group 0 (in bytes). */
92#define DMAR_MMIO_GROUP_0_SIZE (DMAR_MMIO_GROUP_0_OFF_END - DMAR_MMIO_GROUP_0_OFF_FIRST)
93/**< Implementation-specific MMIO offset of IVA_REG. */
94#define DMAR_MMIO_OFF_IVA_REG 0xe50
95/**< Implementation-specific MMIO offset of IOTLB_REG. */
96#define DMAR_MMIO_OFF_IOTLB_REG 0xe58
97/**< Implementation-specific MMIO offset of FRCD_LO_REG. */
98#define DMAR_MMIO_OFF_FRCD_LO_REG 0xe70
99/**< Implementation-specific MMIO offset of FRCD_HI_REG. */
100#define DMAR_MMIO_OFF_FRCD_HI_REG 0xe78
101AssertCompile(!(DMAR_MMIO_OFF_FRCD_LO_REG & 0xf));
102
103/** Offset of first register in group 1. */
104#define DMAR_MMIO_GROUP_1_OFF_FIRST VTD_MMIO_OFF_VCCAP_REG
105/** Offset of last register in group 1 (inclusive). */
106#define DMAR_MMIO_GROUP_1_OFF_LAST (DMAR_MMIO_OFF_FRCD_LO_REG + 8) * DMAR_FRCD_REG_COUNT
107/** Last valid offset in group 1 (exclusive). */
108#define DMAR_MMIO_GROUP_1_OFF_END (DMAR_MMIO_GROUP_1_OFF_LAST + 8 /* sizeof FRCD_HI_REG */)
109/** Size of the group 1 (in bytes). */
110#define DMAR_MMIO_GROUP_1_SIZE (DMAR_MMIO_GROUP_1_OFF_END - DMAR_MMIO_GROUP_1_OFF_FIRST)
111
112/** DMAR implementation's major version number (exposed to software).
113 * We report 6 as the major version since we support queued invalidations as
114 * software may make assumptions based on that.
115 *
116 * See Intel VT-d spec. 10.4.7 "Context Command Register" (CCMD_REG.CAIG). */
117#define DMAR_VER_MAJOR 6
118/** DMAR implementation's minor version number (exposed to software). */
119#define DMAR_VER_MINOR 0
120
121/** Release log prefix string. */
122#define DMAR_LOG_PFX "Intel-IOMMU"
123/** The current saved state version. */
124#define DMAR_SAVED_STATE_VERSION 1
125
126
127/*********************************************************************************************************************************
128* Structures and Typedefs *
129*********************************************************************************************************************************/
/**
 * DMAR error diagnostics.
 *
 * @note Members of this enum are used as array indices, so gaps in the enum
 *       values are not allowed. Update g_apszDmarDiagDesc when you modify
 *       fields in this enum.
 */
typedef enum
{
    /* No error, the initial state. */
    kDmarDiag_None = 0,
    /* Queued-invalidation tail (IQT_REG.QT) written with a misaligned value. */
    kDmarDiag_IqtReg_Qt_NotAligned,
    /* Invalid descriptor width (IQA_REG.DW) programmed by software. */
    kDmarDiag_IqaReg_Dw_Invalid,
    /* Last member for determining array index limit. */
    kDmarDiag_End
} DMARDIAG;
AssertCompileSize(DMARDIAG, 4);
146
147/** DMAR diagnostic enum description expansion. */
148#define DMARDIAG_DESC(a_Def, a_Desc) #a_Def " - " #a_Desc
149
150/** DMAR diagnostics description. */
151static const char *const g_apszDmarDiagDesc[] =
152{
153 DMARDIAG_DESC(kNone , "None" ),
154 DMARDIAG_DESC(kDmarDiag_IqtReg_Qt_NotAligned, "IqtReg_Qt_NotAligned"),
155 DMARDIAG_DESC(kDmarDiag_IqaReg_Dw_Invalid , "IqaReg_Dw_Invalid" )
156 /* kDmarDiag_End */
157};
158AssertCompile(RT_ELEMENTS(g_apszDmarDiagDesc) == kDmarDiag_End);
159#undef DMARDIAG_DESC
160
/**
 * The shared DMAR device state.
 * This lives in the shared (all-context) part of the device instance data.
 */
typedef struct DMAR
{
    /** IOMMU device index. */
    uint32_t                    idxIommu;
    /** DMAR magic. */
    uint32_t                    u32Magic;

    /** The MMIO handle. */
    IOMMMIOHANDLE               hMmio;

    /** Registers (group 0). */
    uint8_t                     abRegs0[DMAR_MMIO_GROUP_0_SIZE];
    /** Registers (group 1). */
    uint8_t                     abRegs1[DMAR_MMIO_GROUP_1_SIZE];

    /** @name Register copies for a tiny bit faster and more convenient access.
     *  @{ */
    /** Copy of VER_REG. */
    uint8_t                     uVerReg;
    /** Alignment. */
    uint8_t                     abPadding[3];
    /** Error diagnostic. */
    DMARDIAG                    enmDiag;
    /** Copy of CAP_REG. */
    uint64_t                    fCap;
    /** Copy of ECAP_REG. */
    uint64_t                    fExtCap;
    /** @} */

#ifdef VBOX_WITH_STATISTICS
    STAMCOUNTER                 StatMmioReadR3;      /**< Number of MMIO reads in R3. */
    STAMCOUNTER                 StatMmioReadRZ;      /**< Number of MMIO reads in RZ. */
    STAMCOUNTER                 StatMmioWriteR3;     /**< Number of MMIO writes in R3. */
    STAMCOUNTER                 StatMmioWriteRZ;     /**< Number of MMIO writes in RZ. */

    STAMCOUNTER                 StatMsiRemapR3;      /**< Number of MSI remap requests in R3. */
    STAMCOUNTER                 StatMsiRemapRZ;      /**< Number of MSI remap requests in RZ. */

    STAMCOUNTER                 StatMemReadR3;       /**< Number of memory read translation requests in R3. */
    STAMCOUNTER                 StatMemReadRZ;       /**< Number of memory read translation requests in RZ. */
    STAMCOUNTER                 StatMemWriteR3;      /**< Number of memory write translation requests in R3. */
    STAMCOUNTER                 StatMemWriteRZ;      /**< Number of memory write translation requests in RZ. */

    STAMCOUNTER                 StatMemBulkReadR3;   /**< Number of memory read bulk translation requests in R3. */
    STAMCOUNTER                 StatMemBulkReadRZ;   /**< Number of memory read bulk translation requests in RZ. */
    STAMCOUNTER                 StatMemBulkWriteR3;  /**< Number of memory write bulk translation requests in R3. */
    STAMCOUNTER                 StatMemBulkWriteRZ;  /**< Number of memory write bulk translation requests in RZ. */
#endif
} DMAR;
/** Pointer to the DMAR device state. */
typedef DMAR *PDMAR;
/** Pointer to the const DMAR device state. */
typedef DMAR const *PCDMAR;
217
/**
 * The ring-3 DMAR device state.
 */
typedef struct DMARR3
{
    /** Device instance. */
    PPDMDEVINSR3                pDevInsR3;
    /** The IOMMU helper. */
    R3PTRTYPE(PCPDMIOMMUHLPR3)  pIommuHlpR3;
} DMARR3;
/** Pointer to the ring-3 DMAR device state. */
typedef DMARR3 *PDMARR3;
/** Pointer to the const ring-3 DMAR device state. */
typedef DMARR3 const *PCDMARR3;
232
/**
 * The ring-0 DMAR device state.
 */
typedef struct DMARR0
{
    /** Device instance. */
    PPDMDEVINSR0                pDevInsR0;
    /** The IOMMU helper. */
    R0PTRTYPE(PCPDMIOMMUHLPR0)  pIommuHlpR0;
} DMARR0;
/** Pointer to the ring-0 DMAR device state. */
typedef DMARR0 *PDMARR0;
/** Pointer to the const ring-0 DMAR device state. */
typedef DMARR0 const *PCDMARR0;
247
248/**
249 * The raw-mode DMAR device state.
250 */
251typedef struct DMARRC
252{
253 /** Device instance. */
254 PPDMDEVINSRC pDevInsRC;
255 /** The IOMMU helper. */
256 RCPTRTYPE(PCPDMIOMMUHLPRC) pIommuHlpRC;
257} DMARRC;
258/** Pointer to the raw-mode DMAR device state. */
259typedef DMARRC *PDMARRC;
260/** Pointer to the const raw-mode DMAR device state. */
261typedef DMARRC const *PCIDMARRC;
262
/** The DMAR device state for the current context. */
typedef CTX_SUFF(DMAR) DMARCC;
/** Pointer to the DMAR device state for the current context. */
typedef CTX_SUFF(PDMAR) PDMARCC;
/** Pointer to the const DMAR device state for the current context.
 * NOTE(review): the trailing 'const' qualifies the pointer itself (a const
 * pointer to mutable data), not the pointee -- confirm that is intended. */
typedef CTX_SUFF(PDMAR) const PCDMARCC;
269
270
271/*********************************************************************************************************************************
272* Global Variables *
273*********************************************************************************************************************************/
/**
 * Read-write masks for DMAR registers (group 0).
 *
 * The table is laid out as uint32_t entries mirroring the MMIO register file:
 * a 32-bit register occupies one entry, a 64-bit register two (low dword
 * followed by high dword, hence the DMAR_LO_U32/DMAR_HI_U32 pairs).
 */
static uint32_t const g_au32RwMasks0[] =
{
    /* Offset  Register                  Low                                    High */
    /* 0x000   VER_REG */                VTD_VER_REG_RW_MASK,
    /* 0x004   Reserved */               0,
    /* 0x008   CAP_REG */                DMAR_LO_U32(VTD_CAP_REG_RW_MASK),      DMAR_HI_U32(VTD_CAP_REG_RW_MASK),
    /* 0x010   ECAP_REG */               DMAR_LO_U32(VTD_ECAP_REG_RW_MASK),     DMAR_HI_U32(VTD_ECAP_REG_RW_MASK),
    /* 0x018   GCMD_REG */               VTD_GCMD_REG_RW_MASK,
    /* 0x01c   GSTS_REG */               VTD_GSTS_REG_RW_MASK,
    /* 0x020   RTADDR_REG */             DMAR_LO_U32(VTD_RTADDR_REG_RW_MASK),   DMAR_HI_U32(VTD_RTADDR_REG_RW_MASK),
    /* 0x028   CCMD_REG */               DMAR_LO_U32(VTD_CCMD_REG_RW_MASK),     DMAR_HI_U32(VTD_CCMD_REG_RW_MASK),
    /* 0x030   Reserved */               0,
    /* 0x034   FSTS_REG */               VTD_FSTS_REG_RW_MASK,
    /* 0x038   FECTL_REG */              VTD_FECTL_REG_RW_MASK,
    /* 0x03c   FEDATA_REG */             VTD_FEDATA_REG_RW_MASK,
    /* 0x040   FEADDR_REG */             VTD_FEADDR_REG_RW_MASK,
    /* 0x044   FEUADDR_REG */            VTD_FEUADDR_REG_RW_MASK,
    /* 0x048   Reserved */               0,                                     0,
    /* 0x050   Reserved */               0,                                     0,
    /* 0x058   AFLOG_REG */              DMAR_LO_U32(VTD_AFLOG_REG_RW_MASK),    DMAR_HI_U32(VTD_AFLOG_REG_RW_MASK),
    /* 0x060   Reserved */               0,
    /* 0x064   PMEN_REG */               0,    /* RO as we don't support PLMR and PHMR. */
    /* 0x068   PLMBASE_REG */            0,    /* RO as we don't support PLMR. */
    /* 0x06c   PLMLIMIT_REG */           0,    /* RO as we don't support PLMR. */
    /* 0x070   PHMBASE_REG */            0,                                     0,    /* RO as we don't support PHMR. */
    /* 0x078   PHMLIMIT_REG */           0,                                     0,    /* RO as we don't support PHMR. */
    /* 0x080   IQH_REG */                DMAR_LO_U32(VTD_IQH_REG_RW_MASK),      DMAR_HI_U32(VTD_IQH_REG_RW_MASK),
    /* 0x088   IQT_REG */                DMAR_LO_U32(VTD_IQT_REG_RW_MASK),      DMAR_HI_U32(VTD_IQT_REG_RW_MASK),
    /* 0x090   IQA_REG */                DMAR_LO_U32(VTD_IQA_REG_RW_MASK),      DMAR_HI_U32(VTD_IQA_REG_RW_MASK),
    /* 0x098   Reserved */               0,
    /* 0x09c   ICS_REG */                VTD_ICS_REG_RW_MASK,
    /* 0x0a0   IECTL_REG */              VTD_IECTL_REG_RW_MASK,
    /* 0x0a4   IEDATA_REG */             VTD_IEDATA_REG_RW_MASK,
    /* 0x0a8   IEADDR_REG */             VTD_IEADDR_REG_RW_MASK,
    /* 0x0ac   IEUADDR_REG */            VTD_IEUADDR_REG_RW_MASK,
    /* 0x0b0   IQERCD_REG */             DMAR_LO_U32(VTD_IQERCD_REG_RW_MASK),   DMAR_HI_U32(VTD_IQERCD_REG_RW_MASK),
    /* 0x0b8   IRTA_REG */               DMAR_LO_U32(VTD_IRTA_REG_RW_MASK),     DMAR_HI_U32(VTD_IRTA_REG_RW_MASK),
    /* 0x0c0   PQH_REG */                DMAR_LO_U32(VTD_PQH_REG_RW_MASK),      DMAR_HI_U32(VTD_PQH_REG_RW_MASK),
    /* 0x0c8   PQT_REG */                DMAR_LO_U32(VTD_PQT_REG_RW_MASK),      DMAR_HI_U32(VTD_PQT_REG_RW_MASK),
    /* 0x0d0   PQA_REG */                DMAR_LO_U32(VTD_PQA_REG_RW_MASK),      DMAR_HI_U32(VTD_PQA_REG_RW_MASK),
    /* 0x0d8   Reserved */               0,
    /* 0x0dc   PRS_REG */                VTD_PRS_REG_RW_MASK,
    /* 0x0e0   PECTL_REG */              VTD_PECTL_REG_RW_MASK,
    /* 0x0e4   PEDATA_REG */             VTD_PEDATA_REG_RW_MASK,
    /* 0x0e8   PEADDR_REG */             VTD_PEADDR_REG_RW_MASK,
    /* 0x0ec   PEUADDR_REG */            VTD_PEUADDR_REG_RW_MASK,
    /* 0x0f0   Reserved */               0,                                     0,
    /* 0x0f8   Reserved */               0,                                     0,
    /* 0x100   MTRRCAP_REG */            DMAR_LO_U32(VTD_MTRRCAP_REG_RW_MASK),  DMAR_HI_U32(VTD_MTRRCAP_REG_RW_MASK),
    /* 0x108   MTRRDEF_REG */            0,                                     0,    /* RO as we don't support MTS. */
    /* 0x110   Reserved */               0,                                     0,
    /* 0x118   Reserved */               0,                                     0,
    /* 0x120   MTRR_FIX64_00000_REG */   0,                                     0,    /* RO as we don't support MTS. */
    /* 0x128   MTRR_FIX16K_80000_REG */  0,                                     0,
    /* 0x130   MTRR_FIX16K_A0000_REG */  0,                                     0,
    /* 0x138   MTRR_FIX4K_C0000_REG */   0,                                     0,
    /* 0x140   MTRR_FIX4K_C8000_REG */   0,                                     0,
    /* 0x148   MTRR_FIX4K_D0000_REG */   0,                                     0,
    /* 0x150   MTRR_FIX4K_D8000_REG */   0,                                     0,
    /* 0x158   MTRR_FIX4K_E0000_REG */   0,                                     0,
    /* 0x160   MTRR_FIX4K_E8000_REG */   0,                                     0,
    /* 0x168   MTRR_FIX4K_F0000_REG */   0,                                     0,
    /* 0x170   MTRR_FIX4K_F8000_REG */   0,                                     0,
    /* 0x178   Reserved */               0,                                     0,
    /* 0x180   MTRR_PHYSBASE0_REG */     0,                                     0,    /* RO as we don't support MTS. */
    /* 0x188   MTRR_PHYSMASK0_REG */     0,                                     0,
    /* 0x190   MTRR_PHYSBASE1_REG */     0,                                     0,
    /* 0x198   MTRR_PHYSMASK1_REG */     0,                                     0,
    /* 0x1a0   MTRR_PHYSBASE2_REG */     0,                                     0,
    /* 0x1a8   MTRR_PHYSMASK2_REG */     0,                                     0,
    /* 0x1b0   MTRR_PHYSBASE3_REG */     0,                                     0,
    /* 0x1b8   MTRR_PHYSMASK3_REG */     0,                                     0,
    /* 0x1c0   MTRR_PHYSBASE4_REG */     0,                                     0,
    /* 0x1c8   MTRR_PHYSMASK4_REG */     0,                                     0,
    /* 0x1d0   MTRR_PHYSBASE5_REG */     0,                                     0,
    /* 0x1d8   MTRR_PHYSMASK5_REG */     0,                                     0,
    /* 0x1e0   MTRR_PHYSBASE6_REG */     0,                                     0,
    /* 0x1e8   MTRR_PHYSMASK6_REG */     0,                                     0,
    /* 0x1f0   MTRR_PHYSBASE7_REG */     0,                                     0,
    /* 0x1f8   MTRR_PHYSMASK7_REG */     0,                                     0,
    /* 0x200   MTRR_PHYSBASE8_REG */     0,                                     0,
    /* 0x208   MTRR_PHYSMASK8_REG */     0,                                     0,
    /* 0x210   MTRR_PHYSBASE9_REG */     0,                                     0,
    /* 0x218   MTRR_PHYSMASK9_REG */     0,                                     0,
};
AssertCompile(sizeof(g_au32RwMasks0) == DMAR_MMIO_GROUP_0_SIZE);
363
/**
 * Read-only Status, Write-1-to-clear masks for DMAR registers (group 0).
 *
 * Same uint32_t-per-dword layout as g_au32RwMasks0; only FSTS_REG and ICS_REG
 * have RW1C bits in this group.
 */
static uint32_t const g_au32Rw1cMasks0[] =
{
    /* Offset  Register                  Low                      High */
    /* 0x000   VER_REG */                0,
    /* 0x004   Reserved */               0,
    /* 0x008   CAP_REG */                0,                       0,
    /* 0x010   ECAP_REG */               0,                       0,
    /* 0x018   GCMD_REG */               0,
    /* 0x01c   GSTS_REG */               0,
    /* 0x020   RTADDR_REG */             0,                       0,
    /* 0x028   CCMD_REG */               0,                       0,
    /* 0x030   Reserved */               0,
    /* 0x034   FSTS_REG */               VTD_FSTS_REG_RW1C_MASK,
    /* 0x038   FECTL_REG */              0,
    /* 0x03c   FEDATA_REG */             0,
    /* 0x040   FEADDR_REG */             0,
    /* 0x044   FEUADDR_REG */            0,
    /* 0x048   Reserved */               0,                       0,
    /* 0x050   Reserved */               0,                       0,
    /* 0x058   AFLOG_REG */              0,                       0,
    /* 0x060   Reserved */               0,
    /* 0x064   PMEN_REG */               0,
    /* 0x068   PLMBASE_REG */            0,
    /* 0x06c   PLMLIMIT_REG */           0,
    /* 0x070   PHMBASE_REG */            0,                       0,
    /* 0x078   PHMLIMIT_REG */           0,                       0,
    /* 0x080   IQH_REG */                0,                       0,
    /* 0x088   IQT_REG */                0,                       0,
    /* 0x090   IQA_REG */                0,                       0,
    /* 0x098   Reserved */               0,
    /* 0x09c   ICS_REG */                VTD_ICS_REG_RW1C_MASK,
    /* 0x0a0   IECTL_REG */              0,
    /* 0x0a4   IEDATA_REG */             0,
    /* 0x0a8   IEADDR_REG */             0,
    /* 0x0ac   IEUADDR_REG */            0,
    /* 0x0b0   IQERCD_REG */             0,                       0,
    /* 0x0b8   IRTA_REG */               0,                       0,
    /* 0x0c0   PQH_REG */                0,                       0,
    /* 0x0c8   PQT_REG */                0,                       0,
    /* 0x0d0   PQA_REG */                0,                       0,
    /* 0x0d8   Reserved */               0,
    /* 0x0dc   PRS_REG */                0,
    /* 0x0e0   PECTL_REG */              0,
    /* 0x0e4   PEDATA_REG */             0,
    /* 0x0e8   PEADDR_REG */             0,
    /* 0x0ec   PEUADDR_REG */            0,
    /* 0x0f0   Reserved */               0,                       0,
    /* 0x0f8   Reserved */               0,                       0,
    /* 0x100   MTRRCAP_REG */            0,                       0,
    /* 0x108   MTRRDEF_REG */            0,                       0,
    /* 0x110   Reserved */               0,                       0,
    /* 0x118   Reserved */               0,                       0,
    /* 0x120   MTRR_FIX64_00000_REG */   0,                       0,
    /* 0x128   MTRR_FIX16K_80000_REG */  0,                       0,
    /* 0x130   MTRR_FIX16K_A0000_REG */  0,                       0,
    /* 0x138   MTRR_FIX4K_C0000_REG */   0,                       0,
    /* 0x140   MTRR_FIX4K_C8000_REG */   0,                       0,
    /* 0x148   MTRR_FIX4K_D0000_REG */   0,                       0,
    /* 0x150   MTRR_FIX4K_D8000_REG */   0,                       0,
    /* 0x158   MTRR_FIX4K_E0000_REG */   0,                       0,
    /* 0x160   MTRR_FIX4K_E8000_REG */   0,                       0,
    /* 0x168   MTRR_FIX4K_F0000_REG */   0,                       0,
    /* 0x170   MTRR_FIX4K_F8000_REG */   0,                       0,
    /* 0x178   Reserved */               0,                       0,
    /* 0x180   MTRR_PHYSBASE0_REG */     0,                       0,
    /* 0x188   MTRR_PHYSMASK0_REG */     0,                       0,
    /* 0x190   MTRR_PHYSBASE1_REG */     0,                       0,
    /* 0x198   MTRR_PHYSMASK1_REG */     0,                       0,
    /* 0x1a0   MTRR_PHYSBASE2_REG */     0,                       0,
    /* 0x1a8   MTRR_PHYSMASK2_REG */     0,                       0,
    /* 0x1b0   MTRR_PHYSBASE3_REG */     0,                       0,
    /* 0x1b8   MTRR_PHYSMASK3_REG */     0,                       0,
    /* 0x1c0   MTRR_PHYSBASE4_REG */     0,                       0,
    /* 0x1c8   MTRR_PHYSMASK4_REG */     0,                       0,
    /* 0x1d0   MTRR_PHYSBASE5_REG */     0,                       0,
    /* 0x1d8   MTRR_PHYSMASK5_REG */     0,                       0,
    /* 0x1e0   MTRR_PHYSBASE6_REG */     0,                       0,
    /* 0x1e8   MTRR_PHYSMASK6_REG */     0,                       0,
    /* 0x1f0   MTRR_PHYSBASE7_REG */     0,                       0,
    /* 0x1f8   MTRR_PHYSMASK7_REG */     0,                       0,
    /* 0x200   MTRR_PHYSBASE8_REG */     0,                       0,
    /* 0x208   MTRR_PHYSMASK8_REG */     0,                       0,
    /* 0x210   MTRR_PHYSBASE9_REG */     0,                       0,
    /* 0x218   MTRR_PHYSMASK9_REG */     0,                       0,
};
AssertCompile(sizeof(g_au32Rw1cMasks0) == DMAR_MMIO_GROUP_0_SIZE);
453
454/**
455 * Read-write masks for DMAR registers (group 1).
456 */
457static uint32_t const g_au32RwMasks1[] =
458{
459 /* Offset Register Low High */
460 /* 0xe00 VCCAP_REG */ DMAR_LO_U32(VTD_VCCAP_REG_RW_MASK), DMAR_HI_U32(VTD_VCCAP_REG_RW_MASK),
461 /* 0xe08 VCMD_EO_REG */ DMAR_LO_U32(VTD_VCMD_EO_REG_RW_MASK), DMAR_HI_U32(VTD_VCMD_EO_REG_RW_MASK),
462 /* 0xe10 VCMD_REG */ 0, 0, /* RO: VCS not supported. */
463 /* 0xe18 VCMDRSVD_REG */ 0, 0,
464 /* 0xe20 VCRSP_REG */ 0, 0, /* RO: VCS not supported. */
465 /* 0xe28 VCRSPRSVD_REG */ 0, 0,
466 /* 0xe30 Reserved */ 0, 0,
467 /* 0xe38 Reserved */ 0, 0,
468 /* 0xe40 Reserved */ 0, 0,
469 /* 0xe48 Reserved */ 0, 0,
470 /* 0xe50 IVA_REG */ DMAR_LO_U32(VTD_IVA_REG_RW_MASK), DMAR_HI_U32(VTD_IVA_REG_RW_MASK),
471 /* 0xe58 IOTLB_REG */ DMAR_LO_U32(VTD_IOTLB_REG_RW_MASK), DMAR_HI_U32(VTD_IOTLB_REG_RW_MASK),
472 /* 0xe60 Reserved */ 0, 0,
473 /* 0xe68 Reserved */ 0, 0,
474 /* 0xe70 FRCD_REG_LO */ DMAR_LO_U32(VTD_FRCD_REG_LO_RW_MASK), DMAR_HI_U32(VTD_FRCD_REG_LO_RW_MASK),
475 /* 0xe78 FRCD_REG_HI */ DMAR_LO_U32(VTD_FRCD_REG_HI_RW_MASK), DMAR_HI_U32(VTD_FRCD_REG_HI_RW_MASK),
476};
477AssertCompile(sizeof(g_au32RwMasks1) == DMAR_MMIO_GROUP_1_SIZE);
478AssertCompile((DMAR_MMIO_OFF_FRCD_LO_REG - DMAR_MMIO_GROUP_1_OFF_FIRST) + DMAR_FRCD_REG_COUNT * 2 * sizeof(uint64_t) );
479
/**
 * Read-only Status, Write-1-to-clear masks for DMAR registers (group 1).
 *
 * Same uint32_t-per-dword layout as g_au32RwMasks1; only the fault recording
 * registers (FRCD) have RW1C bits in this group.
 */
static uint32_t const g_au32Rw1cMasks1[] =
{
    /* Offset  Register         Low                                      High */
    /* 0xe00   VCCAP_REG */     0,                                       0,
    /* 0xe08   VCMD_EO_REG */   0,                                       0,
    /* 0xe10   VCMD_REG */      0,                                       0,
    /* 0xe18   VCMDRSVD_REG */  0,                                       0,
    /* 0xe20   VCRSP_REG */     0,                                       0,
    /* 0xe28   VCRSPRSVD_REG */ 0,                                       0,
    /* 0xe30   Reserved */      0,                                       0,
    /* 0xe38   Reserved */      0,                                       0,
    /* 0xe40   Reserved */      0,                                       0,
    /* 0xe48   Reserved */      0,                                       0,
    /* 0xe50   IVA_REG */       0,                                       0,
    /* 0xe58   IOTLB_REG */     0,                                       0,
    /* 0xe60   Reserved */      0,                                       0,
    /* 0xe68   Reserved */      0,                                       0,
    /* 0xe70   FRCD_REG_LO */   DMAR_LO_U32(VTD_FRCD_REG_LO_RW1C_MASK),  DMAR_HI_U32(VTD_FRCD_REG_LO_RW1C_MASK),
    /* 0xe78   FRCD_REG_HI */   DMAR_LO_U32(VTD_FRCD_REG_HI_RW1C_MASK),  DMAR_HI_U32(VTD_FRCD_REG_HI_RW1C_MASK),
};
AssertCompile(sizeof(g_au32Rw1cMasks1) == DMAR_MMIO_GROUP_1_SIZE);
504
505/** Array of RW masks for each register group. */
506static uint8_t const *g_apbRwMasks[] = { (uint8_t *)&g_au32RwMasks0[0], (uint8_t *)&g_au32RwMasks1[0] };
507
508/** Array of RW1C masks for each register group. */
509static uint8_t const *g_apbRw1cMasks[] = { (uint8_t *)&g_au32Rw1cMasks0[0], (uint8_t *)&g_au32Rw1cMasks1[0] };
510
511/* Masks arrays must be identical in size (even bounds checking code assumes this). */
512AssertCompile(sizeof(g_apbRw1cMasks) == sizeof(g_apbRwMasks));
513
514
515#ifndef VBOX_DEVICE_STRUCT_TESTCASE
/**
 * Gets the number of supported adjusted guest-address width (SAGAW) in bits
 * given a CAP_REG.SAGAW value.
 *
 * @returns Number of SAGAW bits, or 0 for an unsupported SAGAW value.
 * @param   uSagaw  The CAP_REG.SAGAW value.
 */
static uint8_t vtdCapRegGetSagawBits(uint8_t uSagaw)
{
    /* Supported encodings 1..3 correspond to 39, 48 and 57 address bits. */
    switch (uSagaw)
    {
        case 1:  return 39;
        case 2:  return 48;
        case 3:  return 57;
        default: return 0;
    }
}
529
530
/**
 * Gets the supported adjusted guest-address width (SAGAW) given the maximum
 * guest address width (MGAW).
 *
 * @returns The CAP_REG.SAGAW value, or 0 when the MGAW has no SAGAW encoding.
 * @param   uMgaw   The CAP_REG.MGAW value.
 */
static uint8_t vtdCapRegGetSagaw(uint8_t uMgaw)
{
    /* MGAW is encoded as (address width - 1); only 39/48/57-bit widths map
       to a SAGAW encoding. */
    unsigned const cAddrBits = uMgaw + 1u;
    if (cAddrBits == 39)
        return 1;
    if (cAddrBits == 48)
        return 2;
    if (cAddrBits == 57)
        return 3;
    return 0;
}
548
549
550/**
551 * Gets the index of the group the register belongs to given its MMIO offset.
552 *
553 * @returns The group index.
554 * @param offReg The MMIO offset of the register.
555 * @param cbReg The size of the access being made (for bounds checking on
556 * debug builds).
557 */
558DECLINLINE(uint8_t) dmarRegGetGroupIndex(uint16_t offReg, uint8_t cbReg)
559{
560 uint16_t const offLast = offReg + cbReg - 1;
561 AssertCompile(DMAR_MMIO_GROUP_0_OFF_FIRST == 0);
562 AssertMsg(DMAR_IS_MMIO_OFF_VALID(offLast), ("off=%#x cb=%u\n", offReg, cbReg));
563 return !(offLast < DMAR_MMIO_GROUP_0_OFF_END);
564}
565
566
567/**
568 * Gets the group the register belongs to given its MMIO offset.
569 *
570 * @returns Pointer to the first element of the register group.
571 * @param pThis The shared DMAR device state.
572 * @param offReg The MMIO offset of the register.
573 * @param cbReg The size of the access being made (for bounds checking on
574 * debug builds).
575 * @param pIdxGroup Where to store the index of the register group the register
576 * belongs to.
577 */
578DECLINLINE(uint8_t *) dmarRegGetGroup(PDMAR pThis, uint16_t offReg, uint8_t cbReg, uint8_t *pIdxGroup)
579{
580 *pIdxGroup = dmarRegGetGroupIndex(offReg, cbReg);
581 uint8_t *apbRegs[] = { &pThis->abRegs0[0], &pThis->abRegs1[0] };
582 return apbRegs[*pIdxGroup];
583}
584
585
586/**
587 * Const/read-only version of dmarRegGetGroup.
588 *
589 * @copydoc dmarRegGetGroup
590 */
591DECLINLINE(uint8_t const*) dmarRegGetGroupRo(PCDMAR pThis, uint16_t offReg, uint8_t cbReg, uint8_t *pIdxGroup)
592{
593 *pIdxGroup = dmarRegGetGroupIndex(offReg, cbReg);
594 uint8_t const *apbRegs[] = { &pThis->abRegs0[0], &pThis->abRegs1[0] };
595 return apbRegs[*pIdxGroup];
596}
597
598
599/**
600 * Writes a 32-bit register with the exactly the supplied value.
601 *
602 * @param pThis The shared DMAR device state.
603 * @param offReg The MMIO offset of the register.
604 * @param uReg The 32-bit value to write.
605 */
606static void dmarRegWriteRaw32(PDMAR pThis, uint16_t offReg, uint32_t uReg)
607{
608 uint8_t idxGroup;
609 uint8_t *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint32_t), &idxGroup);
610 NOREF(idxGroup);
611 *(uint32_t *)(pabRegs + offReg) = uReg;
612}
613
614
615/**
616 * Writes a 64-bit register with the exactly the supplied value.
617 *
618 * @param pThis The shared DMAR device state.
619 * @param offReg The MMIO offset of the register.
620 * @param uReg The 64-bit value to write.
621 */
622static void dmarRegWriteRaw64(PDMAR pThis, uint16_t offReg, uint64_t uReg)
623{
624 uint8_t idxGroup;
625 uint8_t *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint64_t), &idxGroup);
626 NOREF(idxGroup);
627 *(uint64_t *)(pabRegs + offReg) = uReg;
628}
629
630
631/**
632 * Reads a 32-bit register with exactly the value it contains.
633 *
634 * @param pThis The shared DMAR device state.
635 * @param offReg The MMIO offset of the register.
636 */
637static uint32_t dmarRegReadRaw32(PCDMAR pThis, uint16_t offReg)
638{
639 uint8_t idxGroup;
640 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint32_t), &idxGroup);
641 NOREF(idxGroup);
642 return *(uint32_t *)(pabRegs + offReg);
643}
644
645
646/**
647 * Reads a 64-bit register with exactly the value it contains.
648 *
649 * @param pThis The shared DMAR device state.
650 * @param offReg The MMIO offset of the register.
651 */
652static uint32_t dmarRegReadRaw64(PCDMAR pThis, uint16_t offReg)
653{
654 uint8_t idxGroup;
655 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint64_t), &idxGroup);
656 NOREF(idxGroup);
657 return *(uint64_t *)(pabRegs + offReg);
658}
659
660
661/**
662 * Reads a 32-bit register with exactly the value it contains along with their
663 * corresponding masks
664 *
665 * @param pThis The shared DMAR device state.
666 * @param offReg The MMIO offset of the register.
667 * @param puReg Where to store the raw 32-bit register value.
668 * @param pfRwMask Where to store the RW mask corresponding to this register.
669 * @param pfRw1cMask Where to store the RW1C mask corresponding to this register.
670 */
671static void dmarRegReadRaw32Ex(PCDMAR pThis, uint16_t offReg, uint32_t *puReg, uint32_t *pfRwMask, uint32_t *pfRw1cMask)
672{
673 uint8_t idxGroup;
674 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint32_t), &idxGroup);
675 Assert(idxGroup < RT_ELEMENTS(g_apbRwMasks));
676 uint8_t const *pabRwMasks = g_apbRwMasks[idxGroup];
677 uint8_t const *pabRw1cMasks = g_apbRw1cMasks[idxGroup];
678 *puReg = *(uint32_t *)(pabRegs + offReg);
679 *pfRwMask = *(uint32_t *)(pabRwMasks + offReg);
680 *pfRw1cMask = *(uint32_t *)(pabRw1cMasks + offReg);
681}
682
683
684/**
685 * Reads a 64-bit register with exactly the value it contains along with their
686 * corresponding masks.
687 *
688 * @param pThis The shared DMAR device state.
689 * @param offReg The MMIO offset of the register.
690 * @param puReg Where to store the raw 64-bit register value.
691 * @param pfRwMask Where to store the RW mask corresponding to this register.
692 * @param pfRw1cMask Where to store the RW1C mask corresponding to this register.
693 */
694static void dmarRegReadRaw64Ex(PCDMAR pThis, uint16_t offReg, uint64_t *puReg, uint64_t *pfRwMask, uint64_t *pfRw1cMask)
695{
696 uint8_t idxGroup;
697 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint64_t), &idxGroup);
698 Assert(idxGroup < RT_ELEMENTS(g_apbRwMasks));
699 uint8_t const *pabRwMasks = g_apbRwMasks[idxGroup];
700 uint8_t const *pabRw1cMasks = g_apbRw1cMasks[idxGroup];
701 *puReg = *(uint64_t *)(pabRegs + offReg);
702 *pfRwMask = *(uint64_t *)(pabRwMasks + offReg);
703 *pfRw1cMask = *(uint64_t *)(pabRw1cMasks + offReg);
704}
705
706
707/**
708 * Writes a 32-bit register as it would be when written by software.
709 * This will preserve read-only bits, mask off reserved bits and clear RW1C bits.
710 *
711 * @returns The value that's actually written to the register.
712 * @param pThis The shared DMAR device state.
713 * @param offReg The MMIO offset of the register.
714 * @param uReg The 32-bit value to write.
715 */
716static uint32_t dmarRegWrite32(PDMAR pThis, uint16_t offReg, uint32_t uReg)
717{
718 /* Read current value from the 32-bit register. */
719 uint32_t uCurReg;
720 uint32_t fRwMask;
721 uint32_t fRw1cMask;
722 dmarRegReadRaw32Ex(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
723
724 uint32_t const fRoBits = uCurReg & ~fRwMask; /* Preserve current read-only and reserved bits. */
725 uint32_t const fRwBits = uReg & fRwMask; /* Merge newly written read/write bits. */
726 uint32_t const fRw1cBits = uReg & fRw1cMask; /* Clear 1s written to RW1C bits. */
727 uint32_t const uNewReg = (fRoBits | fRwBits) & ~fRw1cBits;
728
729 /* Write new value to the 32-bit register. */
730 dmarRegWriteRaw32(pThis, offReg, uNewReg);
731 return uNewReg;
732}
733
734
735/**
736 * Writes a 64-bit register as it would be when written by software.
737 * This will preserve read-only bits, mask off reserved bits and clear RW1C bits.
738 *
739 * @returns The value that's actually written to the register.
740 * @param pThis The shared DMAR device state.
741 * @param offReg The MMIO offset of the register.
742 * @param uReg The 64-bit value to write.
743 */
744static uint64_t dmarRegWrite64(PDMAR pThis, uint16_t offReg, uint64_t uReg)
745{
746 /* Read current value from the 64-bit register. */
747 uint64_t uCurReg;
748 uint64_t fRwMask;
749 uint64_t fRw1cMask;
750 dmarRegReadRaw64Ex(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
751
752 uint64_t const fRoBits = uCurReg & ~fRwMask; /* Preserve current read-only and reserved bits. */
753 uint64_t const fRwBits = uReg & fRwMask; /* Merge newly written read/write bits. */
754 uint64_t const fRw1cBits = uReg & fRw1cMask; /* Clear 1s written to RW1C bits. */
755 uint64_t const uNewReg = (fRoBits | fRwBits) & ~fRw1cBits;
756
757 /* Write new value to the 64-bit register. */
758 dmarRegWriteRaw64(pThis, offReg, uNewReg);
759 return uNewReg;
760}
761
762
763/**
764 * Reads a 32-bit register as it would be when read by software.
765 *
766 * @returns The 32-bit register value.
767 * @param pThis The shared DMAR device state.
768 * @param offReg The MMIO offset of the register.
769 */
770static uint32_t dmarRegRead32(PCDMAR pThis, uint16_t offReg)
771{
772 return dmarRegReadRaw32(pThis, offReg);
773}
774
775
776/**
777 * Reads a 64-bit register as it would be when read by software.
778 *
779 * @returns The 64-bit register value.
780 * @param pThis The shared DMAR device state.
781 * @param offReg The MMIO offset of the register.
782 */
783static uint64_t dmarRegRead64(PCDMAR pThis, uint16_t offReg)
784{
785 return dmarRegReadRaw64(pThis, offReg);
786}
787
788
789/**
790 * Modifies a 32-bit register.
791 *
792 * @param pThis The shared DMAR device state.
793 * @param offReg The MMIO offset of the register.
794 * @param fAndMask The AND mask (applied first).
795 * @param fOrMask The OR mask.
796 * @remarks This does NOT apply RO or RW1C masks while modifying the
797 * register.
798 */
799static void dmarRegChange32(PDMAR pThis, uint16_t offReg, uint32_t fAndMask, uint32_t fOrMask)
800{
801 uint32_t uReg = dmarRegRead32(pThis, offReg);
802 uReg = (uReg & fAndMask) | fOrMask;
803 dmarRegWriteRaw32(pThis, offReg, uReg);
804}
805
806
807/**
808 * Modifies a 64-bit register.
809 *
810 * @param pThis The shared DMAR device state.
811 * @param offReg The MMIO offset of the register.
812 * @param fAndMask The AND mask (applied first).
813 * @param fOrMask The OR mask.
814 * @remarks This does NOT apply RO or RW1C masks while modifying the
815 * register.
816 */
817static void dmarRegChange64(PDMAR pThis, uint16_t offReg, uint64_t fAndMask, uint64_t fOrMask)
818{
819 uint64_t uReg = dmarRegRead64(pThis, offReg);
820 uReg = (uReg & fAndMask) | fOrMask;
821 dmarRegWriteRaw64(pThis, offReg, uReg);
822}
823
824
825/**
826 * Gets the table translation mode from the RTADDR_REG.
827 *
828 * @returns The table translation mode.
829 * @param pThis The shared DMAR device state.
830 */
831static uint8_t dmarRtAddrRegGetTtm(PCDMAR pThis)
832{
833 uint64_t const uRtAddrReg = dmarRegRead64(pThis, VTD_MMIO_OFF_RTADDR_REG);
834 return RT_BF_GET(uRtAddrReg, VTD_BF_RTADDR_REG_TTM);
835}
836
837
838/**
839 * Raises an interrupt in response to an event.
840 *
841 * @param pDevIns The IOMMU device instance.
842 */
843static void dmarFaultRaiseInterrupt(PPDMDEVINS pDevIns)
844{
845 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
846 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
847#ifdef RT_STRICT
848 {
849 uint32_t const uFstsReg = dmarRegRead32(pThis, VTD_MMIO_OFF_FSTS_REG);
850 uint32_t const fFaultMask = VTD_BF_FSTS_REG_PPF_MASK | VTD_BF_FSTS_REG_PFO_MASK
851 /* | VTD_BF_FSTS_REG_APF_MASK | VTD_BF_FSTS_REG_AFO_MASK */ /* AFL not supported */
852 /* | VTD_BF_FSTS_REG_ICE_MASK | VTD_BF_FSTS_REG_ITE_MASK */ /* Device-TLBs not supported */
853 | VTD_BF_FSTS_REG_IQE_MASK;
854 Assert(uFstsReg & fFaultMask);
855 }
856#endif
857
858 uint32_t uFectlReg = dmarRegRead32(pThis, VTD_MMIO_OFF_FECTL_REG);
859 if (!(uFectlReg & VTD_BF_FECTL_REG_IM_MASK))
860 {
861 MSIMSG Msi;
862 Msi.Addr.u64 = RT_MAKE_U64(dmarRegRead32(pThis, VTD_MMIO_OFF_FEADDR_REG),
863 dmarRegRead32(pThis, VTD_MMIO_OFF_FEUADDR_REG));
864 Msi.Data.u32 = dmarRegRead32(pThis, VTD_MMIO_OFF_FEDATA_REG);
865
866 /** @todo Assert Msi.Addr is in the MSR_IA32_APICBASE_ADDR range and ensure on
867 * FEADD_REG write it can't be anything else. */
868
869 /* Software has unmasked the interrupt, raise it. */
870 pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi(pDevIns, &Msi, 0 /* uTagSrc */);
871
872 /* Clear interrupt pending bit. */
873 uFectlReg &= ~VTD_BF_FECTL_REG_IP_MASK;
874 dmarRegWrite32(pThis, VTD_MMIO_OFF_FECTL_REG, uFectlReg);
875 }
876 else
877 {
878 /* Interrupt is masked, set the interrupt pending bit. */
879 uFectlReg |= VTD_BF_FECTL_REG_IP_MASK;
880 dmarRegWrite32(pThis, VTD_MMIO_OFF_FECTL_REG, uFectlReg);
881 }
882}
883
884
885#if 0
/**
 * Checks whether a primary fault can be recorded.
 *
 * @returns @c true if the fault can be recorded, @c false otherwise.
 * @param   pThis   The shared DMAR device state.
 *
 * @note    Currently compiled out (enclosed in \#if 0 by the surrounding file).
 */
static bool dmarPrimaryFaultCanRecord(PDMAR pThis)
{
    /* No further faults are recorded while the primary fault overflow bit is set. */
    uint32_t uFstsReg = dmarRegRead32(pThis, VTD_MMIO_OFF_FSTS_REG);
    if (uFstsReg & VTD_BF_FSTS_REG_PFO_MASK)
        return false;

    /*
     * If we add more FRCD registers, we'll have to loop through them here.
     * Since we support only one FRCD_REG, we don't support "compression of multiple faults",
     * nor do we need to increment FRI.
     *
     * See Intel VT-d spec. 7.2.1 "Primary Fault Logging".
     */
    AssertCompile(DMAR_FRCD_REG_COUNT == 1);
    /* If the sole FRCD register still holds an unserviced fault (F bit), signal overflow and bail. */
    uint64_t const uFrcdRegHi = dmarRegRead64(pThis, DMAR_MMIO_OFF_FRCD_HI_REG);
    if (uFrcdRegHi & VTD_BF_1_FRCD_REG_F_MASK)
    {
        uFstsReg |= VTD_BF_FSTS_REG_PFO_MASK;
        dmarRegWrite32(pThis, VTD_MMIO_OFF_FSTS_REG, uFstsReg);
        return false;
    }

    /* Room is available: flag primary pending fault and allow recording. */
    uFstsReg |= VTD_BF_FSTS_REG_PPF_MASK;
    dmarRegWrite32(pThis, VTD_MMIO_OFF_FSTS_REG, uFstsReg);
    return true;
}
918#endif
919
920
921/**
922 * Records an IQE fault.
923 *
924 * @param pDevIns The IOMMU device instance.
925 * @param enmIqei The IQE information.
926 * @param enmDiag The diagnostic reason.
927 */
928static void dmarIqeFaultRecord(PPDMDEVINS pDevIns, DMARDIAG enmDiag, VTD_IQERCD_IQEI_T enmIqei)
929{
930 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
931 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
932 DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisCC);
933
934 /* Always update the latest diagnostic reason. */
935 pThis->enmDiag = enmDiag;
936
937 /* Set the error bit. */
938 uint32_t const fIqe = RT_BF_MAKE(VTD_BF_FSTS_REG_IQE, 1);
939 dmarRegChange32(pThis, VTD_MMIO_OFF_FSTS_REG, UINT32_MAX, fIqe);
940
941 /* Set the error information. */
942 uint64_t const fIqei = RT_BF_MAKE(VTD_BF_IQERCD_REG_IQEI, enmIqei);
943 dmarRegChange64(pThis, VTD_MMIO_OFF_IQERCD_REG, UINT64_MAX, fIqei);
944
945 dmarFaultRaiseInterrupt(pDevIns);
946}
947
948
949/**
950 * Handles writes to CCMD_REG.
951 *
952 * @returns Strict VBox status code.
953 * @param pDevIns The IOMMU device instance.
954 * @param off The MMIO register offset.
955 * @param cb The size of the MMIO access (in bytes).
956 * @param uCcmdReg The value written to CCMD_REG.
957 */
958static VBOXSTRICTRC dmarCcmdRegWrite(PPDMDEVINS pDevIns, uint16_t off, uint8_t cb, uint64_t uCcmdReg)
959{
960 /* At present, we only care about responding to high 32-bits writes, low 32-bits are data. */
961 if (off + cb > VTD_MMIO_OFF_CCMD_REG + 4)
962 {
963 /* Check if we need to invalidate the context-context. */
964 bool const fIcc = RT_BF_GET(uCcmdReg, VTD_BF_CCMD_REG_ICC);
965 if (fIcc)
966 {
967 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
968 uint8_t const uMajorVersion = RT_BF_GET(pThis->uVerReg, VTD_BF_VER_REG_MAX);
969 if (uMajorVersion < 6)
970 {
971 /** @todo Verify queued-invalidation is not enabled.
972 * See Intel VT-d spec. 6.5.1 "Register-based Invalidation Interface" */
973
974 /* Verify table translation mode is legacy. */
975 uint8_t const fTtm = dmarRtAddrRegGetTtm(pThis);
976 if (fTtm == VTD_TTM_LEGACY_MODE)
977 {
978 /** @todo Invalidate. */
979 return VINF_SUCCESS;
980 }
981 }
982
983 /** @todo Record error. */
984 }
985 }
986 return VINF_SUCCESS;
987}
988
989
990/**
991 * Handles writes to IQT_REG.
992 *
993 * @returns Strict VBox status code.
994 * @param pDevIns The IOMMU device instance.
995 * @param off The MMIO register offset.
996 * @param uIqtReg The value written to IQT_REG.
997 */
998static VBOXSTRICTRC dmarIqtRegWrite(PPDMDEVINS pDevIns, uint16_t off, uint64_t uIqtReg)
999{
1000 /* We only care about the low 32-bits, high 32-bits are reserved. */
1001 if (off == VTD_MMIO_OFF_IQT_REG)
1002 {
1003 /* Verify if the queue tail offset is aligned according to the descriptor width in IQA_REG. */
1004 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1005 uint16_t const offQueueTail = VTD_IQT_REG_GET_QT(uIqtReg);
1006 uint64_t const uIqaReg = dmarRegRead64(pThis, VTD_MMIO_OFF_IQA_REG);
1007 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
1008 if ( fDw != VTD_IQA_REG_DW_256_BIT
1009 || !(offQueueTail & 0x1f))
1010 {
1011 /** @todo Figure out what to do here, like waking up worker thread or
1012 * something. */
1013 }
1014 else
1015 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_NotAligned, kQueueTailNotAligned);
1016 }
1017 return VINF_SUCCESS;
1018}
1019
1020
1021/**
1022 * Handles writes to IQA_REG.
1023 *
1024 * @returns Strict VBox status code.
1025 * @param pDevIns The IOMMU device instance.
1026 * @param off The MMIO register offset.
1027 * @param uIqaReg The value written to IQA_REG.
1028 */
1029static VBOXSTRICTRC dmarIqaRegWrite(PPDMDEVINS pDevIns, uint16_t off, uint64_t uIqaReg)
1030{
1031 /** @todo Don't allow writing this when GSTS.QIES is set? */
1032
1033 /* At present, we only care about the low 32-bits, high 32-bits are data. */
1034 if (off == VTD_MMIO_OFF_IQA_REG)
1035 {
1036 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1037 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
1038 if (fDw == VTD_IQA_REG_DW_256_BIT)
1039 {
1040 uint64_t const fDwMask = VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK;
1041 bool const fSupports256BitDw = RT_BOOL(pThis->fExtCap & fDwMask);
1042 if (fSupports256BitDw)
1043 { /* likely */ }
1044 else
1045 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_Invalid, kInvalidDescriptorWidth);
1046 }
1047 }
1048 return VINF_SUCCESS;
1049}
1050
1051
1052/**
1053 * Memory access bulk (one or more 4K pages) request from a device.
1054 *
1055 * @returns VBox status code.
1056 * @param pDevIns The IOMMU device instance.
1057 * @param idDevice The device ID (bus, device, function).
1058 * @param cIovas The number of addresses being accessed.
1059 * @param pauIovas The I/O virtual addresses for each page being accessed.
1060 * @param fFlags The access flags, see PDMIOMMU_MEM_F_XXX.
1061 * @param paGCPhysSpa Where to store the translated physical addresses.
1062 *
1063 * @thread Any.
1064 */
1065static DECLCALLBACK(int) iommuIntelMemBulkAccess(PPDMDEVINS pDevIns, uint16_t idDevice, size_t cIovas, uint64_t const *pauIovas,
1066 uint32_t fFlags, PRTGCPHYS paGCPhysSpa)
1067{
1068 RT_NOREF6(pDevIns, idDevice, cIovas, pauIovas, fFlags, paGCPhysSpa);
1069 return VERR_NOT_IMPLEMENTED;
1070}
1071
1072
1073/**
1074 * Memory access transaction from a device.
1075 *
1076 * @returns VBox status code.
1077 * @param pDevIns The IOMMU device instance.
1078 * @param idDevice The device ID (bus, device, function).
1079 * @param uIova The I/O virtual address being accessed.
1080 * @param cbIova The size of the access.
1081 * @param fFlags The access flags, see PDMIOMMU_MEM_F_XXX.
1082 * @param pGCPhysSpa Where to store the translated system physical address.
1083 * @param pcbContiguous Where to store the number of contiguous bytes translated
1084 * and permission-checked.
1085 *
1086 * @thread Any.
1087 */
1088static DECLCALLBACK(int) iommuIntelMemAccess(PPDMDEVINS pDevIns, uint16_t idDevice, uint64_t uIova, size_t cbIova,
1089 uint32_t fFlags, PRTGCPHYS pGCPhysSpa, size_t *pcbContiguous)
1090{
1091 RT_NOREF7(pDevIns, idDevice, uIova, cbIova, fFlags, pGCPhysSpa, pcbContiguous);
1092 return VERR_NOT_IMPLEMENTED;
1093}
1094
1095
1096/**
1097 * Interrupt remap request from a device.
1098 *
1099 * @returns VBox status code.
1100 * @param pDevIns The IOMMU device instance.
1101 * @param idDevice The device ID (bus, device, function).
1102 * @param pMsiIn The source MSI.
1103 * @param pMsiOut Where to store the remapped MSI.
1104 */
1105static DECLCALLBACK(int) iommuIntelMsiRemap(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
1106{
1107 RT_NOREF3(idDevice, pMsiIn, pMsiOut);
1108 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1109 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMsiRemap)); NOREF(pThis);
1110
1111 return VERR_NOT_IMPLEMENTED;
1112}
1113
1114
1115/**
1116 * @callback_method_impl{FNIOMMMIONEWWRITE}
1117 */
1118static DECLCALLBACK(VBOXSTRICTRC) dmarMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1119{
1120 RT_NOREF1(pvUser);
1121 DMAR_ASSERT_MMIO_ACCESS_RET(off, cb);
1122
1123 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1124 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioWrite));
1125
1126 uint16_t const offReg = off;
1127 uint16_t const offLast = offReg + cb - 1;
1128 if (DMAR_IS_MMIO_OFF_VALID(offLast))
1129 {
1130 uint64_t const uRegWritten = cb == 8 ? dmarRegWrite64(pThis, offReg, *(uint64_t *)pv)
1131 : dmarRegWrite32(pThis, offReg, *(uint32_t *)pv);
1132 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1133 switch (off)
1134 {
1135 case VTD_MMIO_OFF_IQT_REG:
1136 case VTD_MMIO_OFF_IQT_REG + 4:
1137 {
1138 rcStrict = dmarIqtRegWrite(pDevIns, offReg, uRegWritten);
1139 break;
1140 }
1141
1142 case VTD_MMIO_OFF_CCMD_REG:
1143 case VTD_MMIO_OFF_CCMD_REG + 4:
1144 {
1145 rcStrict = dmarCcmdRegWrite(pDevIns, offReg, cb, uRegWritten);
1146 break;
1147 }
1148
1149 case VTD_MMIO_OFF_IQA_REG:
1150 case VTD_MMIO_OFF_IQA_REG + 4:
1151 {
1152 rcStrict = dmarIqaRegWrite(pDevIns, offReg, uRegWritten);
1153 break;
1154 }
1155 }
1156
1157 LogFlowFunc(("offReg=%#x rc=%Rrc\n", offReg, VBOXSTRICTRC_VAL(rcStrict)));
1158 return rcStrict;
1159 }
1160
1161 return VINF_IOM_MMIO_UNUSED_FF;
1162}
1163
1164
1165/**
1166 * @callback_method_impl{FNIOMMMIONEWREAD}
1167 */
1168static DECLCALLBACK(VBOXSTRICTRC) dmarMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
1169{
1170 RT_NOREF1(pvUser);
1171 DMAR_ASSERT_MMIO_ACCESS_RET(off, cb);
1172
1173 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1174 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioRead));
1175
1176 uint16_t const offReg = off;
1177 uint16_t const offLast = offReg + cb - 1;
1178 if (DMAR_IS_MMIO_OFF_VALID(offLast))
1179 {
1180 if (cb == 8)
1181 {
1182 *(uint64_t *)pv = dmarRegRead64(pThis, offReg);
1183 LogFlowFunc(("offReg=%#x pv=%#RX64\n", offReg, *(uint64_t *)pv));
1184 }
1185 else
1186 {
1187 *(uint32_t *)pv = dmarRegRead32(pThis, offReg);
1188 LogFlowFunc(("offReg=%#x pv=%#RX32\n", offReg, *(uint32_t *)pv));
1189 }
1190
1191 return VINF_SUCCESS;
1192 }
1193
1194 return VINF_IOM_MMIO_UNUSED_FF;
1195}
1196
1197
1198#ifdef IN_RING3
1199/**
1200 * @callback_method_impl{FNDBGFHANDLERDEV}
1201 */
1202static DECLCALLBACK(void) dmarR3DbgInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
1203{
1204 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1205 PCPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
1206 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
1207
1208 bool const fVerbose = RTStrCmp(pszArgs, "verbose") == 0;
1209
1210 DMARDIAG const enmDiag = pThis->enmDiag;
1211 const char *pszDiag = enmDiag < RT_ELEMENTS(g_apszDmarDiagDesc) ? g_apszDmarDiagDesc[enmDiag] : "(Unknown)";
1212
1213 pHlp->pfnPrintf(pHlp, "Intel-IOMMU:\n");
1214 pHlp->pfnPrintf(pHlp, " Diag = %u (%s)\n", enmDiag, pszDiag);
1215 pHlp->pfnPrintf(pHlp, "\n");
1216}
1217
1218
/**
 * Initializes all registers in the DMAR unit.
 *
 * Called from the ring-3 constructor and from device reset; brings all
 * registers to their power-on values and derives the advertised CAP/ECAP
 * feature bits from the feature flags selected below.
 *
 * @param   pDevIns     The IOMMU device instance.
 */
static void dmarR3RegsInit(PPDMDEVINS pDevIns)
{
    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    RT_ZERO(pThis->abRegs0);
    RT_ZERO(pThis->abRegs1);

    /*
     * Initialize registers not mutable by software prior to initializing other registers.
     */
    /* VER_REG */
    {
        pThis->uVerReg = RT_BF_MAKE(VTD_BF_VER_REG_MIN, DMAR_VER_MINOR)
                       | RT_BF_MAKE(VTD_BF_VER_REG_MAX, DMAR_VER_MAJOR);
        dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_VER_REG, pThis->uVerReg);
    }

    /* Feature selection; these 0/1 flags are ANDed into the CAP/ECAP fields below. */
    uint8_t const fFlts = 1;                    /* First-Level translation support. */
    uint8_t const fSlts = 1;                    /* Second-Level translation support. */
    uint8_t const fPt   = 1;                    /* Pass-Through support. */
    uint8_t const fSmts = fFlts & fSlts & fPt;  /* Scalable mode translation support.*/
    uint8_t const fNest = 0;                    /* Nested translation support. */

    /* CAP_REG */
    {
        uint8_t cGstPhysAddrBits;
        uint8_t cGstLinearAddrBits;
        PDMDevHlpCpuGetGuestAddrWidths(pDevIns, &cGstPhysAddrBits, &cGstLinearAddrBits);

        uint8_t const fFl1gp  = 1;                                /* First-Level 1GB pages support. */
        uint8_t const fFl5lp  = 1;                                /* First-level 5-level paging support (PML5E). */
        uint8_t const fSl2mp  = fSlts & 1;                        /* Second-Level 2MB pages support. */
        uint8_t const fSl2gp  = fSlts & 1;                        /* Second-Level 1GB pages support. */
        /* NOTE(review): for 0/1-valued flags '(fSl2mp & fFl1gp) & RT_BIT(1)' is always 0,
           so bit 1 of SLLPS (1GB second-level pages) is never advertised -- was
           '((fSl2gp & fFl1gp) << 1)' intended?  Confirm against VT-d spec SLLPS. */
        uint8_t const fSllps  = fSl2mp                            /* Second-Level large page Support. */
                              | ((fSl2mp & fFl1gp) & RT_BIT(1));
        uint8_t const fMamv   = (fSl2gp ?                         /* Maximum address mask value (for second-level invalidations). */
                                 X86_PAGE_1G_SHIFT : X86_PAGE_2M_SHIFT) - X86_PAGE_4K_SHIFT;
        uint8_t const fNd     = 2;                                /* Number of domains (0=16, 1=64, 2=256, 3=1K, 4=4K, 5=16K, 6=64K,
                                                                     7=Reserved). */
        uint8_t const fPsi    = 1;                                /* Page selective invalidation. */
        uint8_t const uMgaw   = cGstPhysAddrBits - 1;             /* Maximum guest address width. */
        uint8_t const uSagaw  = vtdCapRegGetSagaw(uMgaw);         /* Supported adjust guest address width. */
        uint16_t const offFro = DMAR_MMIO_OFF_FRCD_LO_REG >> 4;   /* MMIO offset of FRCD registers. */

        pThis->fCap = RT_BF_MAKE(VTD_BF_CAP_REG_ND,      fNd)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_AFL,     0)                     /* Advanced fault logging not supported. */
                    | RT_BF_MAKE(VTD_BF_CAP_REG_RWBF,    0)                     /* Software need not flush write-buffers. */
                    | RT_BF_MAKE(VTD_BF_CAP_REG_PLMR,    0)                     /* Protected Low-Memory Region not supported. */
                    | RT_BF_MAKE(VTD_BF_CAP_REG_PHMR,    0)                     /* Protected High-Memory Region not supported. */
                    | RT_BF_MAKE(VTD_BF_CAP_REG_CM,      1)                     /** @todo Figure out if required when we impl. caching. */
                    | RT_BF_MAKE(VTD_BF_CAP_REG_SAGAW,   fSlts & uSagaw)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_MGAW,    uMgaw)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_ZLR,     1)                     /** @todo Figure out if/how to support zero-length reads. */
                    | RT_BF_MAKE(VTD_BF_CAP_REG_FRO,     offFro)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_SLLPS,   fSlts & fSllps)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_PSI,     fPsi)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_NFR,     DMAR_FRCD_REG_COUNT - 1)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_MAMV,    fPsi & fMamv)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_DWD,     1)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_DRD,     1)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_FL1GP,   fFlts & fFl1gp)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_PI,      0)                     /* Posted Interrupts not supported. */
                    | RT_BF_MAKE(VTD_BF_CAP_REG_FL5LP,   fFlts & fFl5lp)
                    | RT_BF_MAKE(VTD_BF_CAP_REG_ESIRTPS, 0)                     /* Whether we invalidate interrupt cache on SIRTP flow. */
                    | RT_BF_MAKE(VTD_BF_CAP_REG_ESRTPS,  0);                    /* Whether we invalidate translation cache on SRTP flow. */
        dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_CAP_REG, pThis->fCap);
    }

    /* ECAP_REG */
    {
        uint8_t const fQi     = 1;                                /* Queued invalidations. */
        uint8_t const fIr     = !!(DMAR_ACPI_DMAR_FLAGS & ACPI_DMAR_F_INTR_REMAP);  /* Interrupt remapping support. */
        uint8_t const fMhmv   = 0xf;                              /* Maximum handle mask value. */
        uint16_t const offIro = DMAR_MMIO_OFF_IVA_REG >> 4;       /* MMIO offset of IOTLB registers. */
        uint8_t const fSrs    = 1;                                /* Supervisor request support. */
        uint8_t const fEim    = 1;                                /* Extended interrupt mode.*/
        uint8_t const fAdms   = 1;                                /* Abort DMA mode support. */

        pThis->fExtCap = RT_BF_MAKE(VTD_BF_ECAP_REG_C,      0)                  /* Accesses don't snoop CPU cache. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_QI,     1)                  /* NOTE(review): literal 1 rather than fQi -- same value today. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_DT,     0)                  /* Device-TLBs not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_IR,     fQi & fIr)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_EIM,    fIr & fEim)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_PT,     fPt)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_SC,     0)                  /* Snoop control not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_IRO,    offIro)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_MHMV,   fIr & fMhmv)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_MTS,    0)                  /* Memory type not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST,   fNest)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS,    0)                  /* 0 as DT not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS,    0)                  /* Execute request not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS,    fSmts & fSrs)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS,   0)                  /* 0 as DT not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_EAFS,   0)                  /** @todo figure out if EAFS is required? */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_PSS,    0)                  /* 0 as PASID not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_PASID,  0)                  /* PASID support. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_DIT,    0)                  /* 0 as DT not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_PDS,    0)                  /* 0 as DT not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_SMTS,   fSmts)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_VCS,    0)                  /* 0 as PASID not supported (commands seem PASID specific). */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_SLADS,  0)                  /* Second-level accessed/dirty not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_SLTS,   fSlts)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_FLTS,   fFlts)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_SMPWCS, 0)                  /* 0 as PASID not supported. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_RPS,    0)                  /* We don't support RID_PASID field in SM context entry. */
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_ADMS,   fAdms)
                       | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0);                 /** @todo figure out if we should/can support this? */
        dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCap);
    }

    /*
     * Initialize registers mutable by software.
     */
    /* FECTL_REG: fault event interrupts start out masked. */
    {
        uint32_t const uCtl = RT_BF_MAKE(VTD_BF_FECTL_REG_IM, 1);
        dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_FECTL_REG, uCtl);
    }

    /* IECTL_REG: invalidation event interrupts start out masked. */
    {
        uint32_t const uCtl = RT_BF_MAKE(VTD_BF_IECTL_REG_IM, 1);
        dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_IECTL_REG, uCtl);
    }

#ifdef VBOX_STRICT
    Assert(!RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_PRS));    /* PECTL_REG - Reserved if don't support PRS. */
    Assert(!RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_MTS));    /* MTRRCAP_REG - Reserved if we don't support MTS. */
#endif
}
1353
1354
1355/**
1356 * @interface_method_impl{PDMDEVREG,pfnReset}
1357 */
1358static DECLCALLBACK(void) iommuIntelR3Reset(PPDMDEVINS pDevIns)
1359{
1360 RT_NOREF1(pDevIns);
1361 LogFlowFunc(("\n"));
1362
1363 dmarR3RegsInit(pDevIns);
1364}
1365
1366
1367/**
1368 * @interface_method_impl{PDMDEVREG,pfnDestruct}
1369 */
1370static DECLCALLBACK(int) iommuIntelR3Destruct(PPDMDEVINS pDevIns)
1371{
1372 RT_NOREF(pDevIns);
1373 LogFlowFunc(("\n"));
1374 return VINF_SUCCESS;
1375}
1376
1377
/**
 * @interface_method_impl{PDMDEVREG,pfnConstruct}
 *
 * Ring-3 constructor: registers the device with PDM as an IOMMU, sets up the
 * PCI function, the MMIO region, debugger info handler, statistics, and
 * initializes the DMAR registers.
 */
static DECLCALLBACK(int) iommuIntelR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
{
    RT_NOREF(pCfg);

    PDMAR   pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    PDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PDMARR3);
    pThisR3->pDevInsR3 = pDevIns;

    LogFlowFunc(("iInstance=%d\n", iInstance));
    NOREF(iInstance);

    /*
     * Register the IOMMU with PDM.
     */
    PDMIOMMUREGR3 IommuReg;
    RT_ZERO(IommuReg);
    IommuReg.u32Version       = PDM_IOMMUREGCC_VERSION;
    IommuReg.pfnMemAccess     = iommuIntelMemAccess;
    IommuReg.pfnMemBulkAccess = iommuIntelMemBulkAccess;
    IommuReg.pfnMsiRemap      = iommuIntelMsiRemap;
    IommuReg.u32TheEnd        = PDM_IOMMUREGCC_VERSION;
    int rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisR3->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
    if (RT_FAILURE(rc))
        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to register ourselves as an IOMMU device"));
    /* Sanity-check the helper table returned by PDM (version markers at both ends). */
    if (pThisR3->CTX_SUFF(pIommuHlp)->u32Version != PDM_IOMMUHLPR3_VERSION)
        return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
                                   N_("IOMMU helper version mismatch; got %#x expected %#x"),
                                   pThisR3->CTX_SUFF(pIommuHlp)->u32Version, PDM_IOMMUHLPR3_VERSION);
    if (pThisR3->CTX_SUFF(pIommuHlp)->u32TheEnd != PDM_IOMMUHLPR3_VERSION)
        return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
                                   N_("IOMMU helper end-version mismatch; got %#x expected %#x"),
                                   pThisR3->CTX_SUFF(pIommuHlp)->u32TheEnd, PDM_IOMMUHLPR3_VERSION);
    /*
     * Use PDM's critical section (via helpers) for the IOMMU device.
     */
    rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
    AssertRCReturn(rc, rc);

    /*
     * Initialize PCI configuration registers.
     */
    PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
    PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);

    /* Header. */
    PDMPciDevSetVendorId(pPciDev, DMAR_PCI_VENDOR_ID);          /* Intel */
    PDMPciDevSetDeviceId(pPciDev, DMAR_PCI_DEVICE_ID);          /* VirtualBox DMAR device */
    PDMPciDevSetRevisionId(pPciDev, DMAR_PCI_REVISION_ID);      /* VirtualBox specific device implementation revision */
    PDMPciDevSetClassBase(pPciDev, VBOX_PCI_CLASS_SYSTEM);      /* System Base Peripheral */
    PDMPciDevSetClassSub(pPciDev, VBOX_PCI_SUB_SYSTEM_OTHER);   /* Other */
    PDMPciDevSetHeaderType(pPciDev, 0);                         /* Single function, type 0 */
    PDMPciDevSetSubSystemId(pPciDev, DMAR_PCI_DEVICE_ID);       /* VirtualBox DMAR device */
    PDMPciDevSetSubSystemVendorId(pPciDev, DMAR_PCI_VENDOR_ID); /* Intel */

    /** @todo Chipset spec says PCI Express Capability Id. Relevant for us? */
    PDMPciDevSetStatus(pPciDev, 0);
    PDMPciDevSetCapabilityList(pPciDev, 0);

    /** @todo VTBAR at 0x180? */

    /*
     * Register the PCI function with PDM.
     */
    rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
    AssertLogRelRCReturn(rc, rc);

    /** @todo Register MSI but what's the MSI capability offset? */
#if 0
    /*
     * Register MSI support for the PCI device.
     * This must be done -after- registering it as a PCI device!
     */
#endif

    /*
     * Register MMIO region.
     */
    AssertCompile(!(DMAR_MMIO_BASE_PHYSADDR & X86_PAGE_4K_OFFSET_MASK));
    rc = PDMDevHlpMmioCreateAndMap(pDevIns, DMAR_MMIO_BASE_PHYSADDR, DMAR_MMIO_SIZE, dmarMmioWrite, dmarMmioRead,
                                   IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_DWORD_QWORD_ZEROED,
                                   "Intel-IOMMU", &pThis->hMmio);
    AssertRCReturn(rc, rc);

    /*
     * Register debugger info items.
     */
    PDMDevHlpDBGFInfoRegister(pDevIns, "iommu", "Display IOMMU state.", dmarR3DbgInfo);

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadR3, STAMTYPE_COUNTER, "R3/MmioRead", STAMUNIT_OCCURENCES, "Number of MMIO reads in R3");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadRZ, STAMTYPE_COUNTER, "RZ/MmioRead", STAMUNIT_OCCURENCES, "Number of MMIO reads in RZ.");

    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteR3, STAMTYPE_COUNTER, "R3/MmioWrite", STAMUNIT_OCCURENCES, "Number of MMIO writes in R3.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteRZ, STAMTYPE_COUNTER, "RZ/MmioWrite", STAMUNIT_OCCURENCES, "Number of MMIO writes in RZ.");

    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapR3, STAMTYPE_COUNTER, "R3/MsiRemap", STAMUNIT_OCCURENCES, "Number of interrupt remap requests in R3.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapRZ, STAMTYPE_COUNTER, "RZ/MsiRemap", STAMUNIT_OCCURENCES, "Number of interrupt remap requests in RZ.");

    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemReadR3, STAMTYPE_COUNTER, "R3/MemRead", STAMUNIT_OCCURENCES, "Number of memory read translation requests in R3.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemReadRZ, STAMTYPE_COUNTER, "RZ/MemRead", STAMUNIT_OCCURENCES, "Number of memory read translation requests in RZ.");

    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemWriteR3, STAMTYPE_COUNTER, "R3/MemWrite", STAMUNIT_OCCURENCES, "Number of memory write translation requests in R3.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemWriteRZ, STAMTYPE_COUNTER, "RZ/MemWrite", STAMUNIT_OCCURENCES, "Number of memory write translation requests in RZ.");

    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkReadR3, STAMTYPE_COUNTER, "R3/MemBulkRead", STAMUNIT_OCCURENCES, "Number of memory bulk read translation requests in R3.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkReadRZ, STAMTYPE_COUNTER, "RZ/MemBulkRead", STAMUNIT_OCCURENCES, "Number of memory bulk read translation requests in RZ.");

    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkWriteR3, STAMTYPE_COUNTER, "R3/MemBulkWrite", STAMUNIT_OCCURENCES, "Number of memory bulk write translation requests in R3.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkWriteRZ, STAMTYPE_COUNTER, "RZ/MemBulkWrite", STAMUNIT_OCCURENCES, "Number of memory bulk write translation requests in RZ.");
#endif

    /*
     * Initialize registers.
     */
    dmarR3RegsInit(pDevIns);

    /*
     * Log some of the features exposed to software.
     */
    uint32_t const uVerReg         = pThis->uVerReg;
    uint8_t const  cMaxGstAddrBits = RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_MGAW) + 1;
    uint8_t const  cSupGstAddrBits = vtdCapRegGetSagawBits(RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_SAGAW));
    uint16_t const offFrcd         = RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_FRO);
    uint16_t const offIva          = RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_IRO);
    LogRel(("%s: VER=%u.%u CAP=%#RX64 ECAP=%#RX64 (MGAW=%u bits, SAGAW=%u bits, FRO=%#x, IRO=%#x) mapped at %#RGp\n", DMAR_LOG_PFX,
            RT_BF_GET(uVerReg, VTD_BF_VER_REG_MAX), RT_BF_GET(uVerReg, VTD_BF_VER_REG_MIN),
            pThis->fCap, pThis->fExtCap, cMaxGstAddrBits, cSupGstAddrBits, offFrcd, offIva, DMAR_MMIO_BASE_PHYSADDR));
    return VINF_SUCCESS;
}
1513
1514#else
1515
/**
 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
 *
 * Ring-0 / raw-mode constructor: hooks up the per-context MMIO handlers and
 * IOMMU callbacks to the instance created by the ring-3 constructor.
 */
static DECLCALLBACK(int) iommuIntelRZConstruct(PPDMDEVINS pDevIns)
{
    PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
    PDMAR   pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    PDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PDMARCC);
    pThisCC->CTX_SUFF(pDevIns) = pDevIns;

    /* We will use PDM's critical section (via helpers) for the IOMMU device. */
    int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
    AssertRCReturn(rc, rc);

    /* Set up the MMIO RZ handlers (same handlers as ring-3, different context). */
    rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, dmarMmioWrite, dmarMmioRead, NULL /* pvUser */);
    AssertRCReturn(rc, rc);

    /* Set up the IOMMU RZ callbacks. */
    PDMIOMMUREGCC IommuReg;
    RT_ZERO(IommuReg);
    IommuReg.u32Version       = PDM_IOMMUREGCC_VERSION;
    IommuReg.idxIommu         = pThis->idxIommu;
    IommuReg.pfnMemAccess     = iommuIntelMemAccess;
    IommuReg.pfnMemBulkAccess = iommuIntelMemBulkAccess;
    IommuReg.pfnMsiRemap      = iommuIntelMsiRemap;
    IommuReg.u32TheEnd        = PDM_IOMMUREGCC_VERSION;

    rc = PDMDevHlpIommuSetUpContext(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp));
    AssertRCReturn(rc, rc);
    AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp), VERR_IOMMU_IPE_1);
    /* NOTE(review): CTX_SUFF(PDM_IOMMUHLP)_VERSION presumably expands to the per-context
       helper version constant (e.g. PDM_IOMMUHLPR0_VERSION) -- confirm against CTX_SUFF. */
    AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32Version == CTX_SUFF(PDM_IOMMUHLP)_VERSION, VERR_VERSION_MISMATCH);
    AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd == CTX_SUFF(PDM_IOMMUHLP)_VERSION, VERR_VERSION_MISMATCH);
    AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp)->pfnLock, VERR_INVALID_POINTER);
    AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp)->pfnUnlock, VERR_INVALID_POINTER);
    AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp)->pfnLockIsOwner, VERR_INVALID_POINTER);
    AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi, VERR_INVALID_POINTER);

    return VINF_SUCCESS;
}
1556
1557#endif
1558
1559
/**
 * The device registration structure.
 *
 * Note: the callback section differs per compilation context (ring-3, ring-0,
 * raw-mode); only one of the three \#if branches below is compiled in.
 */
PDMDEVREG const g_DeviceIommuIntel =
{
    /* .u32Version = */             PDM_DEVREG_VERSION,
    /* .uReserved0 = */             0,
    /* .szName = */                 "iommu-intel",
    /* .fFlags = */                 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
    /* .fClass = */                 PDM_DEVREG_CLASS_PCI_BUILTIN,
    /* .cMaxInstances = */          1,
    /* .uSharedVersion = */         42,
    /* .cbInstanceShared = */       sizeof(DMAR),
    /* .cbInstanceCC = */           sizeof(DMARCC),
    /* .cbInstanceRC = */           sizeof(DMARRC),
    /* .cMaxPciDevices = */         1,
    /* .cMaxMsixVectors = */        0,
    /* .pszDescription = */         "IOMMU (Intel)",
#if defined(IN_RING3)
    /* .pszRCMod = */               "VBoxDDRC.rc",
    /* .pszR0Mod = */               "VBoxDDR0.r0",
    /* .pfnConstruct = */           iommuIntelR3Construct,
    /* .pfnDestruct = */            iommuIntelR3Destruct,
    /* .pfnRelocate = */            NULL,
    /* .pfnMemSetup = */            NULL,
    /* .pfnPowerOn = */             NULL,
    /* .pfnReset = */               iommuIntelR3Reset,
    /* .pfnSuspend = */             NULL,
    /* .pfnResume = */              NULL,
    /* .pfnAttach = */              NULL,
    /* .pfnDetach = */              NULL,
    /* .pfnQueryInterface = */      NULL,
    /* .pfnInitComplete = */        NULL,
    /* .pfnPowerOff = */            NULL,
    /* .pfnSoftReset = */           NULL,
    /* .pfnReserved0 = */           NULL,
    /* .pfnReserved1 = */           NULL,
    /* .pfnReserved2 = */           NULL,
    /* .pfnReserved3 = */           NULL,
    /* .pfnReserved4 = */           NULL,
    /* .pfnReserved5 = */           NULL,
    /* .pfnReserved6 = */           NULL,
    /* .pfnReserved7 = */           NULL,
#elif defined(IN_RING0)
    /* .pfnEarlyConstruct = */      NULL,
    /* .pfnConstruct = */           iommuIntelRZConstruct,
    /* .pfnDestruct = */            NULL,
    /* .pfnFinalDestruct = */       NULL,
    /* .pfnRequest = */             NULL,
    /* .pfnReserved0 = */           NULL,
    /* .pfnReserved1 = */           NULL,
    /* .pfnReserved2 = */           NULL,
    /* .pfnReserved3 = */           NULL,
    /* .pfnReserved4 = */           NULL,
    /* .pfnReserved5 = */           NULL,
    /* .pfnReserved6 = */           NULL,
    /* .pfnReserved7 = */           NULL,
#elif defined(IN_RC)
    /* .pfnConstruct = */           iommuIntelRZConstruct,
    /* .pfnReserved0 = */           NULL,
    /* .pfnReserved1 = */           NULL,
    /* .pfnReserved2 = */           NULL,
    /* .pfnReserved3 = */           NULL,
    /* .pfnReserved4 = */           NULL,
    /* .pfnReserved5 = */           NULL,
    /* .pfnReserved6 = */           NULL,
    /* .pfnReserved7 = */           NULL,
#else
# error "Not in IN_RING3, IN_RING0 or IN_RC!"
#endif
    /* .u32VersionEnd = */          PDM_DEVREG_VERSION
};
1632
1633#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
1634
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette