VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp@ 89216

Last change on this file since 89216 was 89216, checked in by vboxsync, 4 years ago

Intel IOMMU: bugref:9967 Address translation, WIP.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 147.4 KB
Line 
1/* $Id: DevIommuIntel.cpp 89216 2021-05-21 11:17:05Z vboxsync $ */
2/** @file
3 * IOMMU - Input/Output Memory Management Unit - Intel implementation.
4 */
5
6/*
7 * Copyright (C) 2021 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_IOMMU
23#include "VBoxDD.h"
24#include "DevIommuIntel.h"
25
26#include <iprt/mem.h>
27#include <iprt/string.h>
28
29
30/*********************************************************************************************************************************
31* Defined Constants And Macros *
32*********************************************************************************************************************************/
33/** Gets the low uint32_t of a uint64_t or something equivalent.
34 *
35 * This is suitable for casting constants outside code (since RT_LO_U32 can't be
36 * used as it asserts for correctness when compiling on certain compilers). */
37#define DMAR_LO_U32(a) (uint32_t)(UINT32_MAX & (a))
38
39/** Gets the high uint32_t of a uint64_t or something equivalent.
40 *
41 * This is suitable for casting constants outside code (since RT_HI_U32 can't be
42 * used as it asserts for correctness when compiling on certain compilers). */
43#define DMAR_HI_U32(a) (uint32_t)((a) >> 32)
44
45/** Asserts MMIO access' offset and size are valid or returns appropriate error
46 * code suitable for returning from MMIO access handlers. */
47#define DMAR_ASSERT_MMIO_ACCESS_RET(a_off, a_cb) \
48 do { \
49 AssertReturn((a_cb) == 4 || (a_cb) == 8, VINF_IOM_MMIO_UNUSED_FF); \
50 AssertReturn(!((a_off) & ((a_cb) - 1)), VINF_IOM_MMIO_UNUSED_FF); \
51 } while (0)
52
53/** Checks if the MMIO offset is valid. */
54#define DMAR_IS_MMIO_OFF_VALID(a_off) ( (a_off) < DMAR_MMIO_GROUP_0_OFF_END \
55 || (a_off) - DMAR_MMIO_GROUP_1_OFF_FIRST < DMAR_MMIO_GROUP_1_SIZE)
56
57/** Acquires the DMAR lock but returns with the given busy error code on failure. */
58#define DMAR_LOCK_RET(a_pDevIns, a_pThisCC, a_rcBusy) \
59 do { \
60 if ((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), (a_rcBusy)) == VINF_SUCCESS) \
61 { /* likely */ } \
62 else \
63 return (a_rcBusy); \
64 } while (0)
65
66/** Acquires the DMAR lock (not expected to fail). */
67#ifdef IN_RING3
68# define DMAR_LOCK(a_pDevIns, a_pThisCC) (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), VERR_IGNORED)
69#else
70# define DMAR_LOCK(a_pDevIns, a_pThisCC) \
71 do { \
72 int const rcLock = (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), VINF_SUCCESS); \
73 AssertRC(rcLock); \
74 } while (0)
75#endif
76
77/** Release the DMAR lock. */
78#define DMAR_UNLOCK(a_pDevIns, a_pThisCC) (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnUnlock(a_pDevIns)
79
80/** Asserts that the calling thread owns the DMAR lock. */
81#define DMAR_ASSERT_LOCK_IS_OWNER(a_pDevIns, a_pThisCC) \
82 do { \
83 Assert((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns)); \
84 RT_NOREF1(a_pThisCC); \
85 } while (0)
86
87/** Asserts that the calling thread does not own the DMAR lock. */
88#define DMAR_ASSERT_LOCK_IS_NOT_OWNER(a_pDevIns, a_pThisCC) \
89 do { \
90 Assert((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns) == false); \
91 RT_NOREF1(a_pThisCC); \
92 } while (0)
93
94/** The number of fault recording registers our implementation supports.
95 * Normal guest operation shouldn't trigger faults anyway, so we only support the
96 * minimum number of registers (which is 1).
97 *
98 * See Intel VT-d spec. 10.4.2 "Capability Register" (CAP_REG.NFR). */
99#define DMAR_FRCD_REG_COUNT UINT32_C(1)
100
101/** Offset of first register in group 0. */
102#define DMAR_MMIO_GROUP_0_OFF_FIRST VTD_MMIO_OFF_VER_REG
103/** Offset of last register in group 0 (inclusive). */
104#define DMAR_MMIO_GROUP_0_OFF_LAST VTD_MMIO_OFF_MTRR_PHYSMASK9_REG
105/** Last valid offset in group 0 (exclusive). */
106#define DMAR_MMIO_GROUP_0_OFF_END (DMAR_MMIO_GROUP_0_OFF_LAST + 8 /* sizeof MTRR_PHYSMASK9_REG */)
107/** Size of the group 0 (in bytes). */
108#define DMAR_MMIO_GROUP_0_SIZE (DMAR_MMIO_GROUP_0_OFF_END - DMAR_MMIO_GROUP_0_OFF_FIRST)
/** Implementation-specific MMIO offset of IVA_REG. */
#define DMAR_MMIO_OFF_IVA_REG                       0xe50
/** Implementation-specific MMIO offset of IOTLB_REG. */
#define DMAR_MMIO_OFF_IOTLB_REG                     0xe58
/** Implementation-specific MMIO offset of FRCD_LO_REG. */
#define DMAR_MMIO_OFF_FRCD_LO_REG                   0xe70
/** Implementation-specific MMIO offset of FRCD_HI_REG. */
#define DMAR_MMIO_OFF_FRCD_HI_REG                   0xe78
/* The fault recording registers must start on a 16-byte boundary. */
AssertCompile(!(DMAR_MMIO_OFF_FRCD_LO_REG & 0xf));
118
119/** Offset of first register in group 1. */
120#define DMAR_MMIO_GROUP_1_OFF_FIRST VTD_MMIO_OFF_VCCAP_REG
/** Offset of last register in group 1 (inclusive).
 * This is the FRCD_HI_REG of the last fault recording register; each FRCD
 * register pair (LO + HI) occupies 16 bytes. */
#define DMAR_MMIO_GROUP_1_OFF_LAST      (DMAR_MMIO_OFF_FRCD_HI_REG + 16 * (DMAR_FRCD_REG_COUNT - 1))
123/** Last valid offset in group 1 (exclusive). */
124#define DMAR_MMIO_GROUP_1_OFF_END (DMAR_MMIO_GROUP_1_OFF_LAST + 8 /* sizeof FRCD_HI_REG */)
125/** Size of the group 1 (in bytes). */
126#define DMAR_MMIO_GROUP_1_SIZE (DMAR_MMIO_GROUP_1_OFF_END - DMAR_MMIO_GROUP_1_OFF_FIRST)
127
128/** DMAR implementation's major version number (exposed to software).
129 * We report 6 as the major version since we support queued-invalidations as
130 * software may make assumptions based on that.
131 *
132 * See Intel VT-d spec. 10.4.7 "Context Command Register" (CCMD_REG.CAIG). */
133#define DMAR_VER_MAJOR 6
134/** DMAR implementation's minor version number (exposed to software). */
135#define DMAR_VER_MINOR 0
136
137/** Release log prefix string. */
138#define DMAR_LOG_PFX "Intel-IOMMU"
139/** The current saved state version. */
140#define DMAR_SAVED_STATE_VERSION 1
141
142
143/*********************************************************************************************************************************
144* Structures and Typedefs *
145*********************************************************************************************************************************/
146/**
147 * DMAR error diagnostics.
148 * Sorted alphabetically so it's easier to add and locate items, no other reason.
149 *
 * @note Members of this enum are used as array indices, so gaps in enum
 *       values are not allowed. Update g_apszDmarDiagDesc when you modify
152 * fields in this enum.
153 */
154typedef enum
155{
156 kDmarDiag_None = 0,
157 kDmarDiag_CcmdReg_NotSupported,
158 kDmarDiag_CcmdReg_Qi_Enabled,
159 kDmarDiag_CcmdReg_Ttm_Invalid,
160 kDmarDiag_IqaReg_Dsc_Fetch_Error,
161 kDmarDiag_IqaReg_Dw_128_Invalid,
162 kDmarDiag_IqaReg_Dw_256_Invalid,
163 kDmarDiag_Iqei_Dsc_Type_Invalid,
164 kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd,
165 kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd,
166 kDmarDiag_Iqei_Inv_Wait_Dsc_Invalid,
167 kDmarDiag_Iqei_Ttm_Rsvd,
168 kDmarDiag_IqtReg_Qt_Invalid,
169 kDmarDiag_IqtReg_Qt_NotAligned,
170 kDmarDiag_Ir_Cfi_Blocked,
171 kDmarDiag_Ir_Rfi_Intr_Index_Invalid,
172 kDmarDiag_Ir_Rfi_Irte_Mode_Invalid,
173 kDmarDiag_Ir_Rfi_Irte_Not_Present,
174 kDmarDiag_Ir_Rfi_Irte_Read_Failed,
175 kDmarDiag_Ir_Rfi_Irte_Rsvd,
176 kDmarDiag_Ir_Rfi_Irte_Svt_Bus,
177 kDmarDiag_Ir_Rfi_Irte_Svt_Masked,
178 kDmarDiag_Ir_Rfi_Irte_Svt_Rsvd,
179 kDmarDiag_Ir_Rfi_Rsvd,
180 /* Member for determining array index limit. */
181 kDmarDiag_End,
182 /* Type size hack. */
183 kDmarDiag_32Bit_Hack = 0x7fffffff
184} DMARDIAG;
185AssertCompileSize(DMARDIAG, 4);
186
187/** DMAR diagnostic enum description expansion.
188 * The below construct ensures typos in the input to this macro are caught
189 * during compile time. */
190#define DMARDIAG_DESC(a_Name) RT_CONCAT(kDmarDiag_, a_Name) < kDmarDiag_End ? RT_STR(a_Name) : "Ignored"
191
192/** DMAR diagnostics description for members in DMARDIAG. */
193static const char *const g_apszDmarDiagDesc[] =
194{
195 DMARDIAG_DESC(None ),
196 DMARDIAG_DESC(CcmdReg_NotSupported ),
197 DMARDIAG_DESC(CcmdReg_Qi_Enabled ),
198 DMARDIAG_DESC(CcmdReg_Ttm_Invalid ),
199 DMARDIAG_DESC(IqaReg_Dsc_Fetch_Error ),
200 DMARDIAG_DESC(IqaReg_Dw_128_Invalid ),
201 DMARDIAG_DESC(IqaReg_Dw_256_Invalid ),
202 DMARDIAG_DESC(Iqei_Dsc_Type_Invalid ),
203 DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_0_1_Rsvd),
204 DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_2_3_Rsvd),
205 DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_Invalid ),
206 DMARDIAG_DESC(Iqei_Ttm_Rsvd ),
207 DMARDIAG_DESC(IqtReg_Qt_Invalid ),
208 DMARDIAG_DESC(IqtReg_Qt_NotAligned ),
209 DMARDIAG_DESC(Ir_Cfi_Blocked ),
210 DMARDIAG_DESC(Ir_Rfi_Intr_Index_Invalid ),
211 DMARDIAG_DESC(Ir_Rfi_Irte_Mode_Invalid ),
212 DMARDIAG_DESC(Ir_Rfi_Irte_Not_Present ),
213 DMARDIAG_DESC(Ir_Rfi_Irte_Read_Failed ),
214 DMARDIAG_DESC(Ir_Rfi_Irte_Rsvd ),
215 DMARDIAG_DESC(Ir_Rfi_Irte_Svt_Bus ),
216 DMARDIAG_DESC(Ir_Rfi_Irte_Svt_Masked ),
217 DMARDIAG_DESC(Ir_Rfi_Irte_Svt_Rsvd ),
218 DMARDIAG_DESC(Ir_Rfi_Rsvd ),
219 /* kDmarDiag_End */
220};
221AssertCompile(RT_ELEMENTS(g_apszDmarDiagDesc) == kDmarDiag_End);
222#undef DMARDIAG_DESC
223
224/**
225 * The shared DMAR device state.
226 */
227typedef struct DMAR
228{
229 /** IOMMU device index. */
230 uint32_t idxIommu;
231 /** DMAR magic. */
232 uint32_t u32Magic;
233
234 /** Registers (group 0). */
235 uint8_t abRegs0[DMAR_MMIO_GROUP_0_SIZE];
236 /** Registers (group 1). */
237 uint8_t abRegs1[DMAR_MMIO_GROUP_1_SIZE];
238
239 /** @name Lazily activated registers.
240 * These are the active values for lazily activated registers. Software is free to
241 * modify the actual register values while remapping/translation is enabled but they
242 * take effect only when explicitly signaled by software, hence we need to hold the
243 * active values separately.
244 * @{ */
245 /** Currently active IRTA_REG. */
246 uint64_t uIrtaReg;
247 /** Currently active RTADDR_REG. */
248 uint64_t uRtaddrReg;
249 /** @} */
250
251 /** @name Register copies for a tiny bit faster and more convenient access.
252 * @{ */
253 /** Copy of VER_REG. */
254 uint8_t uVerReg;
255 /** Alignment. */
256 uint8_t abPadding[7];
257 /** Copy of CAP_REG. */
258 uint64_t fCapReg;
259 /** Copy of ECAP_REG. */
260 uint64_t fExtCapReg;
261 /** @} */
262
263 /** The event semaphore the invalidation-queue thread waits on. */
264 SUPSEMEVENT hEvtInvQueue;
265 /** Padding. */
266 uint32_t uPadding0;
267 /** Error diagnostic. */
268 DMARDIAG enmDiag;
269 /** The MMIO handle. */
270 IOMMMIOHANDLE hMmio;
271
272#ifdef VBOX_WITH_STATISTICS
273 STAMCOUNTER StatMmioReadR3; /**< Number of MMIO reads in R3. */
274 STAMCOUNTER StatMmioReadRZ; /**< Number of MMIO reads in RZ. */
275 STAMCOUNTER StatMmioWriteR3; /**< Number of MMIO writes in R3. */
276 STAMCOUNTER StatMmioWriteRZ; /**< Number of MMIO writes in RZ. */
277
278 STAMCOUNTER StatMsiRemapCfiR3; /**< Number of compatibility-format interrupts remap requests in R3. */
279 STAMCOUNTER StatMsiRemapCfiRZ; /**< Number of compatibility-format interrupts remap requests in RZ. */
280 STAMCOUNTER StatMsiRemapRfiR3; /**< Number of remappable-format interrupts remap requests in R3. */
281 STAMCOUNTER StatMsiRemapRfiRZ; /**< Number of remappable-format interrupts remap requests in RZ. */
282
283 STAMCOUNTER StatMemReadR3; /**< Number of memory read translation requests in R3. */
284 STAMCOUNTER StatMemReadRZ; /**< Number of memory read translation requests in RZ. */
285 STAMCOUNTER StatMemWriteR3; /**< Number of memory write translation requests in R3. */
286 STAMCOUNTER StatMemWriteRZ; /**< Number of memory write translation requests in RZ. */
287
288 STAMCOUNTER StatMemBulkReadR3; /**< Number of memory read bulk translation requests in R3. */
289 STAMCOUNTER StatMemBulkReadRZ; /**< Number of memory read bulk translation requests in RZ. */
290 STAMCOUNTER StatMemBulkWriteR3; /**< Number of memory write bulk translation requests in R3. */
291 STAMCOUNTER StatMemBulkWriteRZ; /**< Number of memory write bulk translation requests in RZ. */
292
293 STAMCOUNTER StatCcInvDsc; /**< Number of Context-cache descriptors processed. */
294 STAMCOUNTER StatIotlbInvDsc; /**< Number of IOTLB descriptors processed. */
295 STAMCOUNTER StatDevtlbInvDsc; /**< Number of Device-TLB descriptors processed. */
296 STAMCOUNTER StatIecInvDsc; /**< Number of Interrupt-Entry cache descriptors processed. */
297 STAMCOUNTER StatInvWaitDsc; /**< Number of Invalidation wait descriptors processed. */
298 STAMCOUNTER StatPasidIotlbInvDsc; /**< Number of PASID-based IOTLB descriptors processed. */
299 STAMCOUNTER StatPasidCacheInvDsc; /**< Number of PASID-cache descriptors processed. */
300 STAMCOUNTER StatPasidDevtlbInvDsc; /**< Number of PASID-based device-TLB descriptors processed. */
301#endif
302} DMAR;
303/** Pointer to the DMAR device state. */
304typedef DMAR *PDMAR;
305/** Pointer to the const DMAR device state. */
306typedef DMAR const *PCDMAR;
307AssertCompileMemberAlignment(DMAR, abRegs0, 8);
308AssertCompileMemberAlignment(DMAR, abRegs1, 8);
309
310/**
311 * The ring-3 DMAR device state.
312 */
313typedef struct DMARR3
314{
315 /** Device instance. */
316 PPDMDEVINSR3 pDevInsR3;
317 /** The IOMMU helper. */
318 R3PTRTYPE(PCPDMIOMMUHLPR3) pIommuHlpR3;
319 /** The invalidation-queue thread. */
320 R3PTRTYPE(PPDMTHREAD) pInvQueueThread;
321} DMARR3;
322/** Pointer to the ring-3 DMAR device state. */
323typedef DMARR3 *PDMARR3;
324/** Pointer to the const ring-3 DMAR device state. */
325typedef DMARR3 const *PCDMARR3;
326
327/**
328 * The ring-0 DMAR device state.
329 */
330typedef struct DMARR0
331{
332 /** Device instance. */
333 PPDMDEVINSR0 pDevInsR0;
334 /** The IOMMU helper. */
335 R0PTRTYPE(PCPDMIOMMUHLPR0) pIommuHlpR0;
336} DMARR0;
337/** Pointer to the ring-0 IOMMU device state. */
338typedef DMARR0 *PDMARR0;
339/** Pointer to the const ring-0 IOMMU device state. */
340typedef DMARR0 const *PCDMARR0;
341
/**
 * The raw-mode DMAR device state.
 */
typedef struct DMARRC
{
    /** Device instance. */
    PPDMDEVINSRC                pDevInsRC;
    /** The IOMMU helper. */
    RCPTRTYPE(PCPDMIOMMUHLPRC)  pIommuHlpRC;
} DMARRC;
/** Pointer to the raw-mode DMAR device state. */
typedef DMARRC *PDMARRC;
/** Pointer to the const raw-mode DMAR device state.
 * @todo r=review: 'PCIDMARRC' looks like a typo of 'PCDMARRC' (cf. PCDMARR3,
 *       PCDMARR0) -- confirm there are no external users before renaming. */
typedef DMARRC const *PCIDMARRC;
356
357/** The DMAR device state for the current context. */
358typedef CTX_SUFF(DMAR) DMARCC;
359/** Pointer to the DMAR device state for the current context. */
360typedef CTX_SUFF(PDMAR) PDMARCC;
361/** Pointer to the const DMAR device state for the current context. */
362typedef CTX_SUFF(PDMAR) const PCDMARCC;
363
364/**
365 * Type of DMAR originated events that generate interrupts.
366 */
367typedef enum DMAREVENTTYPE
368{
369 /** Invalidation completion event. */
370 DMAREVENTTYPE_INV_COMPLETE = 0,
371 /** Fault event. */
372 DMAREVENTTYPE_FAULT
373} DMAREVENTTYPE;
374
375
376/*********************************************************************************************************************************
377* Global Variables *
378*********************************************************************************************************************************/
379/**
380 * Read-write masks for DMAR registers (group 0).
381 */
382static uint32_t const g_au32RwMasks0[] =
383{
384 /* Offset Register Low High */
385 /* 0x000 VER_REG */ VTD_VER_REG_RW_MASK,
386 /* 0x004 Reserved */ 0,
387 /* 0x008 CAP_REG */ DMAR_LO_U32(VTD_CAP_REG_RW_MASK), DMAR_HI_U32(VTD_CAP_REG_RW_MASK),
388 /* 0x010 ECAP_REG */ DMAR_LO_U32(VTD_ECAP_REG_RW_MASK), DMAR_HI_U32(VTD_ECAP_REG_RW_MASK),
389 /* 0x018 GCMD_REG */ VTD_GCMD_REG_RW_MASK,
390 /* 0x01c GSTS_REG */ VTD_GSTS_REG_RW_MASK,
391 /* 0x020 RTADDR_REG */ DMAR_LO_U32(VTD_RTADDR_REG_RW_MASK), DMAR_HI_U32(VTD_RTADDR_REG_RW_MASK),
392 /* 0x028 CCMD_REG */ DMAR_LO_U32(VTD_CCMD_REG_RW_MASK), DMAR_HI_U32(VTD_CCMD_REG_RW_MASK),
393 /* 0x030 Reserved */ 0,
394 /* 0x034 FSTS_REG */ VTD_FSTS_REG_RW_MASK,
395 /* 0x038 FECTL_REG */ VTD_FECTL_REG_RW_MASK,
396 /* 0x03c FEDATA_REG */ VTD_FEDATA_REG_RW_MASK,
397 /* 0x040 FEADDR_REG */ VTD_FEADDR_REG_RW_MASK,
398 /* 0x044 FEUADDR_REG */ VTD_FEUADDR_REG_RW_MASK,
399 /* 0x048 Reserved */ 0, 0,
400 /* 0x050 Reserved */ 0, 0,
401 /* 0x058 AFLOG_REG */ DMAR_LO_U32(VTD_AFLOG_REG_RW_MASK), DMAR_HI_U32(VTD_AFLOG_REG_RW_MASK),
402 /* 0x060 Reserved */ 0,
403 /* 0x064 PMEN_REG */ 0, /* RO as we don't support PLMR and PHMR. */
404 /* 0x068 PLMBASE_REG */ 0, /* RO as we don't support PLMR. */
405 /* 0x06c PLMLIMIT_REG */ 0, /* RO as we don't support PLMR. */
406 /* 0x070 PHMBASE_REG */ 0, 0, /* RO as we don't support PHMR. */
407 /* 0x078 PHMLIMIT_REG */ 0, 0, /* RO as we don't support PHMR. */
408 /* 0x080 IQH_REG */ DMAR_LO_U32(VTD_IQH_REG_RW_MASK), DMAR_HI_U32(VTD_IQH_REG_RW_MASK),
409 /* 0x088 IQT_REG */ DMAR_LO_U32(VTD_IQT_REG_RW_MASK), DMAR_HI_U32(VTD_IQT_REG_RW_MASK),
410 /* 0x090 IQA_REG */ DMAR_LO_U32(VTD_IQA_REG_RW_MASK), DMAR_HI_U32(VTD_IQA_REG_RW_MASK),
411 /* 0x098 Reserved */ 0,
412 /* 0x09c ICS_REG */ VTD_ICS_REG_RW_MASK,
413 /* 0x0a0 IECTL_REG */ VTD_IECTL_REG_RW_MASK,
414 /* 0x0a4 IEDATA_REG */ VTD_IEDATA_REG_RW_MASK,
415 /* 0x0a8 IEADDR_REG */ VTD_IEADDR_REG_RW_MASK,
416 /* 0x0ac IEUADDR_REG */ VTD_IEUADDR_REG_RW_MASK,
417 /* 0x0b0 IQERCD_REG */ DMAR_LO_U32(VTD_IQERCD_REG_RW_MASK), DMAR_HI_U32(VTD_IQERCD_REG_RW_MASK),
418 /* 0x0b8 IRTA_REG */ DMAR_LO_U32(VTD_IRTA_REG_RW_MASK), DMAR_HI_U32(VTD_IRTA_REG_RW_MASK),
419 /* 0x0c0 PQH_REG */ DMAR_LO_U32(VTD_PQH_REG_RW_MASK), DMAR_HI_U32(VTD_PQH_REG_RW_MASK),
420 /* 0x0c8 PQT_REG */ DMAR_LO_U32(VTD_PQT_REG_RW_MASK), DMAR_HI_U32(VTD_PQT_REG_RW_MASK),
421 /* 0x0d0 PQA_REG */ DMAR_LO_U32(VTD_PQA_REG_RW_MASK), DMAR_HI_U32(VTD_PQA_REG_RW_MASK),
422 /* 0x0d8 Reserved */ 0,
423 /* 0x0dc PRS_REG */ VTD_PRS_REG_RW_MASK,
424 /* 0x0e0 PECTL_REG */ VTD_PECTL_REG_RW_MASK,
425 /* 0x0e4 PEDATA_REG */ VTD_PEDATA_REG_RW_MASK,
426 /* 0x0e8 PEADDR_REG */ VTD_PEADDR_REG_RW_MASK,
427 /* 0x0ec PEUADDR_REG */ VTD_PEUADDR_REG_RW_MASK,
428 /* 0x0f0 Reserved */ 0, 0,
429 /* 0x0f8 Reserved */ 0, 0,
430 /* 0x100 MTRRCAP_REG */ DMAR_LO_U32(VTD_MTRRCAP_REG_RW_MASK), DMAR_HI_U32(VTD_MTRRCAP_REG_RW_MASK),
431 /* 0x108 MTRRDEF_REG */ 0, 0, /* RO as we don't support MTS. */
432 /* 0x110 Reserved */ 0, 0,
433 /* 0x118 Reserved */ 0, 0,
434 /* 0x120 MTRR_FIX64_00000_REG */ 0, 0, /* RO as we don't support MTS. */
435 /* 0x128 MTRR_FIX16K_80000_REG */ 0, 0,
436 /* 0x130 MTRR_FIX16K_A0000_REG */ 0, 0,
437 /* 0x138 MTRR_FIX4K_C0000_REG */ 0, 0,
438 /* 0x140 MTRR_FIX4K_C8000_REG */ 0, 0,
439 /* 0x148 MTRR_FIX4K_D0000_REG */ 0, 0,
440 /* 0x150 MTRR_FIX4K_D8000_REG */ 0, 0,
441 /* 0x158 MTRR_FIX4K_E0000_REG */ 0, 0,
442 /* 0x160 MTRR_FIX4K_E8000_REG */ 0, 0,
443 /* 0x168 MTRR_FIX4K_F0000_REG */ 0, 0,
444 /* 0x170 MTRR_FIX4K_F8000_REG */ 0, 0,
445 /* 0x178 Reserved */ 0, 0,
446 /* 0x180 MTRR_PHYSBASE0_REG */ 0, 0, /* RO as we don't support MTS. */
447 /* 0x188 MTRR_PHYSMASK0_REG */ 0, 0,
448 /* 0x190 MTRR_PHYSBASE1_REG */ 0, 0,
449 /* 0x198 MTRR_PHYSMASK1_REG */ 0, 0,
450 /* 0x1a0 MTRR_PHYSBASE2_REG */ 0, 0,
451 /* 0x1a8 MTRR_PHYSMASK2_REG */ 0, 0,
452 /* 0x1b0 MTRR_PHYSBASE3_REG */ 0, 0,
453 /* 0x1b8 MTRR_PHYSMASK3_REG */ 0, 0,
454 /* 0x1c0 MTRR_PHYSBASE4_REG */ 0, 0,
455 /* 0x1c8 MTRR_PHYSMASK4_REG */ 0, 0,
456 /* 0x1d0 MTRR_PHYSBASE5_REG */ 0, 0,
457 /* 0x1d8 MTRR_PHYSMASK5_REG */ 0, 0,
458 /* 0x1e0 MTRR_PHYSBASE6_REG */ 0, 0,
459 /* 0x1e8 MTRR_PHYSMASK6_REG */ 0, 0,
460 /* 0x1f0 MTRR_PHYSBASE7_REG */ 0, 0,
461 /* 0x1f8 MTRR_PHYSMASK7_REG */ 0, 0,
462 /* 0x200 MTRR_PHYSBASE8_REG */ 0, 0,
463 /* 0x208 MTRR_PHYSMASK8_REG */ 0, 0,
464 /* 0x210 MTRR_PHYSBASE9_REG */ 0, 0,
465 /* 0x218 MTRR_PHYSMASK9_REG */ 0, 0,
466};
467AssertCompile(sizeof(g_au32RwMasks0) == DMAR_MMIO_GROUP_0_SIZE);
468
469/**
470 * Read-only Status, Write-1-to-clear masks for DMAR registers (group 0).
471 */
472static uint32_t const g_au32Rw1cMasks0[] =
473{
474 /* Offset Register Low High */
475 /* 0x000 VER_REG */ 0,
476 /* 0x004 Reserved */ 0,
477 /* 0x008 CAP_REG */ 0, 0,
478 /* 0x010 ECAP_REG */ 0, 0,
479 /* 0x018 GCMD_REG */ 0,
480 /* 0x01c GSTS_REG */ 0,
481 /* 0x020 RTADDR_REG */ 0, 0,
482 /* 0x028 CCMD_REG */ 0, 0,
483 /* 0x030 Reserved */ 0,
484 /* 0x034 FSTS_REG */ VTD_FSTS_REG_RW1C_MASK,
485 /* 0x038 FECTL_REG */ 0,
486 /* 0x03c FEDATA_REG */ 0,
487 /* 0x040 FEADDR_REG */ 0,
488 /* 0x044 FEUADDR_REG */ 0,
489 /* 0x048 Reserved */ 0, 0,
490 /* 0x050 Reserved */ 0, 0,
491 /* 0x058 AFLOG_REG */ 0, 0,
492 /* 0x060 Reserved */ 0,
493 /* 0x064 PMEN_REG */ 0,
494 /* 0x068 PLMBASE_REG */ 0,
495 /* 0x06c PLMLIMIT_REG */ 0,
496 /* 0x070 PHMBASE_REG */ 0, 0,
497 /* 0x078 PHMLIMIT_REG */ 0, 0,
498 /* 0x080 IQH_REG */ 0, 0,
499 /* 0x088 IQT_REG */ 0, 0,
500 /* 0x090 IQA_REG */ 0, 0,
501 /* 0x098 Reserved */ 0,
502 /* 0x09c ICS_REG */ VTD_ICS_REG_RW1C_MASK,
503 /* 0x0a0 IECTL_REG */ 0,
504 /* 0x0a4 IEDATA_REG */ 0,
505 /* 0x0a8 IEADDR_REG */ 0,
506 /* 0x0ac IEUADDR_REG */ 0,
507 /* 0x0b0 IQERCD_REG */ 0, 0,
508 /* 0x0b8 IRTA_REG */ 0, 0,
509 /* 0x0c0 PQH_REG */ 0, 0,
510 /* 0x0c8 PQT_REG */ 0, 0,
511 /* 0x0d0 PQA_REG */ 0, 0,
512 /* 0x0d8 Reserved */ 0,
513 /* 0x0dc PRS_REG */ 0,
514 /* 0x0e0 PECTL_REG */ 0,
515 /* 0x0e4 PEDATA_REG */ 0,
516 /* 0x0e8 PEADDR_REG */ 0,
517 /* 0x0ec PEUADDR_REG */ 0,
518 /* 0x0f0 Reserved */ 0, 0,
519 /* 0x0f8 Reserved */ 0, 0,
520 /* 0x100 MTRRCAP_REG */ 0, 0,
521 /* 0x108 MTRRDEF_REG */ 0, 0,
522 /* 0x110 Reserved */ 0, 0,
523 /* 0x118 Reserved */ 0, 0,
524 /* 0x120 MTRR_FIX64_00000_REG */ 0, 0,
525 /* 0x128 MTRR_FIX16K_80000_REG */ 0, 0,
526 /* 0x130 MTRR_FIX16K_A0000_REG */ 0, 0,
527 /* 0x138 MTRR_FIX4K_C0000_REG */ 0, 0,
528 /* 0x140 MTRR_FIX4K_C8000_REG */ 0, 0,
529 /* 0x148 MTRR_FIX4K_D0000_REG */ 0, 0,
530 /* 0x150 MTRR_FIX4K_D8000_REG */ 0, 0,
531 /* 0x158 MTRR_FIX4K_E0000_REG */ 0, 0,
532 /* 0x160 MTRR_FIX4K_E8000_REG */ 0, 0,
533 /* 0x168 MTRR_FIX4K_F0000_REG */ 0, 0,
534 /* 0x170 MTRR_FIX4K_F8000_REG */ 0, 0,
535 /* 0x178 Reserved */ 0, 0,
536 /* 0x180 MTRR_PHYSBASE0_REG */ 0, 0,
537 /* 0x188 MTRR_PHYSMASK0_REG */ 0, 0,
538 /* 0x190 MTRR_PHYSBASE1_REG */ 0, 0,
539 /* 0x198 MTRR_PHYSMASK1_REG */ 0, 0,
540 /* 0x1a0 MTRR_PHYSBASE2_REG */ 0, 0,
541 /* 0x1a8 MTRR_PHYSMASK2_REG */ 0, 0,
542 /* 0x1b0 MTRR_PHYSBASE3_REG */ 0, 0,
543 /* 0x1b8 MTRR_PHYSMASK3_REG */ 0, 0,
544 /* 0x1c0 MTRR_PHYSBASE4_REG */ 0, 0,
545 /* 0x1c8 MTRR_PHYSMASK4_REG */ 0, 0,
546 /* 0x1d0 MTRR_PHYSBASE5_REG */ 0, 0,
547 /* 0x1d8 MTRR_PHYSMASK5_REG */ 0, 0,
548 /* 0x1e0 MTRR_PHYSBASE6_REG */ 0, 0,
549 /* 0x1e8 MTRR_PHYSMASK6_REG */ 0, 0,
550 /* 0x1f0 MTRR_PHYSBASE7_REG */ 0, 0,
551 /* 0x1f8 MTRR_PHYSMASK7_REG */ 0, 0,
552 /* 0x200 MTRR_PHYSBASE8_REG */ 0, 0,
553 /* 0x208 MTRR_PHYSMASK8_REG */ 0, 0,
554 /* 0x210 MTRR_PHYSBASE9_REG */ 0, 0,
555 /* 0x218 MTRR_PHYSMASK9_REG */ 0, 0,
556};
557AssertCompile(sizeof(g_au32Rw1cMasks0) == DMAR_MMIO_GROUP_0_SIZE);
558
559/**
560 * Read-write masks for DMAR registers (group 1).
561 */
562static uint32_t const g_au32RwMasks1[] =
563{
564 /* Offset Register Low High */
565 /* 0xe00 VCCAP_REG */ DMAR_LO_U32(VTD_VCCAP_REG_RW_MASK), DMAR_HI_U32(VTD_VCCAP_REG_RW_MASK),
566 /* 0xe08 VCMD_EO_REG */ DMAR_LO_U32(VTD_VCMD_EO_REG_RW_MASK), DMAR_HI_U32(VTD_VCMD_EO_REG_RW_MASK),
567 /* 0xe10 VCMD_REG */ 0, 0, /* RO: VCS not supported. */
568 /* 0xe18 VCMDRSVD_REG */ 0, 0,
569 /* 0xe20 VCRSP_REG */ 0, 0, /* RO: VCS not supported. */
570 /* 0xe28 VCRSPRSVD_REG */ 0, 0,
571 /* 0xe30 Reserved */ 0, 0,
572 /* 0xe38 Reserved */ 0, 0,
573 /* 0xe40 Reserved */ 0, 0,
574 /* 0xe48 Reserved */ 0, 0,
575 /* 0xe50 IVA_REG */ DMAR_LO_U32(VTD_IVA_REG_RW_MASK), DMAR_HI_U32(VTD_IVA_REG_RW_MASK),
576 /* 0xe58 IOTLB_REG */ DMAR_LO_U32(VTD_IOTLB_REG_RW_MASK), DMAR_HI_U32(VTD_IOTLB_REG_RW_MASK),
577 /* 0xe60 Reserved */ 0, 0,
578 /* 0xe68 Reserved */ 0, 0,
579 /* 0xe70 FRCD_REG_LO */ DMAR_LO_U32(VTD_FRCD_REG_LO_RW_MASK), DMAR_HI_U32(VTD_FRCD_REG_LO_RW_MASK),
580 /* 0xe78 FRCD_REG_HI */ DMAR_LO_U32(VTD_FRCD_REG_HI_RW_MASK), DMAR_HI_U32(VTD_FRCD_REG_HI_RW_MASK),
581};
582AssertCompile(sizeof(g_au32RwMasks1) == DMAR_MMIO_GROUP_1_SIZE);
583AssertCompile((DMAR_MMIO_OFF_FRCD_LO_REG - DMAR_MMIO_GROUP_1_OFF_FIRST) + DMAR_FRCD_REG_COUNT * 2 * sizeof(uint64_t) );
584
585/**
586 * Read-only Status, Write-1-to-clear masks for DMAR registers (group 1).
587 */
588static uint32_t const g_au32Rw1cMasks1[] =
589{
590 /* Offset Register Low High */
591 /* 0xe00 VCCAP_REG */ 0, 0,
592 /* 0xe08 VCMD_EO_REG */ 0, 0,
593 /* 0xe10 VCMD_REG */ 0, 0,
594 /* 0xe18 VCMDRSVD_REG */ 0, 0,
595 /* 0xe20 VCRSP_REG */ 0, 0,
596 /* 0xe28 VCRSPRSVD_REG */ 0, 0,
597 /* 0xe30 Reserved */ 0, 0,
598 /* 0xe38 Reserved */ 0, 0,
599 /* 0xe40 Reserved */ 0, 0,
600 /* 0xe48 Reserved */ 0, 0,
601 /* 0xe50 IVA_REG */ 0, 0,
602 /* 0xe58 IOTLB_REG */ 0, 0,
603 /* 0xe60 Reserved */ 0, 0,
604 /* 0xe68 Reserved */ 0, 0,
605 /* 0xe70 FRCD_REG_LO */ DMAR_LO_U32(VTD_FRCD_REG_LO_RW1C_MASK), DMAR_HI_U32(VTD_FRCD_REG_LO_RW1C_MASK),
606 /* 0xe78 FRCD_REG_HI */ DMAR_LO_U32(VTD_FRCD_REG_HI_RW1C_MASK), DMAR_HI_U32(VTD_FRCD_REG_HI_RW1C_MASK),
607};
608AssertCompile(sizeof(g_au32Rw1cMasks1) == DMAR_MMIO_GROUP_1_SIZE);
609
610/** Array of RW masks for each register group. */
611static uint8_t const *g_apbRwMasks[] = { (uint8_t *)&g_au32RwMasks0[0], (uint8_t *)&g_au32RwMasks1[0] };
612
613/** Array of RW1C masks for each register group. */
614static uint8_t const *g_apbRw1cMasks[] = { (uint8_t *)&g_au32Rw1cMasks0[0], (uint8_t *)&g_au32Rw1cMasks1[0] };
615
616/* Masks arrays must be identical in size (even bounds checking code assumes this). */
617AssertCompile(sizeof(g_apbRw1cMasks) == sizeof(g_apbRwMasks));
618
619
620#ifndef VBOX_DEVICE_STRUCT_TESTCASE
621/** @todo Add IOMMU struct size/alignment verification, see
622 * Devices/testcase/Makefile.kmk and
623 * Devices/testcase/tstDeviceStructSize[RC].cpp */
624
625/**
626 * Returns the number of supported adjusted guest-address width (SAGAW) in bits
627 * given a CAP_REG.SAGAW value.
628 *
629 * @returns Number of SAGAW bits.
630 * @param uSagaw The CAP_REG.SAGAW value.
631 */
632static uint8_t vtdCapRegGetSagawBits(uint8_t uSagaw)
633{
634 if (RT_LIKELY(uSagaw > 0 && uSagaw < 4))
635 return 30 + (uSagaw * 9);
636 return 0;
637}
638
639
640/**
641 * Returns the supported adjusted guest-address width (SAGAW) given the maximum
642 * guest address width (MGAW).
643 *
644 * @returns The CAP_REG.SAGAW value.
645 * @param uMgaw The CAP_REG.MGAW value.
646 */
647static uint8_t vtdCapRegGetSagaw(uint8_t uMgaw)
648{
649 switch (uMgaw + 1)
650 {
651 case 39: return 1;
652 case 48: return 2;
653 case 57: return 3;
654 }
655 return 0;
656}
657
658
659/**
660 * Returns whether the interrupt remapping fault is qualified or not.
661 *
662 * @returns @c true if qualified, @c false otherwise.
663 * @param enmIntrFault The interrupt remapping fault condition.
664 */
665static bool vtdIrFaultIsQualified(VTDINTRFAULT enmIntrFault)
666{
667 switch (enmIntrFault)
668 {
669 case VTDINTRFAULT_IRTE_NOT_PRESENT:
670 case VTDINTRFAULT_IRTE_PRESENT_RSVD:
671 case VTDINTRFAULT_IRTE_PRESENT_INVALID:
672 case VTDINTRFAULT_PID_READ_FAILED:
673 case VTDINTRFAULT_PID_RSVD:
674 return true;
675 default:
676 return false;
677 }
678}
679
680
681/**
682 * Returns table translation mode's descriptive name.
683 *
684 * @returns The descriptive name.
685 * @param uTtm The RTADDR_REG.TTM value.
686 */
687static const char* vtdRtaddrRegGetTtmDesc(uint8_t uTtm)
688{
689 Assert(!(uTtm & 3));
690 static const char* s_apszTtmNames[] =
691 {
692 "Legacy Mode",
693 "Scalable Mode",
694 "Reserved",
695 "Abort-DMA Mode"
696 };
697 return s_apszTtmNames[uTtm & (RT_ELEMENTS(s_apszTtmNames) - 1)];
698}
699
700
701/**
702 * Gets the index of the group the register belongs to given its MMIO offset.
703 *
704 * @returns The group index.
705 * @param offReg The MMIO offset of the register.
706 * @param cbReg The size of the access being made (for bounds checking on
707 * debug builds).
708 */
709DECLINLINE(uint8_t) dmarRegGetGroupIndex(uint16_t offReg, uint8_t cbReg)
710{
711 uint16_t const offLast = offReg + cbReg - 1;
712 AssertCompile(DMAR_MMIO_GROUP_0_OFF_FIRST == 0);
713 AssertMsg(DMAR_IS_MMIO_OFF_VALID(offLast), ("off=%#x cb=%u\n", offReg, cbReg));
714 return !(offLast < DMAR_MMIO_GROUP_0_OFF_END);
715}
716
717
718/**
719 * Gets the group the register belongs to given its MMIO offset.
720 *
721 * @returns Pointer to the first element of the register group.
722 * @param pThis The shared DMAR device state.
723 * @param offReg The MMIO offset of the register.
724 * @param cbReg The size of the access being made (for bounds checking on
725 * debug builds).
726 * @param pIdxGroup Where to store the index of the register group the register
727 * belongs to.
728 */
729DECLINLINE(uint8_t *) dmarRegGetGroup(PDMAR pThis, uint16_t offReg, uint8_t cbReg, uint8_t *pIdxGroup)
730{
731 *pIdxGroup = dmarRegGetGroupIndex(offReg, cbReg);
732 uint8_t *apbRegs[] = { &pThis->abRegs0[0], &pThis->abRegs1[0] };
733 return apbRegs[*pIdxGroup];
734}
735
736
737/**
738 * Const/read-only version of dmarRegGetGroup.
739 *
740 * @copydoc dmarRegGetGroup
741 */
742DECLINLINE(uint8_t const*) dmarRegGetGroupRo(PCDMAR pThis, uint16_t offReg, uint8_t cbReg, uint8_t *pIdxGroup)
743{
744 *pIdxGroup = dmarRegGetGroupIndex(offReg, cbReg);
745 uint8_t const *apbRegs[] = { &pThis->abRegs0[0], &pThis->abRegs1[0] };
746 return apbRegs[*pIdxGroup];
747}
748
749
750/**
751 * Writes a 32-bit register with the exactly the supplied value.
752 *
753 * @param pThis The shared DMAR device state.
754 * @param offReg The MMIO offset of the register.
755 * @param uReg The 32-bit value to write.
756 */
757static void dmarRegWriteRaw32(PDMAR pThis, uint16_t offReg, uint32_t uReg)
758{
759 uint8_t idxGroup;
760 uint8_t *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint32_t), &idxGroup);
761 NOREF(idxGroup);
762 *(uint32_t *)(pabRegs + offReg) = uReg;
763}
764
765
766/**
767 * Writes a 64-bit register with the exactly the supplied value.
768 *
769 * @param pThis The shared DMAR device state.
770 * @param offReg The MMIO offset of the register.
771 * @param uReg The 64-bit value to write.
772 */
773static void dmarRegWriteRaw64(PDMAR pThis, uint16_t offReg, uint64_t uReg)
774{
775 uint8_t idxGroup;
776 uint8_t *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint64_t), &idxGroup);
777 NOREF(idxGroup);
778 *(uint64_t *)(pabRegs + offReg) = uReg;
779}
780
781
782/**
783 * Reads a 32-bit register with exactly the value it contains.
784 *
785 * @returns The raw register value.
786 * @param pThis The shared DMAR device state.
787 * @param offReg The MMIO offset of the register.
788 */
789static uint32_t dmarRegReadRaw32(PCDMAR pThis, uint16_t offReg)
790{
791 uint8_t idxGroup;
792 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint32_t), &idxGroup);
793 NOREF(idxGroup);
794 return *(uint32_t *)(pabRegs + offReg);
795}
796
797
798/**
799 * Reads a 64-bit register with exactly the value it contains.
800 *
801 * @returns The raw register value.
802 * @param pThis The shared DMAR device state.
803 * @param offReg The MMIO offset of the register.
804 */
805static uint64_t dmarRegReadRaw64(PCDMAR pThis, uint16_t offReg)
806{
807 uint8_t idxGroup;
808 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint64_t), &idxGroup);
809 NOREF(idxGroup);
810 return *(uint64_t *)(pabRegs + offReg);
811}
812
813
814/**
815 * Reads a 32-bit register with exactly the value it contains along with their
816 * corresponding masks
817 *
818 * @param pThis The shared DMAR device state.
819 * @param offReg The MMIO offset of the register.
820 * @param puReg Where to store the raw 32-bit register value.
821 * @param pfRwMask Where to store the RW mask corresponding to this register.
822 * @param pfRw1cMask Where to store the RW1C mask corresponding to this register.
823 */
824static void dmarRegReadRaw32Ex(PCDMAR pThis, uint16_t offReg, uint32_t *puReg, uint32_t *pfRwMask, uint32_t *pfRw1cMask)
825{
826 uint8_t idxGroup;
827 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint32_t), &idxGroup);
828 Assert(idxGroup < RT_ELEMENTS(g_apbRwMasks));
829 uint8_t const *pabRwMasks = g_apbRwMasks[idxGroup];
830 uint8_t const *pabRw1cMasks = g_apbRw1cMasks[idxGroup];
831 *puReg = *(uint32_t *)(pabRegs + offReg);
832 *pfRwMask = *(uint32_t *)(pabRwMasks + offReg);
833 *pfRw1cMask = *(uint32_t *)(pabRw1cMasks + offReg);
834}
835
836
837/**
838 * Reads a 64-bit register with exactly the value it contains along with their
839 * corresponding masks.
840 *
841 * @param pThis The shared DMAR device state.
842 * @param offReg The MMIO offset of the register.
843 * @param puReg Where to store the raw 64-bit register value.
844 * @param pfRwMask Where to store the RW mask corresponding to this register.
845 * @param pfRw1cMask Where to store the RW1C mask corresponding to this register.
846 */
847static void dmarRegReadRaw64Ex(PCDMAR pThis, uint16_t offReg, uint64_t *puReg, uint64_t *pfRwMask, uint64_t *pfRw1cMask)
848{
849 uint8_t idxGroup;
850 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint64_t), &idxGroup);
851 Assert(idxGroup < RT_ELEMENTS(g_apbRwMasks));
852 uint8_t const *pabRwMasks = g_apbRwMasks[idxGroup];
853 uint8_t const *pabRw1cMasks = g_apbRw1cMasks[idxGroup];
854 *puReg = *(uint64_t *)(pabRegs + offReg);
855 *pfRwMask = *(uint64_t *)(pabRwMasks + offReg);
856 *pfRw1cMask = *(uint64_t *)(pabRw1cMasks + offReg);
857}
858
859
860/**
861 * Writes a 32-bit register as it would be when written by software.
862 * This will preserve read-only bits, mask off reserved bits and clear RW1C bits.
863 *
864 * @returns The value that's actually written to the register.
865 * @param pThis The shared DMAR device state.
866 * @param offReg The MMIO offset of the register.
867 * @param uReg The 32-bit value to write.
868 * @param puPrev Where to store the register value prior to writing.
869 */
870static uint32_t dmarRegWrite32(PDMAR pThis, uint16_t offReg, uint32_t uReg, uint32_t *puPrev)
871{
872 /* Read current value from the 32-bit register. */
873 uint32_t uCurReg;
874 uint32_t fRwMask;
875 uint32_t fRw1cMask;
876 dmarRegReadRaw32Ex(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
877 *puPrev = uCurReg;
878
879 uint32_t const fRoBits = uCurReg & ~fRwMask; /* Preserve current read-only and reserved bits. */
880 uint32_t const fRwBits = uReg & fRwMask; /* Merge newly written read/write bits. */
881 uint32_t const fRw1cBits = uReg & fRw1cMask; /* Clear 1s written to RW1C bits. */
882 uint32_t const uNewReg = (fRoBits | fRwBits) & ~fRw1cBits;
883
884 /* Write new value to the 32-bit register. */
885 dmarRegWriteRaw32(pThis, offReg, uNewReg);
886 return uNewReg;
887}
888
889
890/**
891 * Writes a 64-bit register as it would be when written by software.
892 * This will preserve read-only bits, mask off reserved bits and clear RW1C bits.
893 *
894 * @returns The value that's actually written to the register.
895 * @param pThis The shared DMAR device state.
896 * @param offReg The MMIO offset of the register.
897 * @param uReg The 64-bit value to write.
898 * @param puPrev Where to store the register value prior to writing.
899 */
900static uint64_t dmarRegWrite64(PDMAR pThis, uint16_t offReg, uint64_t uReg, uint64_t *puPrev)
901{
902 /* Read current value from the 64-bit register. */
903 uint64_t uCurReg;
904 uint64_t fRwMask;
905 uint64_t fRw1cMask;
906 dmarRegReadRaw64Ex(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
907 *puPrev = uCurReg;
908
909 uint64_t const fRoBits = uCurReg & ~fRwMask; /* Preserve current read-only and reserved bits. */
910 uint64_t const fRwBits = uReg & fRwMask; /* Merge newly written read/write bits. */
911 uint64_t const fRw1cBits = uReg & fRw1cMask; /* Clear 1s written to RW1C bits. */
912 uint64_t const uNewReg = (fRoBits | fRwBits) & ~fRw1cBits;
913
914 /* Write new value to the 64-bit register. */
915 dmarRegWriteRaw64(pThis, offReg, uNewReg);
916 return uNewReg;
917}
918
919
920/**
921 * Reads a 32-bit register as it would be when read by software.
922 *
923 * @returns The register value.
924 * @param pThis The shared DMAR device state.
925 * @param offReg The MMIO offset of the register.
926 */
927static uint32_t dmarRegRead32(PCDMAR pThis, uint16_t offReg)
928{
929 return dmarRegReadRaw32(pThis, offReg);
930}
931
932
933/**
934 * Reads a 64-bit register as it would be when read by software.
935 *
936 * @returns The register value.
937 * @param pThis The shared DMAR device state.
938 * @param offReg The MMIO offset of the register.
939 */
940static uint64_t dmarRegRead64(PCDMAR pThis, uint16_t offReg)
941{
942 return dmarRegReadRaw64(pThis, offReg);
943}
944
945
946/**
947 * Modifies a 32-bit register.
948 *
949 * @param pThis The shared DMAR device state.
950 * @param offReg The MMIO offset of the register.
951 * @param fAndMask The AND mask (applied first).
952 * @param fOrMask The OR mask.
953 * @remarks This does NOT apply RO or RW1C masks while modifying the
954 * register.
955 */
956static void dmarRegChangeRaw32(PDMAR pThis, uint16_t offReg, uint32_t fAndMask, uint32_t fOrMask)
957{
958 uint32_t uReg = dmarRegReadRaw32(pThis, offReg);
959 uReg = (uReg & fAndMask) | fOrMask;
960 dmarRegWriteRaw32(pThis, offReg, uReg);
961}
962
963
964/**
965 * Modifies a 64-bit register.
966 *
967 * @param pThis The shared DMAR device state.
968 * @param offReg The MMIO offset of the register.
969 * @param fAndMask The AND mask (applied first).
970 * @param fOrMask The OR mask.
971 * @remarks This does NOT apply RO or RW1C masks while modifying the
972 * register.
973 */
974static void dmarRegChangeRaw64(PDMAR pThis, uint16_t offReg, uint64_t fAndMask, uint64_t fOrMask)
975{
976 uint64_t uReg = dmarRegReadRaw64(pThis, offReg);
977 uReg = (uReg & fAndMask) | fOrMask;
978 dmarRegWriteRaw64(pThis, offReg, uReg);
979}
980
981
982/**
983 * Checks if the invalidation-queue is empty.
984 *
985 * Extended version which optionally returns the current queue head and tail
986 * offsets.
987 *
988 * @returns @c true if empty, @c false otherwise.
989 * @param pThis The shared DMAR device state.
990 * @param poffQh Where to store the queue head offset. Optional, can be NULL.
991 * @param poffQt Where to store the queue tail offset. Optional, can be NULL.
992 */
993static bool dmarInvQueueIsEmptyEx(PCDMAR pThis, uint32_t *poffQh, uint32_t *poffQt)
994{
995 /* Read only the low-32 bits of the queue head and queue tail as high bits are all RsvdZ.*/
996 uint32_t const uIqtReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IQT_REG);
997 uint32_t const uIqhReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IQH_REG);
998
999 /* Don't bother masking QT, QH since other bits are RsvdZ. */
1000 Assert(!(uIqtReg & ~VTD_BF_IQT_REG_QT_MASK));
1001 Assert(!(uIqhReg & ~VTD_BF_IQH_REG_QH_MASK));
1002 if (poffQh)
1003 *poffQh = uIqhReg;
1004 if (poffQt)
1005 *poffQt = uIqtReg;
1006 return uIqtReg == uIqhReg;
1007}
1008
1009
1010/**
1011 * Checks if the invalidation-queue is empty.
1012 *
1013 * @returns @c true if empty, @c false otherwise.
1014 * @param pThis The shared DMAR device state.
1015 */
1016static bool dmarInvQueueIsEmpty(PCDMAR pThis)
1017{
1018 return dmarInvQueueIsEmptyEx(pThis, NULL /* poffQh */, NULL /* poffQt */);
1019}
1020
1021
1022/**
1023 * Checks if the invalidation-queue is capable of processing requests.
1024 *
1025 * @returns @c true if the invalidation-queue can process requests, @c false
1026 * otherwise.
1027 * @param pThis The shared DMAR device state.
1028 */
1029static bool dmarInvQueueCanProcessRequests(PCDMAR pThis)
1030{
1031 /* Check if queued-invalidation is enabled. */
1032 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
1033 if (uGstsReg & VTD_BF_GSTS_REG_QIES_MASK)
1034 {
1035 /* Check if there are no invalidation-queue or timeout errors. */
1036 uint32_t const uFstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FSTS_REG);
1037 if (!(uFstsReg & (VTD_BF_FSTS_REG_IQE_MASK | VTD_BF_FSTS_REG_ITE_MASK)))
1038 return true;
1039 }
1040 return false;
1041}
1042
1043
1044/**
1045 * Wakes up the invalidation-queue thread if there are requests to be processed.
1046 *
1047 * @param pDevIns The IOMMU device instance.
1048 */
1049static void dmarInvQueueThreadWakeUpIfNeeded(PPDMDEVINS pDevIns)
1050{
1051 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1052 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1053 Log4Func(("\n"));
1054
1055 DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisCC);
1056
1057 if ( dmarInvQueueCanProcessRequests(pThis)
1058 && !dmarInvQueueIsEmpty(pThis))
1059 {
1060 Log4Func(("Signaling the invalidation-queue thread\n"));
1061 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtInvQueue);
1062 }
1063}
1064
1065
/**
 * Raises an event on behalf of the DMAR.
 *
 * These are events that are generated by the DMAR itself (like faults and
 * invalidation completion notifications).
 *
 * @param   pDevIns         The IOMMU device instance.
 * @param   enmEventType    The DMAR event type (fault or invalidation-complete).
 *
 * @remarks The DMAR lock must be held while calling this function.
 */
static void dmarEventRaiseInterrupt(PPDMDEVINS pDevIns, DMAREVENTTYPE enmEventType)
{
    /* Select the control register and MSI address/data registers for the event type. */
    uint16_t offCtlReg;
    uint32_t fIntrMaskedMask;
    uint32_t fIntrPendingMask;
    uint16_t offMsiAddrLoReg;
    uint16_t offMsiAddrHiReg;
    uint16_t offMsiDataReg;
    switch (enmEventType)
    {
        case DMAREVENTTYPE_INV_COMPLETE:
        {
            /* Invalidation-completion events use the IE*_REG register set. */
            offCtlReg = VTD_MMIO_OFF_IECTL_REG;
            fIntrMaskedMask = VTD_BF_IECTL_REG_IM_MASK;
            fIntrPendingMask = VTD_BF_IECTL_REG_IP_MASK;
            offMsiAddrLoReg = VTD_MMIO_OFF_IEADDR_REG;
            offMsiAddrHiReg = VTD_MMIO_OFF_IEUADDR_REG;
            offMsiDataReg = VTD_MMIO_OFF_IEDATA_REG;
            break;
        }

        case DMAREVENTTYPE_FAULT:
        {
            /* Fault events use the FE*_REG register set. */
            offCtlReg = VTD_MMIO_OFF_FECTL_REG;
            fIntrMaskedMask = VTD_BF_FECTL_REG_IM_MASK;
            fIntrPendingMask = VTD_BF_FECTL_REG_IP_MASK;
            offMsiAddrLoReg = VTD_MMIO_OFF_FEADDR_REG;
            offMsiAddrHiReg = VTD_MMIO_OFF_FEUADDR_REG;
            offMsiDataReg = VTD_MMIO_OFF_FEDATA_REG;
            break;
        }

        default:
        {
            /* Shouldn't ever happen. */
            AssertMsgFailedReturnVoid(("DMAR event type %#x unknown!\n", enmEventType));
        }
    }

    /* Check if software has masked the interrupt. */
    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    uint32_t uCtlReg = dmarRegReadRaw32(pThis, offCtlReg);
    if (!(uCtlReg & fIntrMaskedMask))
    {
        /*
         * Interrupt is unmasked, raise it.
         * Interrupts generated by the DMAR have trigger mode and level as 0.
         * See Intel spec. 5.1.6 "Remapping Hardware Event Interrupt Programming".
         */
        MSIMSG Msi;
        Msi.Addr.au32[0] = dmarRegReadRaw32(pThis, offMsiAddrLoReg);
        /* The upper MSI address is only honored when extended interrupt mode (EIM) is supported. */
        Msi.Addr.au32[1] = (pThis->fExtCapReg & VTD_BF_ECAP_REG_EIM_MASK) ? dmarRegReadRaw32(pThis, offMsiAddrHiReg) : 0;
        Msi.Data.u32 = dmarRegReadRaw32(pThis, offMsiDataReg);
        Assert(Msi.Data.n.u1Level == 0);
        Assert(Msi.Data.n.u1TriggerMode == 0);

        PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
        pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi(pDevIns, &Msi, 0 /* uTagSrc */);

        /* Clear interrupt pending bit. */
        uCtlReg &= ~fIntrPendingMask;
        dmarRegWriteRaw32(pThis, offCtlReg, uCtlReg);
    }
    else
    {
        /* Interrupt is masked, set the interrupt pending bit (delivered when unmasked). */
        uCtlReg |= fIntrPendingMask;
        dmarRegWriteRaw32(pThis, offCtlReg, uCtlReg);
    }
}
1147
1148
/**
 * Raises an interrupt in response to a fault event.
 *
 * @param   pDevIns     The IOMMU device instance.
 *
 * @remarks This assumes the caller has already set the required status bits in the
 *          FSTS_REG (namely one or more of PPF, PFO, IQE, ICE or ITE bits).
 * @remarks The DMAR lock must be held (asserted below).
 */
static void dmarFaultEventRaiseInterrupt(PPDMDEVINS pDevIns)
{
    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
    DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisCC);

#ifdef RT_STRICT
    {
        /* Strict builds: verify the caller really did set one of the fault status bits. */
        uint32_t const uFstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FSTS_REG);
        uint32_t const fFaultMask = VTD_BF_FSTS_REG_PPF_MASK | VTD_BF_FSTS_REG_PFO_MASK
                                  /* | VTD_BF_FSTS_REG_APF_MASK | VTD_BF_FSTS_REG_AFO_MASK */ /* AFL not supported */
                                  /* | VTD_BF_FSTS_REG_ICE_MASK | VTD_BF_FSTS_REG_ITE_MASK */ /* Device-TLBs not supported */
                                  | VTD_BF_FSTS_REG_IQE_MASK;
        Assert(uFstsReg & fFaultMask);
    }
#endif
    dmarEventRaiseInterrupt(pDevIns, DMAREVENTTYPE_FAULT);
}
1175
1176
1177#ifdef IN_RING3
1178/**
1179 * Raises an interrupt in response to an invalidation (complete) event.
1180 *
1181 * @param pDevIns The IOMMU device instance.
1182 */
1183static void dmarR3InvEventRaiseInterrupt(PPDMDEVINS pDevIns)
1184{
1185 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1186 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1187 DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisCC);
1188
1189 uint32_t const uIcsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_ICS_REG);
1190 if (!(uIcsReg & VTD_BF_ICS_REG_IWC_MASK))
1191 {
1192 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_ICS_REG, UINT32_MAX, VTD_BF_ICS_REG_IWC_MASK);
1193 dmarEventRaiseInterrupt(pDevIns, DMAREVENTTYPE_INV_COMPLETE);
1194 }
1195}
1196#endif /* IN_RING3 */
1197
1198
1199/**
1200 * Checks if a primary fault can be recorded.
1201 *
1202 * @returns @c true if the fault can be recorded, @c false otherwise.
1203 * @param pDevIns The IOMMU device instance.
1204 * @param pThis The shared DMAR device state.
1205 *
1206 * @remarks Warning: This function has side-effects wrt the DMAR register state. Do
1207 * NOT call it unless there is a fault condition!
1208 */
1209static bool dmarPrimaryFaultCanRecord(PPDMDEVINS pDevIns, PDMAR pThis)
1210{
1211 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1212 DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisCC);
1213
1214 uint32_t uFstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FSTS_REG);
1215 if (uFstsReg & VTD_BF_FSTS_REG_PFO_MASK)
1216 return false;
1217
1218 /*
1219 * If we add more FRCD registers, we'll have to loop through them here.
1220 * Since we support only one FRCD_REG, we don't support "compression of multiple faults",
1221 * nor do we need to increment FRI.
1222 *
1223 * See Intel VT-d spec. 7.2.1 "Primary Fault Logging".
1224 */
1225 AssertCompile(DMAR_FRCD_REG_COUNT == 1);
1226 uint64_t const uFrcdRegHi = dmarRegReadRaw64(pThis, DMAR_MMIO_OFF_FRCD_HI_REG);
1227 if (uFrcdRegHi & VTD_BF_1_FRCD_REG_F_MASK)
1228 {
1229 uFstsReg |= VTD_BF_FSTS_REG_PFO_MASK;
1230 dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_FSTS_REG, uFstsReg);
1231 return false;
1232 }
1233
1234 return true;
1235}
1236
1237
/**
 * Records a primary fault.
 *
 * @param   pDevIns     The IOMMU device instance.
 * @param   enmDiag     The diagnostic reason.
 * @param   uFrcdHi     The FRCD_HI_REG value for this fault.
 * @param   uFrcdLo     The FRCD_LO_REG value for this fault.
 */
static void dmarPrimaryFaultRecord(PPDMDEVINS pDevIns, DMARDIAG enmDiag, uint64_t uFrcdHi, uint64_t uFrcdLo)
{
    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);

    DMAR_LOCK(pDevIns, pThisCC);

    /* Update the diagnostic reason. */
    pThis->enmDiag = enmDiag;

    /* We don't support advance fault logging. */
    Assert(!(dmarRegRead32(pThis, VTD_MMIO_OFF_GSTS_REG) & VTD_BF_GSTS_REG_AFLS_MASK));

    /* Note: dmarPrimaryFaultCanRecord has side-effects (it may set PFO); only call it
       when a fault condition exists — which is the case here. */
    if (dmarPrimaryFaultCanRecord(pDevIns, pThis))
    {
        /* Update the fault recording registers with the fault information. */
        dmarRegWriteRaw64(pThis, DMAR_MMIO_OFF_FRCD_HI_REG, uFrcdHi);
        dmarRegWriteRaw64(pThis, DMAR_MMIO_OFF_FRCD_LO_REG, uFrcdLo);

        /* Set the Pending Primary Fault (PPF) field in the status register. */
        dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FSTS_REG, UINT32_MAX, VTD_BF_FSTS_REG_PPF_MASK);

        /* Raise interrupt if necessary. */
        dmarFaultEventRaiseInterrupt(pDevIns);
    }

    DMAR_UNLOCK(pDevIns, pThisCC);
}
1274
1275
1276/**
1277 * Records an interrupt request fault.
1278 *
1279 * @param pDevIns The IOMMU device instance.
1280 * @param enmDiag The diagnostic reason.
1281 * @param enmIntrFault The interrupt fault reason.
1282 * @param idDevice The device ID (bus, device, function).
1283 * @param idxIntr The interrupt index.
1284 */
1285static void dmarIntrFaultRecord(PPDMDEVINS pDevIns, DMARDIAG enmDiag, VTDINTRFAULT enmIntrFault, uint16_t idDevice,
1286 uint16_t idxIntr)
1287{
1288 uint64_t const uFrcdHi = RT_BF_MAKE(VTD_BF_1_FRCD_REG_SID, idDevice)
1289 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_FR, enmIntrFault)
1290 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_F, 1);
1291 uint64_t const uFrcdLo = (uint64_t)idxIntr << 48;
1292 dmarPrimaryFaultRecord(pDevIns, enmDiag, uFrcdHi, uFrcdLo);
1293}
1294
1295
1296/**
1297 * Records a qualified interrupt request fault.
1298 *
1299 * Qualified faults are those that can be suppressed by software using the FPD bit
1300 * in the IRTE.
1301 *
1302 * @param pDevIns The IOMMU device instance.
1303 * @param enmDiag The diagnostic reason.
1304 * @param enmIntrFault The interrupt fault reason.
1305 * @param idDevice The device ID (bus, device, function).
1306 * @param idxIntr The interrupt index.
1307 * @param pIrte The IRTE that caused this fault.
1308 */
1309static void dmarIntrFaultRecordQualified(PPDMDEVINS pDevIns, DMARDIAG enmDiag, VTDINTRFAULT enmIntrFault, uint16_t idDevice,
1310 uint16_t idxIntr, PCVTD_IRTE_T pIrte)
1311{
1312 Assert(vtdIrFaultIsQualified(enmIntrFault));
1313 Assert(pIrte);
1314 if (!(pIrte->au64[0] & VTD_BF_0_IRTE_FPD_MASK))
1315 return dmarIntrFaultRecord(pDevIns, enmDiag, enmIntrFault, idDevice, idxIntr);
1316}
1317
1318
1319/**
1320 * Records an address translation fault.
1321 *
1322 * @param pDevIns The IOMMU device instance.
1323 * @param enmDiag The diagnostic reason.
1324 * @param enmAddrFault The address translation fault reason.
1325 * @param idDevice The device ID (bus, device, function).
1326 * @param uFaultAddr The page address of the faulted request.
1327 * @param enmReqType The type of the faulted request.
1328 * @param uAddrType The address type of the faulted request (only applicable
1329 * when device-TLB is supported).
1330 * @param fHasPasid Whether the faulted request has a PASID TLP prefix.
1331 * @param uPasid The PASID value when a PASID TLP prefix is present.
1332 * @param fExec Execute permission was requested by the faulted request.
1333 * @param fPriv Supervisor privilege permission was requested by the
1334 * faulted request.
1335 */
1336static void dmarAddrFaultRecord(PPDMDEVINS pDevIns, DMARDIAG enmDiag, VTDADDRFAULT enmAddrFault, uint16_t idDevice,
1337 uint64_t uFaultAddr, VTDREQTYPE enmReqType, uint8_t uAddrType, bool fHasPasid, uint32_t uPasid,
1338 bool fExec, bool fPriv)
1339{
1340 uint8_t const fType1 = enmReqType & RT_BIT(1);
1341 uint8_t const fType2 = enmReqType & RT_BIT(0);
1342 uint64_t const uFrcdHi = RT_BF_MAKE(VTD_BF_1_FRCD_REG_SID, idDevice)
1343 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_T2, fType2)
1344 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_PP, fHasPasid)
1345 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_EXE, fExec)
1346 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_PRIV, fPriv)
1347 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_FR, enmAddrFault)
1348 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_PV, uPasid)
1349 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_AT, uAddrType)
1350 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_T1, fType1)
1351 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_F, 1);
1352 uint64_t const uFrcdLo = uFaultAddr & X86_PAGE_BASE_MASK;
1353 dmarPrimaryFaultRecord(pDevIns, enmDiag, uFrcdHi, uFrcdLo);
1354}
1355
1356
1357/**
1358 * Records an IQE fault.
1359 *
1360 * @param pDevIns The IOMMU device instance.
1361 * @param enmIqei The IQE information.
1362 * @param enmDiag The diagnostic reason.
1363 */
1364static void dmarIqeFaultRecord(PPDMDEVINS pDevIns, DMARDIAG enmDiag, VTDIQEI enmIqei)
1365{
1366 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1367 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1368
1369 DMAR_LOCK(pDevIns, pThisCC);
1370
1371 /* Update the diagnostic reason. */
1372 pThis->enmDiag = enmDiag;
1373
1374 /* Set the error bit. */
1375 uint32_t const fIqe = RT_BF_MAKE(VTD_BF_FSTS_REG_IQE, 1);
1376 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FSTS_REG, UINT32_MAX, fIqe);
1377
1378 /* Set the error information. */
1379 uint64_t const fIqei = RT_BF_MAKE(VTD_BF_IQERCD_REG_IQEI, enmIqei);
1380 dmarRegChangeRaw64(pThis, VTD_MMIO_OFF_IQERCD_REG, UINT64_MAX, fIqei);
1381
1382 dmarFaultEventRaiseInterrupt(pDevIns);
1383
1384 DMAR_UNLOCK(pDevIns, pThisCC);
1385}
1386
1387
/**
 * Handles writes to GCMD_REG.
 *
 * Latches the written command bits into the corresponding status bits of
 * GSTS_REG and kicks off any work the command requires (e.g. waking the
 * invalidation-queue thread, latching the root/interrupt table pointers).
 *
 * @returns Strict VBox status code.
 * @param   pDevIns     The IOMMU device instance.
 * @param   uGcmdReg    The value written to GCMD_REG.
 */
static VBOXSTRICTRC dmarGcmdRegWrite(PPDMDEVINS pDevIns, uint32_t uGcmdReg)
{
    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    uint32_t const uGstsReg   = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
    uint32_t const fChanged   = uGstsReg ^ uGcmdReg;   /* Command bits that differ from the current status. */
    uint64_t const fExtCapReg = pThis->fExtCapReg;

    /* Queued-invalidation (only when the QI capability is exposed and QIE toggled). */
    if (   (fExtCapReg & VTD_BF_ECAP_REG_QI_MASK)
        && (fChanged & VTD_BF_GCMD_REG_QIE_MASK))
    {
        if (uGcmdReg & VTD_BF_GCMD_REG_QIE_MASK)
        {
            /* Enabling: reflect QIES and process any requests already queued. */
            dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_QIES_MASK);
            dmarInvQueueThreadWakeUpIfNeeded(pDevIns);
        }
        else
        {
            /* Disabling: clear QIES and reset the queue head. */
            dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, ~VTD_BF_GSTS_REG_QIES_MASK, 0 /* fOrMask */);
            dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_IQH_REG, 0);
        }
    }

    /* Interrupt-remapping commands only apply when the IR capability is exposed. */
    if (fExtCapReg & VTD_BF_ECAP_REG_IR_MASK)
    {
        /* Set Interrupt Remapping Table Pointer (SIRTP): latch IRTA_REG and signal IRTPS. */
        if (uGcmdReg & VTD_BF_GCMD_REG_SIRTP_MASK)
        {
            /** @todo Perform global invalidation of all interrupt-entry cache when ESIRTPS is
             *        supported. */
            pThis->uIrtaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IRTA_REG);
            dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_IRTPS_MASK);
        }

        /* Interrupt remapping enable/disable (IRE -> IRES). */
        if (fChanged & VTD_BF_GCMD_REG_IRE_MASK)
        {
            if (uGcmdReg & VTD_BF_GCMD_REG_IRE_MASK)
                dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_IRES_MASK);
            else
                dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, ~VTD_BF_GSTS_REG_IRES_MASK, 0 /* fOrMask */);
        }

        /* Compatibility format interrupts (CFI -> CFIS). */
        if (fChanged & VTD_BF_GCMD_REG_CFI_MASK)
        {
            if (uGcmdReg & VTD_BF_GCMD_REG_CFI_MASK)
                dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_CFIS_MASK);
            else
                dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, ~VTD_BF_GSTS_REG_CFIS_MASK, 0 /* fOrMask */);
        }
    }

    /* Set Root Table Pointer (SRTP): latch RTADDR_REG and signal RTPS. */
    if (uGcmdReg & VTD_BF_GCMD_REG_SRTP_MASK)
    {
        /** @todo Perform global invalidation of all remapping translation caches when
         *        ESRTPS is supported. */
        pThis->uRtaddrReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_RTADDR_REG);
        dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_RTPS_MASK);
    }

    /* Translation (DMA remapping) enable/disable (TE -> TES). */
    if (fChanged & VTD_BF_GCMD_REG_TE_MASK)
    {
        if (uGcmdReg & VTD_BF_GCMD_REG_TE_MASK)
            dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_TES_MASK);
        else
            dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, ~VTD_BF_GSTS_REG_TES_MASK, 0 /* fOrMask */);
    }

    return VINF_SUCCESS;
}
1468
1469
1470/**
1471 * Handles writes to CCMD_REG.
1472 *
1473 * @returns Strict VBox status code.
1474 * @param pDevIns The IOMMU device instance.
1475 * @param offReg The MMIO register offset.
1476 * @param cbReg The size of the MMIO access (in bytes).
1477 * @param uCcmdReg The value written to CCMD_REG.
1478 */
1479static VBOXSTRICTRC dmarCcmdRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint8_t cbReg, uint64_t uCcmdReg)
1480{
1481 /* At present, we only care about responding to high 32-bits writes, low 32-bits are data. */
1482 if (offReg + cbReg > VTD_MMIO_OFF_CCMD_REG + 4)
1483 {
1484 /* Check if we need to invalidate the context-context. */
1485 bool const fIcc = RT_BF_GET(uCcmdReg, VTD_BF_CCMD_REG_ICC);
1486 if (fIcc)
1487 {
1488 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1489 uint8_t const uMajorVersion = RT_BF_GET(pThis->uVerReg, VTD_BF_VER_REG_MAX);
1490 if (uMajorVersion < 6)
1491 {
1492 /* Register-based invalidation can only be used when queued-invalidations are not enabled. */
1493 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
1494 if (!(uGstsReg & VTD_BF_GSTS_REG_QIES_MASK))
1495 {
1496 /* Verify table translation mode is legacy. */
1497 uint8_t const fTtm = RT_BF_GET(pThis->uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
1498 if (fTtm == VTD_TTM_LEGACY_MODE)
1499 {
1500 /** @todo Invalidate. */
1501 return VINF_SUCCESS;
1502 }
1503 pThis->enmDiag = kDmarDiag_CcmdReg_Ttm_Invalid;
1504 }
1505 else
1506 pThis->enmDiag = kDmarDiag_CcmdReg_Qi_Enabled;
1507 }
1508 else
1509 pThis->enmDiag = kDmarDiag_CcmdReg_NotSupported;
1510 dmarRegChangeRaw64(pThis, VTD_MMIO_OFF_GSTS_REG, ~VTD_BF_CCMD_REG_CAIG_MASK, 0 /* fOrMask */);
1511 }
1512 }
1513 return VINF_SUCCESS;
1514}
1515
1516
1517/**
1518 * Handles writes to FECTL_REG.
1519 *
1520 * @returns Strict VBox status code.
1521 * @param pDevIns The IOMMU device instance.
1522 * @param uFectlReg The value written to FECTL_REG.
1523 */
1524static VBOXSTRICTRC dmarFectlRegWrite(PPDMDEVINS pDevIns, uint32_t uFectlReg)
1525{
1526 /*
1527 * If software unmasks the interrupt when the interrupt is pending, we must raise
1528 * the interrupt now (which will consequently clear the interrupt pending (IP) bit).
1529 */
1530 if ( (uFectlReg & VTD_BF_FECTL_REG_IP_MASK)
1531 && ~(uFectlReg & VTD_BF_FECTL_REG_IM_MASK))
1532 dmarEventRaiseInterrupt(pDevIns, DMAREVENTTYPE_FAULT);
1533 return VINF_SUCCESS;
1534}
1535
1536
1537/**
1538 * Handles writes to FSTS_REG.
1539 *
1540 * @returns Strict VBox status code.
1541 * @param pDevIns The IOMMU device instance.
1542 * @param uFstsReg The value written to FSTS_REG.
1543 * @param uPrev The value in FSTS_REG prior to writing it.
1544 */
1545static VBOXSTRICTRC dmarFstsRegWrite(PPDMDEVINS pDevIns, uint32_t uFstsReg, uint32_t uPrev)
1546{
1547 /*
1548 * If software clears other status bits in FSTS_REG (pertaining to primary fault logging),
1549 * the interrupt pending (IP) bit must be cleared.
1550 *
1551 * See Intel VT-d spec. 10.4.10 "Fault Event Control Register".
1552 */
1553 uint32_t const fChanged = uPrev ^ uFstsReg;
1554 if (fChanged & ( VTD_BF_FSTS_REG_ICE_MASK | VTD_BF_FSTS_REG_ITE_MASK
1555 | VTD_BF_FSTS_REG_IQE_MASK | VTD_BF_FSTS_REG_PFO_MASK))
1556 {
1557 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1558 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FECTL_REG, ~VTD_BF_FECTL_REG_IP_MASK, 0 /* fOrMask */);
1559 }
1560 return VINF_SUCCESS;
1561}
1562
1563
1564/**
1565 * Handles writes to IQT_REG.
1566 *
1567 * @returns Strict VBox status code.
1568 * @param pDevIns The IOMMU device instance.
1569 * @param offReg The MMIO register offset.
1570 * @param uIqtReg The value written to IQT_REG.
1571 */
1572static VBOXSTRICTRC dmarIqtRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint64_t uIqtReg)
1573{
1574 /* We only care about the low 32-bits, high 32-bits are reserved. */
1575 Assert(offReg == VTD_MMIO_OFF_IQT_REG);
1576 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1577
1578 /* Paranoia. */
1579 Assert(!(uIqtReg & ~VTD_BF_IQT_REG_QT_MASK));
1580
1581 uint32_t const offQt = uIqtReg;
1582 uint64_t const uIqaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
1583 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
1584
1585 /* If the descriptor width is 256-bits, the queue tail offset must be aligned accordingly. */
1586 if ( fDw != VTD_IQA_REG_DW_256_BIT
1587 || !(offQt & RT_BIT(4)))
1588 dmarInvQueueThreadWakeUpIfNeeded(pDevIns);
1589 else
1590 {
1591 /* Hardware treats bit 4 as RsvdZ in this situation, so clear it. */
1592 dmarRegChangeRaw32(pThis, offReg, ~RT_BIT(4), 0 /* fOrMask */);
1593 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_NotAligned, VTDIQEI_QUEUE_TAIL_MISALIGNED);
1594 }
1595 return VINF_SUCCESS;
1596}
1597
1598
1599/**
1600 * Handles writes to IQA_REG.
1601 *
1602 * @returns Strict VBox status code.
1603 * @param pDevIns The IOMMU device instance.
1604 * @param offReg The MMIO register offset.
1605 * @param uIqaReg The value written to IQA_REG.
1606 */
1607static VBOXSTRICTRC dmarIqaRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint64_t uIqaReg)
1608{
1609 /* At present, we only care about the low 32-bits, high 32-bits are data. */
1610 Assert(offReg == VTD_MMIO_OFF_IQA_REG); NOREF(offReg);
1611
1612 /** @todo What happens if IQA_REG is written when dmarInvQueueCanProcessRequests
1613 * returns true? The Intel VT-d spec. doesn't state anywhere that it
1614 * cannot happen or that it's ignored when it does happen. */
1615
1616 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1617 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
1618 if (fDw == VTD_IQA_REG_DW_256_BIT)
1619 {
1620 bool const fSupports256BitDw = (pThis->fExtCapReg & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK));
1621 if (fSupports256BitDw)
1622 { /* likely */ }
1623 else
1624 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_256_Invalid, VTDIQEI_INVALID_DESCRIPTOR_WIDTH);
1625 }
1626 /* else: 128-bit descriptor width is validated lazily, see explanation in dmarR3InvQueueProcessRequests. */
1627
1628 return VINF_SUCCESS;
1629}
1630
1631
1632/**
1633 * Handles writes to ICS_REG.
1634 *
1635 * @returns Strict VBox status code.
1636 * @param pDevIns The IOMMU device instance.
1637 * @param uIcsReg The value written to ICS_REG.
1638 */
1639static VBOXSTRICTRC dmarIcsRegWrite(PPDMDEVINS pDevIns, uint32_t uIcsReg)
1640{
1641 /*
1642 * If the IP field is set when software services the interrupt condition,
1643 * (by clearing the IWC field), the IP field must be cleared.
1644 */
1645 if (!(uIcsReg & VTD_BF_ICS_REG_IWC_MASK))
1646 {
1647 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1648 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_IECTL_REG, ~VTD_BF_IECTL_REG_IP_MASK, 0 /* fOrMask */);
1649 }
1650 return VINF_SUCCESS;
1651}
1652
1653
1654/**
1655 * Handles writes to IECTL_REG.
1656 *
1657 * @returns Strict VBox status code.
1658 * @param pDevIns The IOMMU device instance.
1659 * @param uIectlReg The value written to IECTL_REG.
1660 */
1661static VBOXSTRICTRC dmarIectlRegWrite(PPDMDEVINS pDevIns, uint32_t uIectlReg)
1662{
1663 /*
1664 * If software unmasks the interrupt when the interrupt is pending, we must raise
1665 * the interrupt now (which will consequently clear the interrupt pending (IP) bit).
1666 */
1667 if ( (uIectlReg & VTD_BF_IECTL_REG_IP_MASK)
1668 && ~(uIectlReg & VTD_BF_IECTL_REG_IM_MASK))
1669 dmarEventRaiseInterrupt(pDevIns, DMAREVENTTYPE_INV_COMPLETE);
1670 return VINF_SUCCESS;
1671}
1672
1673
1674/**
1675 * Handles writes to FRCD_REG (High 64-bits).
1676 *
1677 * @returns Strict VBox status code.
1678 * @param pDevIns The IOMMU device instance.
1679 * @param offReg The MMIO register offset.
1680 * @param cbReg The size of the MMIO access (in bytes).
1681 * @param uFrcdHiReg The value written to FRCD_REG.
1682 * @param uPrev The value in FRCD_REG prior to writing it.
1683 */
1684static VBOXSTRICTRC dmarFrcdHiRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint8_t cbReg, uint64_t uFrcdHiReg, uint64_t uPrev)
1685{
1686 /* We only care about responding to high 32-bits, low 32-bits are read-only. */
1687 if (offReg + cbReg > DMAR_MMIO_OFF_FRCD_HI_REG + 4)
1688 {
1689 /*
1690 * If software cleared the RW1C F (fault) bit in all FRCD_REGs, hardware clears the
1691 * Primary Pending Fault (PPF) and the interrupt pending (IP) bits. Our implementation
1692 * has only 1 FRCD register.
1693 *
1694 * See Intel VT-d spec. 10.4.10 "Fault Event Control Register".
1695 */
1696 AssertCompile(DMAR_FRCD_REG_COUNT == 1);
1697 uint64_t const fChanged = uPrev ^ uFrcdHiReg;
1698 if (fChanged & VTD_BF_1_FRCD_REG_F_MASK)
1699 {
1700 Assert(!(uFrcdHiReg & VTD_BF_1_FRCD_REG_F_MASK)); /* Software should only ever be able to clear this bit. */
1701 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1702 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FSTS_REG, ~VTD_BF_FSTS_REG_PPF_MASK, 0 /* fOrMask */);
1703 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FECTL_REG, ~VTD_BF_FECTL_REG_IP_MASK, 0 /* fOrMask */);
1704 }
1705 }
1706 return VINF_SUCCESS;
1707}
1708
1709
1710/**
1711 * Memory access bulk (one or more 4K pages) request from a device.
1712 *
1713 * @returns VBox status code.
1714 * @param pDevIns The IOMMU device instance.
1715 * @param idDevice The device ID (bus, device, function).
1716 * @param cIovas The number of addresses being accessed.
1717 * @param pauIovas The I/O virtual addresses for each page being accessed.
1718 * @param fFlags The access flags, see PDMIOMMU_MEM_F_XXX.
1719 * @param paGCPhysSpa Where to store the translated physical addresses.
1720 *
1721 * @thread Any.
1722 */
1723static DECLCALLBACK(int) iommuIntelMemBulkAccess(PPDMDEVINS pDevIns, uint16_t idDevice, size_t cIovas, uint64_t const *pauIovas,
1724 uint32_t fFlags, PRTGCPHYS paGCPhysSpa)
1725{
1726 RT_NOREF6(pDevIns, idDevice, cIovas, pauIovas, fFlags, paGCPhysSpa);
1727 return VERR_NOT_IMPLEMENTED;
1728}
1729
1730
1731/**
1732 * Memory access transaction from a device.
1733 *
1734 * @returns VBox status code.
1735 * @param pDevIns The IOMMU device instance.
1736 * @param idDevice The device ID (bus, device, function).
1737 * @param uIova The I/O virtual address being accessed.
1738 * @param cbIova The size of the access.
1739 * @param fFlags The access flags, see PDMIOMMU_MEM_F_XXX.
1740 * @param pGCPhysSpa Where to store the translated system physical address.
1741 * @param pcbContiguous Where to store the number of contiguous bytes translated
1742 * and permission-checked.
1743 *
1744 * @thread Any.
1745 */
1746static DECLCALLBACK(int) iommuIntelMemAccess(PPDMDEVINS pDevIns, uint16_t idDevice, uint64_t uIova, size_t cbIova,
1747 uint32_t fFlags, PRTGCPHYS pGCPhysSpa, size_t *pcbContiguous)
1748{
1749 RT_NOREF6(idDevice, uIova, cbIova, fFlags, pGCPhysSpa, pcbContiguous);
1750
1751 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1752 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1753
1754 DMAR_LOCK(pDevIns, pThisCC);
1755 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
1756 uint64_t const uRtaddrReg = pThis->uRtaddrReg;
1757 DMAR_UNLOCK(pDevIns, pThisCC);
1758
1759 if (uGstsReg & VTD_BF_GSTS_REG_TES_MASK)
1760 {
1761 VTDREQTYPE enmReqType;
1762 if (fFlags & PDMIOMMU_MEM_F_READ)
1763 {
1764 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemRead));
1765 enmReqType = VTDREQTYPE_READ;
1766 }
1767 else
1768 {
1769 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemWrite));
1770 enmReqType = VTDREQTYPE_WRITE;
1771 }
1772
1773 uint8_t const fTtm = RT_BF_GET(uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
1774 switch (fTtm)
1775 {
1776 case VTD_TTM_LEGACY_MODE:
1777 case VTD_TTM_ABORT_DMA_MODE:
1778 {
1779 if (pThis->fExtCapReg & VTD_BF_ECAP_REG_ADMS_MASK)
1780 {
1781 }
1782 }
1783 }
1784
1785 return VERR_NOT_IMPLEMENTED;
1786 }
1787
1788 *pGCPhysSpa = uIova;
1789 *pcbContiguous = cbIova;
1790 return VINF_SUCCESS;
1791}
1792
1793
1794/**
1795 * Reads an IRTE from guest memory.
1796 *
1797 * @returns VBox status code.
1798 * @param pDevIns The IOMMU device instance.
1799 * @param uIrtaReg The IRTA_REG.
1800 * @param idxIntr The interrupt index.
1801 * @param pIrte Where to store the read IRTE.
1802 */
1803static int dmarIrReadIrte(PPDMDEVINS pDevIns, uint64_t uIrtaReg, uint16_t idxIntr, PVTD_IRTE_T pIrte)
1804{
1805 Assert(idxIntr < VTD_IRTA_REG_GET_ENTRY_COUNT(uIrtaReg));
1806
1807 size_t const cbIrte = sizeof(*pIrte);
1808 RTGCPHYS const GCPhysIrte = (uIrtaReg & VTD_BF_IRTA_REG_IRTA_MASK) + (idxIntr * cbIrte);
1809 return PDMDevHlpPhysReadMeta(pDevIns, GCPhysIrte, pIrte, cbIrte);
1810}
1811
1812
1813/**
1814 * Remaps the source MSI to the destination MSI given the IRTE.
1815 *
1816 * @param fExtIntrMode Whether extended interrupt mode is enabled (i.e
1817 * IRTA_REG.EIME).
1818 * @param pIrte The IRTE used for the remapping.
1819 * @param pMsiIn The source MSI (currently unused).
1820 * @param pMsiOut Where to store the remapped MSI.
1821 */
1822static void dmarIrRemapFromIrte(bool fExtIntrMode, PCVTD_IRTE_T pIrte, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
1823{
1824 NOREF(pMsiIn);
1825 uint64_t const uIrteQword0 = pIrte->au64[0];
1826
1827 /*
1828 * Let's start with a clean slate and preserve unspecified bits if the need arises.
1829 * For instance, address bits 1:0 is supposed to be "ignored" by remapping hardware,
1830 * but it's not clear if hardware zeroes out these bits in the remapped MSI or if
1831 * it copies it from the source MSI.
1832 */
1833 RT_ZERO(*pMsiOut);
1834 pMsiOut->Addr.n.u1DestMode = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_DM);
1835 pMsiOut->Addr.n.u1RedirHint = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_RH);
1836 pMsiOut->Addr.n.u12Addr = VBOX_MSI_ADDR_BASE >> VBOX_MSI_ADDR_SHIFT;
1837 if (fExtIntrMode)
1838 {
1839 /*
1840 * Apparently the DMAR stuffs the high 24-bits of the destination ID into the
1841 * high 24-bits of the upper 32-bits of the message address, see @bugref{9967#c22}.
1842 */
1843 uint32_t const idDest = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_DST);
1844 pMsiOut->Addr.n.u8DestId = idDest;
1845 pMsiOut->Addr.n.u32Rsvd0 = idDest & UINT32_C(0xffffff00);
1846 }
1847 else
1848 pMsiOut->Addr.n.u8DestId = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_DST_XAPIC);
1849
1850 pMsiOut->Data.n.u8Vector = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_V);
1851 pMsiOut->Data.n.u3DeliveryMode = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_DLM);
1852 pMsiOut->Data.n.u1Level = 1;
1853 pMsiOut->Data.n.u1TriggerMode = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_TM);
1854}
1855
1856
1857/**
1858 * Handles remapping of interrupts in remappable interrupt format.
1859 *
1860 * @returns VBox status code.
1861 * @param pDevIns The IOMMU device instance.
1862 * @param uIrtaReg The IRTA_REG.
1863 * @param idDevice The device ID (bus, device, function).
1864 * @param pMsiIn The source MSI.
1865 * @param pMsiOut Where to store the remapped MSI.
1866 */
1867static int dmarIrRemapIntr(PPDMDEVINS pDevIns, uint64_t uIrtaReg, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
1868{
1869 Assert(pMsiIn->Addr.dmar_remap.fIntrFormat == VTD_INTR_FORMAT_REMAPPABLE);
1870
1871 /* Validate reserved bits in the interrupt request. */
1872 AssertCompile(VTD_REMAPPABLE_MSI_ADDR_VALID_MASK == UINT32_MAX);
1873 if (!(pMsiIn->Data.u32 & ~VTD_REMAPPABLE_MSI_DATA_VALID_MASK))
1874 {
1875 /* Compute the index into the interrupt remap table. */
1876 uint16_t const uHandleHi = RT_BF_GET(pMsiIn->Addr.au32[0], VTD_BF_REMAPPABLE_MSI_ADDR_HANDLE_HI);
1877 uint16_t const uHandleLo = RT_BF_GET(pMsiIn->Addr.au32[0], VTD_BF_REMAPPABLE_MSI_ADDR_HANDLE_LO);
1878 uint16_t const uHandle = uHandleLo | (uHandleHi << 15);
1879 bool const fSubHandleValid = RT_BF_GET(pMsiIn->Addr.au32[0], VTD_BF_REMAPPABLE_MSI_ADDR_SHV);
1880 uint16_t const idxIntr = fSubHandleValid
1881 ? uHandle + RT_BF_GET(pMsiIn->Data.u32, VTD_BF_REMAPPABLE_MSI_DATA_SUBHANDLE)
1882 : uHandle;
1883
1884 /* Validate the index. */
1885 uint32_t const cEntries = VTD_IRTA_REG_GET_ENTRY_COUNT(uIrtaReg);
1886 if (idxIntr < cEntries)
1887 {
1888 /** @todo Implement and read IRTE from interrupt-entry cache here. */
1889
1890 /* Read the interrupt remap table entry (IRTE) at the index. */
1891 VTD_IRTE_T Irte;
1892 int rc = dmarIrReadIrte(pDevIns, uIrtaReg, idxIntr, &Irte);
1893 if (RT_SUCCESS(rc))
1894 {
1895 /* Check if the IRTE is present (this must be done -before- checking reserved bits). */
1896 uint64_t const uIrteQword0 = Irte.au64[0];
1897 uint64_t const uIrteQword1 = Irte.au64[1];
1898 bool const fPresent = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_P);
1899 if (fPresent)
1900 {
1901 /* Validate reserved bits in the IRTE. */
1902 bool const fExtIntrMode = RT_BF_GET(uIrtaReg, VTD_BF_IRTA_REG_EIME);
1903 uint64_t const fQw0ValidMask = fExtIntrMode ? VTD_IRTE_0_X2APIC_VALID_MASK : VTD_IRTE_0_XAPIC_VALID_MASK;
1904 if ( !(uIrteQword0 & ~fQw0ValidMask)
1905 && !(uIrteQword1 & ~VTD_IRTE_1_VALID_MASK))
1906 {
1907 /* Validate requester id (the device ID) as configured in the IRTE. */
1908 bool fSrcValid;
1909 DMARDIAG enmIrDiag;
1910 uint8_t const fSvt = RT_BF_GET(uIrteQword1, VTD_BF_1_IRTE_SVT);
1911 switch (fSvt)
1912 {
1913 case VTD_IRTE_SVT_NONE:
1914 {
1915 fSrcValid = true;
1916 enmIrDiag = kDmarDiag_None;
1917 break;
1918 }
1919
1920 case VTD_IRTE_SVT_VALIDATE_MASK:
1921 {
1922 static uint16_t const s_afValidMasks[] = { 0xffff, 0xfffb, 0xfff9, 0xfff8 };
1923 uint8_t const idxMask = RT_BF_GET(uIrteQword1, VTD_BF_1_IRTE_SQ) & 3;
1924 uint16_t const fValidMask = s_afValidMasks[idxMask];
1925 uint16_t const idSource = RT_BF_GET(uIrteQword1, VTD_BF_1_IRTE_SID);
1926 fSrcValid = (idDevice & fValidMask) == (idSource & fValidMask);
1927 enmIrDiag = kDmarDiag_Ir_Rfi_Irte_Svt_Masked;
1928 break;
1929 }
1930
1931 case VTD_IRTE_SVT_VALIDATE_BUS_RANGE:
1932 {
1933 uint16_t const idSource = RT_BF_GET(uIrteQword1, VTD_BF_1_IRTE_SID);
1934 uint8_t const uBusFirst = RT_HI_U8(idSource);
1935 uint8_t const uBusLast = RT_LO_U8(idSource);
1936 uint8_t const idDeviceBus = idDevice >> VBOX_PCI_BUS_SHIFT;
1937 fSrcValid = (idDeviceBus >= uBusFirst && idDeviceBus <= uBusLast);
1938 enmIrDiag = kDmarDiag_Ir_Rfi_Irte_Svt_Bus;
1939 break;
1940 }
1941
1942 default:
1943 {
1944 fSrcValid = false;
1945 enmIrDiag = kDmarDiag_Ir_Rfi_Irte_Svt_Bus;
1946 break;
1947 }
1948 }
1949
1950 if (fSrcValid)
1951 {
1952 uint8_t const fPostedMode = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_IM);
1953 if (!fPostedMode)
1954 {
1955 dmarIrRemapFromIrte(fExtIntrMode, &Irte, pMsiIn, pMsiOut);
1956 return VINF_SUCCESS;
1957 }
1958 dmarIntrFaultRecordQualified(pDevIns, kDmarDiag_Ir_Rfi_Irte_Mode_Invalid,
1959 VTDINTRFAULT_IRTE_PRESENT_RSVD, idDevice, idxIntr, &Irte);
1960 }
1961 else
1962 dmarIntrFaultRecordQualified(pDevIns, enmIrDiag, VTDINTRFAULT_IRTE_PRESENT_RSVD, idDevice, idxIntr,
1963 &Irte);
1964 }
1965 else
1966 dmarIntrFaultRecordQualified(pDevIns, kDmarDiag_Ir_Rfi_Irte_Rsvd, VTDINTRFAULT_IRTE_PRESENT_RSVD,
1967 idDevice, idxIntr, &Irte);
1968 }
1969 else
1970 dmarIntrFaultRecordQualified(pDevIns, kDmarDiag_Ir_Rfi_Irte_Not_Present, VTDINTRFAULT_IRTE_NOT_PRESENT,
1971 idDevice, idxIntr, &Irte);
1972 }
1973 else
1974 dmarIntrFaultRecord(pDevIns, kDmarDiag_Ir_Rfi_Irte_Read_Failed, VTDINTRFAULT_IRTE_READ_FAILED, idDevice, idxIntr);
1975 }
1976 else
1977 dmarIntrFaultRecord(pDevIns, kDmarDiag_Ir_Rfi_Intr_Index_Invalid, VTDINTRFAULT_INTR_INDEX_INVALID, idDevice, idxIntr);
1978 }
1979 else
1980 dmarIntrFaultRecord(pDevIns, kDmarDiag_Ir_Rfi_Rsvd, VTDINTRFAULT_REMAPPABLE_INTR_RSVD, idDevice, 0 /* idxIntr */);
1981 return VERR_IOMMU_INTR_REMAP_DENIED;
1982}
1983
1984
1985/**
1986 * Interrupt remap request from a device.
1987 *
1988 * @returns VBox status code.
1989 * @param pDevIns The IOMMU device instance.
1990 * @param idDevice The device ID (bus, device, function).
1991 * @param pMsiIn The source MSI.
1992 * @param pMsiOut Where to store the remapped MSI.
1993 */
1994static DECLCALLBACK(int) iommuIntelMsiRemap(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
1995{
1996 /* Validate. */
1997 Assert(pDevIns);
1998 Assert(pMsiIn);
1999 Assert(pMsiOut);
2000 RT_NOREF1(idDevice);
2001
2002 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2003 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
2004
2005 /* Lock and read all registers required for interrupt remapping up-front. */
2006 DMAR_LOCK(pDevIns, pThisCC);
2007 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
2008 uint64_t const uIrtaReg = pThis->uIrtaReg;
2009 DMAR_UNLOCK(pDevIns, pThisCC);
2010
2011 /* Check if interrupt remapping is enabled. */
2012 if (uGstsReg & VTD_BF_GSTS_REG_IRES_MASK)
2013 {
2014 bool const fIsRemappable = RT_BF_GET(pMsiIn->Addr.au32[0], VTD_BF_REMAPPABLE_MSI_ADDR_INTR_FMT);
2015 if (!fIsRemappable)
2016 {
2017 /* Handle compatibility format interrupts. */
2018 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMsiRemapCfi));
2019
2020 /* If EIME is enabled or CFIs are disabled, block the interrupt. */
2021 if ( (uIrtaReg & VTD_BF_IRTA_REG_EIME_MASK)
2022 || !(uGstsReg & VTD_BF_GSTS_REG_CFIS_MASK))
2023 {
2024 dmarIntrFaultRecord(pDevIns, kDmarDiag_Ir_Cfi_Blocked, VTDINTRFAULT_CFI_BLOCKED, idDevice, 0 /* idxIntr */);
2025 return VERR_IOMMU_INTR_REMAP_DENIED;
2026 }
2027
2028 /* Interrupt isn't subject to remapping, pass-through the interrupt. */
2029 *pMsiOut = *pMsiIn;
2030 return VINF_SUCCESS;
2031 }
2032
2033 /* Handle remappable format interrupts. */
2034 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMsiRemapRfi));
2035 return dmarIrRemapIntr(pDevIns, uIrtaReg, idDevice, pMsiIn, pMsiOut);
2036 }
2037
2038 /* Interrupt-remapping isn't enabled, all interrupts are pass-through. */
2039 *pMsiOut = *pMsiIn;
2040 return VINF_SUCCESS;
2041}
2042
2043
2044/**
2045 * @callback_method_impl{FNIOMMMIONEWWRITE}
2046 */
2047static DECLCALLBACK(VBOXSTRICTRC) dmarMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
2048{
2049 RT_NOREF1(pvUser);
2050 DMAR_ASSERT_MMIO_ACCESS_RET(off, cb);
2051
2052 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2053 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioWrite));
2054
2055 uint16_t const offReg = off;
2056 uint16_t const offLast = offReg + cb - 1;
2057 if (DMAR_IS_MMIO_OFF_VALID(offLast))
2058 {
2059 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
2060 DMAR_LOCK_RET(pDevIns, pThisCC, VINF_IOM_R3_MMIO_WRITE);
2061
2062 uint64_t uPrev = 0;
2063 uint64_t const uRegWritten = cb == 8 ? dmarRegWrite64(pThis, offReg, *(uint64_t *)pv, &uPrev)
2064 : dmarRegWrite32(pThis, offReg, *(uint32_t *)pv, (uint32_t *)&uPrev);
2065 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2066 switch (off)
2067 {
2068 case VTD_MMIO_OFF_GCMD_REG: /* 32-bit */
2069 {
2070 rcStrict = dmarGcmdRegWrite(pDevIns, uRegWritten);
2071 break;
2072 }
2073
2074 case VTD_MMIO_OFF_CCMD_REG: /* 64-bit */
2075 case VTD_MMIO_OFF_CCMD_REG + 4:
2076 {
2077 rcStrict = dmarCcmdRegWrite(pDevIns, offReg, cb, uRegWritten);
2078 break;
2079 }
2080
2081 case VTD_MMIO_OFF_FSTS_REG: /* 32-bit */
2082 {
2083 rcStrict = dmarFstsRegWrite(pDevIns, uRegWritten, uPrev);
2084 break;
2085 }
2086
2087 case VTD_MMIO_OFF_FECTL_REG: /* 32-bit */
2088 {
2089 rcStrict = dmarFectlRegWrite(pDevIns, uRegWritten);
2090 break;
2091 }
2092
2093 case VTD_MMIO_OFF_IQT_REG: /* 64-bit */
2094 /* VTD_MMIO_OFF_IQT_REG + 4: */ /* High 32-bits reserved. */
2095 {
2096 rcStrict = dmarIqtRegWrite(pDevIns, offReg, uRegWritten);
2097 break;
2098 }
2099
2100 case VTD_MMIO_OFF_IQA_REG: /* 64-bit */
2101 /* VTD_MMIO_OFF_IQA_REG + 4: */ /* High 32-bits data. */
2102 {
2103 rcStrict = dmarIqaRegWrite(pDevIns, offReg, uRegWritten);
2104 break;
2105 }
2106
2107 case VTD_MMIO_OFF_ICS_REG: /* 32-bit */
2108 {
2109 rcStrict = dmarIcsRegWrite(pDevIns, uRegWritten);
2110 break;
2111 }
2112
2113 case VTD_MMIO_OFF_IECTL_REG: /* 32-bit */
2114 {
2115 rcStrict = dmarIectlRegWrite(pDevIns, uRegWritten);
2116 break;
2117 }
2118
2119 case DMAR_MMIO_OFF_FRCD_HI_REG: /* 64-bit */
2120 case DMAR_MMIO_OFF_FRCD_HI_REG + 4:
2121 {
2122 rcStrict = dmarFrcdHiRegWrite(pDevIns, offReg, cb, uRegWritten, uPrev);
2123 break;
2124 }
2125 }
2126
2127 DMAR_UNLOCK(pDevIns, pThisCC);
2128 LogFlowFunc(("offReg=%#x uRegWritten=%#RX64 rc=%Rrc\n", offReg, uRegWritten, VBOXSTRICTRC_VAL(rcStrict)));
2129 return rcStrict;
2130 }
2131
2132 return VINF_IOM_MMIO_UNUSED_FF;
2133}
2134
2135
2136/**
2137 * @callback_method_impl{FNIOMMMIONEWREAD}
2138 */
2139static DECLCALLBACK(VBOXSTRICTRC) dmarMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
2140{
2141 RT_NOREF1(pvUser);
2142 DMAR_ASSERT_MMIO_ACCESS_RET(off, cb);
2143
2144 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2145 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioRead));
2146
2147 uint16_t const offReg = off;
2148 uint16_t const offLast = offReg + cb - 1;
2149 if (DMAR_IS_MMIO_OFF_VALID(offLast))
2150 {
2151 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
2152 DMAR_LOCK_RET(pDevIns, pThisCC, VINF_IOM_R3_MMIO_READ);
2153
2154 if (cb == 8)
2155 {
2156 *(uint64_t *)pv = dmarRegRead64(pThis, offReg);
2157 LogFlowFunc(("offReg=%#x pv=%#RX64\n", offReg, *(uint64_t *)pv));
2158 }
2159 else
2160 {
2161 *(uint32_t *)pv = dmarRegRead32(pThis, offReg);
2162 LogFlowFunc(("offReg=%#x pv=%#RX32\n", offReg, *(uint32_t *)pv));
2163 }
2164
2165 DMAR_UNLOCK(pDevIns, pThisCC);
2166 return VINF_SUCCESS;
2167 }
2168
2169 return VINF_IOM_MMIO_UNUSED_FF;
2170}
2171
2172
2173#ifdef IN_RING3
2174/**
2175 * Process requests in the invalidation queue.
2176 *
2177 * @param pDevIns The IOMMU device instance.
2178 * @param pvRequests The requests to process.
2179 * @param cbRequests The size of all requests (in bytes).
2180 * @param fDw The descriptor width (VTD_IQA_REG_DW_128_BIT or
2181 * VTD_IQA_REG_DW_256_BIT).
2182 * @param fTtm The table translation mode. Must not be VTD_TTM_RSVD.
2183 */
2184static void dmarR3InvQueueProcessRequests(PPDMDEVINS pDevIns, void const *pvRequests, uint32_t cbRequests, uint8_t fDw,
2185 uint8_t fTtm)
2186{
2187#define DMAR_IQE_FAULT_RECORD_RET(a_enmDiag, a_enmIqei) \
2188 do \
2189 { \
2190 dmarIqeFaultRecord(pDevIns, (a_enmDiag), (a_enmIqei)); \
2191 return; \
2192 } while (0)
2193
2194 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2195 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
2196
2197 DMAR_ASSERT_LOCK_IS_NOT_OWNER(pDevIns, pThisR3);
2198 Assert(fTtm != VTD_TTM_RSVD); /* Should've beeen handled by caller. */
2199
2200 /*
2201 * The below check is redundant since we check both TTM and DW for each
2202 * descriptor type we process. However, the error reported by hardware
2203 * may differ hence this is kept commented out but not removed from the code
2204 * if we need to change this in the future.
2205 *
2206 * In our implementation, we would report the descriptor type as invalid,
2207 * while on real hardware it may report descriptor width as invalid.
2208 * The Intel VT-d spec. is not clear which error takes preceedence.
2209 */
2210#if 0
2211 /*
2212 * Verify that 128-bit descriptors are not used when operating in scalable mode.
2213 * We don't check this while software writes IQA_REG but defer it until now because
2214 * RTADDR_REG can be updated lazily (via GCMD_REG.SRTP). The 256-bit descriptor check
2215 * -IS- performed when software writes IQA_REG since it only requires checking against
2216 * immutable hardware features.
2217 */
2218 if ( fTtm != VTD_TTM_SCALABLE_MODE
2219 || fDw != VTD_IQA_REG_DW_128_BIT)
2220 { /* likely */ }
2221 else
2222 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_IqaReg_Dw_128_Invalid, VTDIQEI_INVALID_DESCRIPTOR_WIDTH);
2223#endif
2224
2225 /*
2226 * Process requests in FIFO order.
2227 */
2228 uint8_t const cbDsc = fDw == VTD_IQA_REG_DW_256_BIT ? 32 : 16;
2229 for (uint32_t offDsc = 0; offDsc < cbRequests; offDsc += cbDsc)
2230 {
2231 uint64_t const *puDscQwords = (uint64_t const *)((uintptr_t)pvRequests + offDsc);
2232 uint64_t const uQword0 = puDscQwords[0];
2233 uint64_t const uQword1 = puDscQwords[1];
2234 uint8_t const fDscType = VTD_GENERIC_INV_DSC_GET_TYPE(uQword0);
2235 switch (fDscType)
2236 {
2237 case VTD_INV_WAIT_DSC_TYPE:
2238 {
2239 /* Validate descriptor type. */
2240 if ( fTtm == VTD_TTM_LEGACY_MODE
2241 || fDw == VTD_IQA_REG_DW_256_BIT)
2242 { /* likely */ }
2243 else
2244 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_Invalid, VTDIQEI_INVALID_DESCRIPTOR_TYPE);
2245
2246 /* Validate reserved bits. */
2247 uint64_t const fValidMask0 = !(pThis->fExtCapReg & VTD_BF_ECAP_REG_PDS_MASK)
2248 ? VTD_INV_WAIT_DSC_0_VALID_MASK & ~VTD_BF_0_INV_WAIT_DSC_PD_MASK
2249 : VTD_INV_WAIT_DSC_0_VALID_MASK;
2250 if ( !(uQword0 & ~fValidMask0)
2251 && !(uQword1 & ~VTD_INV_WAIT_DSC_1_VALID_MASK))
2252 { /* likely */ }
2253 else
2254 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd, VTDIQEI_RSVD_FIELD_VIOLATION);
2255
2256 if (fDw == VTD_IQA_REG_DW_256_BIT)
2257 {
2258 if ( !puDscQwords[2]
2259 && !puDscQwords[3])
2260 { /* likely */ }
2261 else
2262 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd, VTDIQEI_RSVD_FIELD_VIOLATION);
2263 }
2264
2265 /* Perform status write (this must be done prior to generating the completion interrupt). */
2266 bool const fSw = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_SW);
2267 if (fSw)
2268 {
2269 uint32_t const uStatus = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_STDATA);
2270 RTGCPHYS const GCPhysStatus = uQword1 & VTD_BF_1_INV_WAIT_DSC_STADDR_MASK;
2271 int const rc = PDMDevHlpPhysWrite(pDevIns, GCPhysStatus, (void const*)&uStatus, sizeof(uStatus));
2272 AssertRC(rc);
2273 }
2274
2275 /* Generate invalidation event interrupt. */
2276 bool const fIf = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_IF);
2277 if (fIf)
2278 {
2279 DMAR_LOCK(pDevIns, pThisR3);
2280 dmarR3InvEventRaiseInterrupt(pDevIns);
2281 DMAR_UNLOCK(pDevIns, pThisR3);
2282 }
2283
2284 STAM_COUNTER_INC(&pThis->StatInvWaitDsc);
2285 break;
2286 }
2287
2288 case VTD_CC_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatCcInvDsc); break;
2289 case VTD_IOTLB_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatIotlbInvDsc); break;
2290 case VTD_DEV_TLB_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatDevtlbInvDsc); break;
2291 case VTD_IEC_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatIecInvDsc); break;
2292 case VTD_P_IOTLB_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatPasidIotlbInvDsc); break;
2293 case VTD_PC_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatPasidCacheInvDsc); break;
2294 case VTD_P_DEV_TLB_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatPasidDevtlbInvDsc); break;
2295 default:
2296 {
2297 /* Stop processing further requests. */
2298 LogFunc(("Invalid descriptor type: %#x\n", fDscType));
2299 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Dsc_Type_Invalid, VTDIQEI_INVALID_DESCRIPTOR_TYPE);
2300 }
2301 }
2302 }
2303#undef DMAR_IQE_FAULT_RECORD_RET
2304}
2305
2306
2307/**
2308 * The invalidation-queue thread.
2309 *
2310 * @returns VBox status code.
2311 * @param pDevIns The IOMMU device instance.
2312 * @param pThread The command thread.
2313 */
2314static DECLCALLBACK(int) dmarR3InvQueueThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
2315{
2316 NOREF(pThread);
2317 LogFlowFunc(("\n"));
2318
2319 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
2320 return VINF_SUCCESS;
2321
2322 /*
2323 * Pre-allocate the maximum size of the invalidation queue allowed by the spec.
2324 * This prevents trashing the heap as well as deal with out-of-memory situations
2325 * up-front while starting the VM. It also simplifies the code from having to
2326 * dynamically grow/shrink the allocation based on how software sizes the queue.
2327 * Guests normally don't alter the queue size all the time, but that's not an
2328 * assumption we can make.
2329 */
2330 uint8_t const cMaxPages = 1 << VTD_BF_IQA_REG_QS_MASK;
2331 size_t const cbMaxQs = cMaxPages << X86_PAGE_SHIFT;
2332 void *pvRequests = RTMemAllocZ(cbMaxQs);
2333 AssertPtrReturn(pvRequests, VERR_NO_MEMORY);
2334
2335 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2336 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
2337
2338 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
2339 {
2340 /*
2341 * Sleep until we are woken up.
2342 */
2343 {
2344 int const rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEvtInvQueue, RT_INDEFINITE_WAIT);
2345 AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc), rc);
2346 if (RT_UNLIKELY(pThread->enmState != PDMTHREADSTATE_RUNNING))
2347 break;
2348 }
2349
2350 DMAR_LOCK(pDevIns, pThisR3);
2351 if (dmarInvQueueCanProcessRequests(pThis))
2352 {
2353 uint32_t offQueueHead;
2354 uint32_t offQueueTail;
2355 bool const fIsEmpty = dmarInvQueueIsEmptyEx(pThis, &offQueueHead, &offQueueTail);
2356 if (!fIsEmpty)
2357 {
2358 /*
2359 * Get the current queue size, descriptor width, queue base address and the
2360 * table translation mode while the lock is still held.
2361 */
2362 uint64_t const uIqaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
2363 uint8_t const cQueuePages = 1 << (uIqaReg & VTD_BF_IQA_REG_QS_MASK);
2364 uint32_t const cbQueue = cQueuePages << X86_PAGE_SHIFT;
2365 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
2366 uint8_t const fTtm = RT_BF_GET(pThis->uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
2367 RTGCPHYS const GCPhysRequests = (uIqaReg & VTD_BF_IQA_REG_IQA_MASK) + offQueueHead;
2368
2369 /* Paranoia. */
2370 Assert(cbQueue <= cbMaxQs);
2371 Assert(!(offQueueTail & ~VTD_BF_IQT_REG_QT_MASK));
2372 Assert(!(offQueueHead & ~VTD_BF_IQH_REG_QH_MASK));
2373 Assert(fDw != VTD_IQA_REG_DW_256_BIT || !(offQueueTail & RT_BIT(4)));
2374 Assert(fDw != VTD_IQA_REG_DW_256_BIT || !(offQueueHead & RT_BIT(4)));
2375 Assert(offQueueHead < cbQueue);
2376
2377 /*
2378 * A table translation mode of "reserved" isn't valid for any descriptor type.
2379 * However, RTADDR_REG can be modified in parallel to invalidation-queue processing,
2380 * but if ESRTPS is support, we will perform a global invalidation when software
2381 * changes RTADDR_REG, or it's the responsibility of software to do it explicitly.
2382 * So caching TTM while reading all descriptors should not be a problem.
2383 *
2384 * Also, validate the queue tail offset as it's mutable by software.
2385 */
2386 if ( fTtm != VTD_TTM_RSVD
2387 && offQueueTail < cbQueue)
2388 {
2389 /* Don't hold the lock while reading (a potentially large amount of) requests */
2390 DMAR_UNLOCK(pDevIns, pThisR3);
2391
2392 int rc;
2393 uint32_t cbRequests;
2394 if (offQueueTail > offQueueHead)
2395 {
2396 /* The requests have not wrapped around, read them in one go. */
2397 cbRequests = offQueueTail - offQueueHead;
2398 rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysRequests, pvRequests, cbRequests);
2399 }
2400 else
2401 {
2402 /* The requests have wrapped around, read forward and wrapped-around. */
2403 uint32_t const cbForward = cbQueue - offQueueHead;
2404 rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysRequests, pvRequests, cbForward);
2405
2406 uint32_t const cbWrapped = offQueueTail;
2407 if ( RT_SUCCESS(rc)
2408 && cbWrapped > 0)
2409 {
2410 rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysRequests + cbForward,
2411 (void *)((uintptr_t)pvRequests + cbForward), cbWrapped);
2412 }
2413 cbRequests = cbForward + cbWrapped;
2414 }
2415
2416 /* Re-acquire the lock since we need to update device state. */
2417 DMAR_LOCK(pDevIns, pThisR3);
2418
2419 if (RT_SUCCESS(rc))
2420 {
2421 /* Indicate to software we've fetched all requests. */
2422 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_IQH_REG, offQueueTail);
2423
2424 /* Don't hold the lock while processing requests. */
2425 DMAR_UNLOCK(pDevIns, pThisR3);
2426
2427 /* Process all requests. */
2428 Assert(cbRequests <= cbQueue);
2429 dmarR3InvQueueProcessRequests(pDevIns, pvRequests, cbRequests, fDw, fTtm);
2430
2431 /*
2432 * We've processed all requests and the lock shouldn't be held at this point.
2433 * Using 'continue' here allows us to skip re-acquiring the lock just to release
2434 * it again before going back to the thread loop. It's a bit ugly but it certainly
2435 * helps with performance.
2436 */
2437 DMAR_ASSERT_LOCK_IS_NOT_OWNER(pDevIns, pThisR3);
2438 continue;
2439 }
2440 else
2441 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dsc_Fetch_Error, VTDIQEI_FETCH_DESCRIPTOR_ERR);
2442 }
2443 else
2444 {
2445 if (fTtm == VTD_TTM_RSVD)
2446 dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Ttm_Rsvd, VTDIQEI_INVALID_TTM);
2447 else
2448 {
2449 Assert(offQueueTail >= cbQueue);
2450 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_Invalid, VTDIQEI_INVALID_TAIL_PTR);
2451 }
2452 }
2453 }
2454 }
2455 DMAR_UNLOCK(pDevIns, pThisR3);
2456 }
2457
2458 RTMemFree(pvRequests);
2459 pvRequests = NULL;
2460
2461 LogFlowFunc(("Invalidation-queue thread terminating\n"));
2462 return VINF_SUCCESS;
2463}
2464
2465
2466/**
2467 * Wakes up the invalidation-queue thread so it can respond to a state
2468 * change.
2469 *
2470 * @returns VBox status code.
2471 * @param pDevIns The IOMMU device instance.
2472 * @param pThread The invalidation-queue thread.
2473 *
2474 * @thread EMT.
2475 */
2476static DECLCALLBACK(int) dmarR3InvQueueThreadWakeUp(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
2477{
2478 RT_NOREF(pThread);
2479 LogFlowFunc(("\n"));
2480 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2481 return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtInvQueue);
2482}
2483
2484
2485/**
2486 * @callback_method_impl{FNDBGFHANDLERDEV}
2487 */
2488static DECLCALLBACK(void) dmarR3DbgInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
2489{
2490 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2491 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
2492 bool const fVerbose = RTStrCmp(pszArgs, "verbose") == 0;
2493
2494 /*
2495 * We lock the device to get a consistent register state as it is
2496 * ASSUMED pHlp->pfnPrintf is expensive, so we copy the registers (the
2497 * ones we care about here) into temporaries and release the lock ASAP.
2498 *
2499 * Order of register being read and outputted is in accordance with the
2500 * spec. for no particular reason.
2501 * See Intel VT-d spec. 10.4 "Register Descriptions".
2502 */
2503 DMAR_LOCK(pDevIns, pThisR3);
2504
2505 DMARDIAG const enmDiag = pThis->enmDiag;
2506 uint32_t const uVerReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_VER_REG);
2507 uint64_t const uCapReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_CAP_REG);
2508 uint64_t const uEcapReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_ECAP_REG);
2509 uint32_t const uGcmdReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GCMD_REG);
2510 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
2511 uint64_t const uRtaddrReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_RTADDR_REG);
2512 uint64_t const uCcmdReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_CCMD_REG);
2513 uint32_t const uFstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FSTS_REG);
2514 uint32_t const uFectlReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FECTL_REG);
2515 uint32_t const uFedataReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FEDATA_REG);
2516 uint32_t const uFeaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FEADDR_REG);
2517 uint32_t const uFeuaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FEUADDR_REG);
2518 uint64_t const uAflogReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_AFLOG_REG);
2519 uint32_t const uPmenReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PMEN_REG);
2520 uint32_t const uPlmbaseReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PLMBASE_REG);
2521 uint32_t const uPlmlimitReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PLMLIMIT_REG);
2522 uint64_t const uPhmbaseReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PHMBASE_REG);
2523 uint64_t const uPhmlimitReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PHMLIMIT_REG);
2524 uint64_t const uIqhReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQH_REG);
2525 uint64_t const uIqtReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQT_REG);
2526 uint64_t const uIqaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
2527 uint32_t const uIcsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_ICS_REG);
2528 uint32_t const uIectlReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IECTL_REG);
2529 uint32_t const uIedataReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IEDATA_REG);
2530 uint32_t const uIeaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IEADDR_REG);
2531 uint32_t const uIeuaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IEUADDR_REG);
2532 uint64_t const uIqercdReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQERCD_REG);
2533 uint64_t const uIrtaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IRTA_REG);
2534 uint64_t const uPqhReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PQH_REG);
2535 uint64_t const uPqtReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PQT_REG);
2536 uint64_t const uPqaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PQA_REG);
2537 uint32_t const uPrsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PRS_REG);
2538 uint32_t const uPectlReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PECTL_REG);
2539 uint32_t const uPedataReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PEDATA_REG);
2540 uint32_t const uPeaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PEADDR_REG);
2541 uint32_t const uPeuaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PEUADDR_REG);
2542 uint64_t const uMtrrcapReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_MTRRCAP_REG);
2543 uint64_t const uMtrrdefReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_MTRRDEF_REG);
2544
2545 DMAR_UNLOCK(pDevIns, pThisR3);
2546
2547 const char *const pszDiag = enmDiag < RT_ELEMENTS(g_apszDmarDiagDesc) ? g_apszDmarDiagDesc[enmDiag] : "(Unknown)";
2548 pHlp->pfnPrintf(pHlp, "Intel-IOMMU:\n");
2549 pHlp->pfnPrintf(pHlp, " Diag = %s\n", pszDiag);
2550
2551 /*
2552 * Non-verbose output.
2553 */
2554 if (!fVerbose)
2555 {
2556 pHlp->pfnPrintf(pHlp, " VER_REG = %#RX32\n", uVerReg);
2557 pHlp->pfnPrintf(pHlp, " CAP_REG = %#RX64\n", uCapReg);
2558 pHlp->pfnPrintf(pHlp, " ECAP_REG = %#RX64\n", uEcapReg);
2559 pHlp->pfnPrintf(pHlp, " GCMD_REG = %#RX32\n", uGcmdReg);
2560 pHlp->pfnPrintf(pHlp, " GSTS_REG = %#RX32\n", uGstsReg);
2561 pHlp->pfnPrintf(pHlp, " RTADDR_REG = %#RX64\n", uRtaddrReg);
2562 pHlp->pfnPrintf(pHlp, " CCMD_REG = %#RX64\n", uCcmdReg);
2563 pHlp->pfnPrintf(pHlp, " FSTS_REG = %#RX32\n", uFstsReg);
2564 pHlp->pfnPrintf(pHlp, " FECTL_REG = %#RX32\n", uFectlReg);
2565 pHlp->pfnPrintf(pHlp, " FEDATA_REG = %#RX32\n", uFedataReg);
2566 pHlp->pfnPrintf(pHlp, " FEADDR_REG = %#RX32\n", uFeaddrReg);
2567 pHlp->pfnPrintf(pHlp, " FEUADDR_REG = %#RX32\n", uFeuaddrReg);
2568 pHlp->pfnPrintf(pHlp, " AFLOG_REG = %#RX64\n", uAflogReg);
2569 pHlp->pfnPrintf(pHlp, " PMEN_REG = %#RX32\n", uPmenReg);
2570 pHlp->pfnPrintf(pHlp, " PLMBASE_REG = %#RX32\n", uPlmbaseReg);
2571 pHlp->pfnPrintf(pHlp, " PLMLIMIT_REG = %#RX32\n", uPlmlimitReg);
2572 pHlp->pfnPrintf(pHlp, " PHMBASE_REG = %#RX64\n", uPhmbaseReg);
2573 pHlp->pfnPrintf(pHlp, " PHMLIMIT_REG = %#RX64\n", uPhmlimitReg);
2574 pHlp->pfnPrintf(pHlp, " IQH_REG = %#RX64\n", uIqhReg);
2575 pHlp->pfnPrintf(pHlp, " IQT_REG = %#RX64\n", uIqtReg);
2576 pHlp->pfnPrintf(pHlp, " IQA_REG = %#RX64\n", uIqaReg);
2577 pHlp->pfnPrintf(pHlp, " ICS_REG = %#RX32\n", uIcsReg);
2578 pHlp->pfnPrintf(pHlp, " IECTL_REG = %#RX32\n", uIectlReg);
2579 pHlp->pfnPrintf(pHlp, " IEDATA_REG = %#RX32\n", uIedataReg);
2580 pHlp->pfnPrintf(pHlp, " IEADDR_REG = %#RX32\n", uIeaddrReg);
2581 pHlp->pfnPrintf(pHlp, " IEUADDR_REG = %#RX32\n", uIeuaddrReg);
2582 pHlp->pfnPrintf(pHlp, " IQERCD_REG = %#RX64\n", uIqercdReg);
2583 pHlp->pfnPrintf(pHlp, " IRTA_REG = %#RX64\n", uIrtaReg);
2584 pHlp->pfnPrintf(pHlp, " PQH_REG = %#RX64\n", uPqhReg);
2585 pHlp->pfnPrintf(pHlp, " PQT_REG = %#RX64\n", uPqtReg);
2586 pHlp->pfnPrintf(pHlp, " PQA_REG = %#RX64\n", uPqaReg);
2587 pHlp->pfnPrintf(pHlp, " PRS_REG = %#RX32\n", uPrsReg);
2588 pHlp->pfnPrintf(pHlp, " PECTL_REG = %#RX32\n", uPectlReg);
2589 pHlp->pfnPrintf(pHlp, " PEDATA_REG = %#RX32\n", uPedataReg);
2590 pHlp->pfnPrintf(pHlp, " PEADDR_REG = %#RX32\n", uPeaddrReg);
2591 pHlp->pfnPrintf(pHlp, " PEUADDR_REG = %#RX32\n", uPeuaddrReg);
2592 pHlp->pfnPrintf(pHlp, " MTRRCAP_REG = %#RX64\n", uMtrrcapReg);
2593 pHlp->pfnPrintf(pHlp, " MTRRDEF_REG = %#RX64\n", uMtrrdefReg);
2594 pHlp->pfnPrintf(pHlp, "\n");
2595 return;
2596 }
2597
2598 /*
2599 * Verbose output.
2600 */
2601 pHlp->pfnPrintf(pHlp, " VER_REG = %#RX32\n", uVerReg);
2602 {
2603 pHlp->pfnPrintf(pHlp, " MAJ = %#x\n", RT_BF_GET(uVerReg, VTD_BF_VER_REG_MAX));
2604 pHlp->pfnPrintf(pHlp, " MIN = %#x\n", RT_BF_GET(uVerReg, VTD_BF_VER_REG_MIN));
2605 }
2606 pHlp->pfnPrintf(pHlp, " CAP_REG = %#RX64\n", uCapReg);
2607 {
2608 uint8_t const uSagaw = RT_BF_GET(uCapReg, VTD_BF_CAP_REG_SAGAW);
2609 uint8_t const uMgaw = RT_BF_GET(uCapReg, VTD_BF_CAP_REG_MGAW);
2610 uint8_t const uNfr = RT_BF_GET(uCapReg, VTD_BF_CAP_REG_NFR);
2611 pHlp->pfnPrintf(pHlp, " ND = %u\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_ND));
2612 pHlp->pfnPrintf(pHlp, " AFL = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_AFL));
2613 pHlp->pfnPrintf(pHlp, " RWBF = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_RWBF));
2614 pHlp->pfnPrintf(pHlp, " PLMR = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_PLMR));
2615 pHlp->pfnPrintf(pHlp, " PHMR = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_PHMR));
2616 pHlp->pfnPrintf(pHlp, " CM = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_CM));
2617 pHlp->pfnPrintf(pHlp, " SAGAW = %#x (%u bits)\n", uSagaw, vtdCapRegGetSagawBits(uSagaw));
2618 pHlp->pfnPrintf(pHlp, " MGAW = %#x (%u bits)\n", uMgaw, uMgaw + 1);
2619 pHlp->pfnPrintf(pHlp, " ZLR = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_ZLR));
2620 pHlp->pfnPrintf(pHlp, " FRO = %#x bytes\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_FRO));
2621 pHlp->pfnPrintf(pHlp, " SLLPS = %#x\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_SLLPS));
2622 pHlp->pfnPrintf(pHlp, " PSI = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_PSI));
2623 pHlp->pfnPrintf(pHlp, " NFR = %u (%u FRCD register%s)\n", uNfr, uNfr + 1, uNfr > 0 ? "s" : "");
2624 pHlp->pfnPrintf(pHlp, " MAMV = %#x\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_MAMV));
2625 pHlp->pfnPrintf(pHlp, " DWD = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_DWD));
2626 pHlp->pfnPrintf(pHlp, " DRD = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_DRD));
2627 pHlp->pfnPrintf(pHlp, " FL1GP = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_FL1GP));
2628 pHlp->pfnPrintf(pHlp, " PI = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_PI));
2629 pHlp->pfnPrintf(pHlp, " FL5LP = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_FL5LP));
2630 pHlp->pfnPrintf(pHlp, " ESIRTPS = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_ESIRTPS));
2631 pHlp->pfnPrintf(pHlp, " ESRTPS = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_ESRTPS));
2632 }
2633 pHlp->pfnPrintf(pHlp, " ECAP_REG = %#RX64\n", uEcapReg);
2634 {
2635 uint8_t const uPss = RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PSS);
2636 pHlp->pfnPrintf(pHlp, " C = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_C));
2637 pHlp->pfnPrintf(pHlp, " QI = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_QI));
2638 pHlp->pfnPrintf(pHlp, " DT = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_DT));
2639 pHlp->pfnPrintf(pHlp, " IR = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_IR));
2640 pHlp->pfnPrintf(pHlp, " EIM = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_EIM));
2641 pHlp->pfnPrintf(pHlp, " PT = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PT));
2642 pHlp->pfnPrintf(pHlp, " SC = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SC));
2643 pHlp->pfnPrintf(pHlp, " IRO = %#x bytes\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_IRO));
2644 pHlp->pfnPrintf(pHlp, " MHMV = %#x\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_MHMV));
2645 pHlp->pfnPrintf(pHlp, " MTS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_MTS));
2646 pHlp->pfnPrintf(pHlp, " NEST = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_NEST));
2647 pHlp->pfnPrintf(pHlp, " PRS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PRS));
2648 pHlp->pfnPrintf(pHlp, " ERS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_ERS));
2649 pHlp->pfnPrintf(pHlp, " SRS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SRS));
2650 pHlp->pfnPrintf(pHlp, " NWFS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_NWFS));
2651 pHlp->pfnPrintf(pHlp, " EAFS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_EAFS));
2652 pHlp->pfnPrintf(pHlp, " PSS = %u (%u bits)\n", uPss, uPss > 0 ? uPss + 1 : 0);
2653 pHlp->pfnPrintf(pHlp, " PASID = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PASID));
2654 pHlp->pfnPrintf(pHlp, " DIT = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_DIT));
2655 pHlp->pfnPrintf(pHlp, " PDS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PDS));
2656 pHlp->pfnPrintf(pHlp, " SMTS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SMTS));
2657 pHlp->pfnPrintf(pHlp, " VCS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_VCS));
2658 pHlp->pfnPrintf(pHlp, " SLADS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SLADS));
2659 pHlp->pfnPrintf(pHlp, " SLTS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SLTS));
2660 pHlp->pfnPrintf(pHlp, " FLTS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_FLTS));
2661 pHlp->pfnPrintf(pHlp, " SMPWCS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SMPWCS));
2662 pHlp->pfnPrintf(pHlp, " RPS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_RPS));
2663 pHlp->pfnPrintf(pHlp, " ADMS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_ADMS));
2664 pHlp->pfnPrintf(pHlp, " RPRIVS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_RPRIVS));
2665 }
2666 pHlp->pfnPrintf(pHlp, " GCMD_REG = %#RX32\n", uGcmdReg);
2667 {
2668 uint8_t const fCfi = RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_CFI);
2669 pHlp->pfnPrintf(pHlp, " CFI = %u (%s)\n", fCfi, fCfi ? "Passthrough" : "Blocked");
2670 pHlp->pfnPrintf(pHlp, " SIRTP = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_SIRTP));
2671 pHlp->pfnPrintf(pHlp, " IRE = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_IRE));
2672 pHlp->pfnPrintf(pHlp, " QIE = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_QIE));
2673 pHlp->pfnPrintf(pHlp, " WBF = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_WBF));
2674 pHlp->pfnPrintf(pHlp, " EAFL = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_SFL));
2675 pHlp->pfnPrintf(pHlp, " SFL = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_SFL));
2676 pHlp->pfnPrintf(pHlp, " SRTP = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_SRTP));
2677 pHlp->pfnPrintf(pHlp, " TE = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_TE));
2678 }
2679 pHlp->pfnPrintf(pHlp, " GSTS_REG = %#RX32\n", uGstsReg);
2680 {
2681 uint8_t const fCfis = RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_CFIS);
2682 pHlp->pfnPrintf(pHlp, " CFIS = %u (%s)\n", fCfis, fCfis ? "Passthrough" : "Blocked");
2683 pHlp->pfnPrintf(pHlp, " IRTPS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_IRTPS));
2684 pHlp->pfnPrintf(pHlp, " IRES = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_IRES));
2685 pHlp->pfnPrintf(pHlp, " QIES = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_QIES));
2686 pHlp->pfnPrintf(pHlp, " WBFS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_WBFS));
2687 pHlp->pfnPrintf(pHlp, " AFLS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_AFLS));
2688 pHlp->pfnPrintf(pHlp, " FLS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_FLS));
2689 pHlp->pfnPrintf(pHlp, " RTPS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_RTPS));
2690 pHlp->pfnPrintf(pHlp, " TES = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_TES));
2691 }
2692 pHlp->pfnPrintf(pHlp, " RTADDR_REG = %#RX64\n", uRtaddrReg);
2693 {
2694 uint8_t const uTtm = RT_BF_GET(uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
2695 pHlp->pfnPrintf(pHlp, " RTA = %#RX64\n", uRtaddrReg & VTD_BF_RTADDR_REG_RTA_MASK);
2696 pHlp->pfnPrintf(pHlp, " TTM = %u (%s)\n", uTtm, vtdRtaddrRegGetTtmDesc(uTtm));
2697 }
2698 pHlp->pfnPrintf(pHlp, " CCMD_REG = %#RX64\n", uCcmdReg);
2699 pHlp->pfnPrintf(pHlp, " FSTS_REG = %#RX32\n", uFstsReg);
2700 {
2701 pHlp->pfnPrintf(pHlp, " PFO = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_PFO));
2702 pHlp->pfnPrintf(pHlp, " PPF = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_PPF));
2703 pHlp->pfnPrintf(pHlp, " AFO = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_AFO));
2704 pHlp->pfnPrintf(pHlp, " APF = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_APF));
2705 pHlp->pfnPrintf(pHlp, " IQE = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_IQE));
2706 pHlp->pfnPrintf(pHlp, " ICS = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_ICE));
2707 pHlp->pfnPrintf(pHlp, " ITE = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_ITE));
2708 pHlp->pfnPrintf(pHlp, " FRI = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_FRI));
2709 }
2710 pHlp->pfnPrintf(pHlp, " FECTL_REG = %#RX32\n", uFectlReg);
2711 {
2712 pHlp->pfnPrintf(pHlp, " IM = %RTbool\n", RT_BF_GET(uFectlReg, VTD_BF_FECTL_REG_IM));
2713 pHlp->pfnPrintf(pHlp, " IP = %RTbool\n", RT_BF_GET(uFectlReg, VTD_BF_FECTL_REG_IP));
2714 }
2715 pHlp->pfnPrintf(pHlp, " FEDATA_REG = %#RX32\n", uFedataReg);
2716 pHlp->pfnPrintf(pHlp, " FEADDR_REG = %#RX32\n", uFeaddrReg);
2717 pHlp->pfnPrintf(pHlp, " FEUADDR_REG = %#RX32\n", uFeuaddrReg);
2718 pHlp->pfnPrintf(pHlp, " AFLOG_REG = %#RX64\n", uAflogReg);
2719 pHlp->pfnPrintf(pHlp, " PMEN_REG = %#RX32\n", uPmenReg);
2720 pHlp->pfnPrintf(pHlp, " PLMBASE_REG = %#RX32\n", uPlmbaseReg);
2721 pHlp->pfnPrintf(pHlp, " PLMLIMIT_REG = %#RX32\n", uPlmlimitReg);
2722 pHlp->pfnPrintf(pHlp, " PHMBASE_REG = %#RX64\n", uPhmbaseReg);
2723 pHlp->pfnPrintf(pHlp, " PHMLIMIT_REG = %#RX64\n", uPhmlimitReg);
2724 pHlp->pfnPrintf(pHlp, " IQH_REG = %#RX64\n", uIqhReg);
2725 pHlp->pfnPrintf(pHlp, " IQT_REG = %#RX64\n", uIqtReg);
2726 pHlp->pfnPrintf(pHlp, " IQA_REG = %#RX64\n", uIqaReg);
2727 {
2728 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
2729 uint8_t const fQs = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_QS);
2730 uint8_t const cQueuePages = 1 << fQs;
2731 pHlp->pfnPrintf(pHlp, " DW = %u (%s)\n", fDw, fDw == VTD_IQA_REG_DW_128_BIT ? "128-bit" : "256-bit");
2732 pHlp->pfnPrintf(pHlp, " QS = %u (%u page%s)\n", fQs, cQueuePages, cQueuePages > 1 ? "s" : "");
2733 }
2734 pHlp->pfnPrintf(pHlp, " ICS_REG = %#RX32\n", uIcsReg);
2735 {
2736 pHlp->pfnPrintf(pHlp, " IWC = %u\n", RT_BF_GET(uIcsReg, VTD_BF_ICS_REG_IWC));
2737 }
2738 pHlp->pfnPrintf(pHlp, " IECTL_REG = %#RX32\n", uIectlReg);
2739 {
2740 pHlp->pfnPrintf(pHlp, " IM = %RTbool\n", RT_BF_GET(uIectlReg, VTD_BF_IECTL_REG_IM));
2741 pHlp->pfnPrintf(pHlp, " IP = %RTbool\n", RT_BF_GET(uIectlReg, VTD_BF_IECTL_REG_IP));
2742 }
2743 pHlp->pfnPrintf(pHlp, " IEDATA_REG = %#RX32\n", uIedataReg);
2744 pHlp->pfnPrintf(pHlp, " IEADDR_REG = %#RX32\n", uIeaddrReg);
2745 pHlp->pfnPrintf(pHlp, " IEUADDR_REG = %#RX32\n", uIeuaddrReg);
2746 pHlp->pfnPrintf(pHlp, " IQERCD_REG = %#RX64\n", uIqercdReg);
2747 {
2748 pHlp->pfnPrintf(pHlp, " ICESID = %#RX32\n", RT_BF_GET(uIqercdReg, VTD_BF_IQERCD_REG_ICESID));
2749 pHlp->pfnPrintf(pHlp, " ITESID = %#RX32\n", RT_BF_GET(uIqercdReg, VTD_BF_IQERCD_REG_ITESID));
2750 pHlp->pfnPrintf(pHlp, " IQEI = %#RX32\n", RT_BF_GET(uIqercdReg, VTD_BF_IQERCD_REG_IQEI));
2751 }
2752 pHlp->pfnPrintf(pHlp, " IRTA_REG = %#RX64\n", uIrtaReg);
2753 {
2754 uint32_t const cIrtEntries = VTD_IRTA_REG_GET_ENTRY_COUNT(uIrtaReg);
2755 uint32_t const cbIrt = sizeof(VTD_IRTE_T) * cIrtEntries;
2756 pHlp->pfnPrintf(pHlp, " IRTA = %#RX64\n", uIrtaReg & VTD_BF_IRTA_REG_IRTA_MASK);
2757 pHlp->pfnPrintf(pHlp, " EIME = %RTbool\n", RT_BF_GET(uIrtaReg, VTD_BF_IRTA_REG_EIME));
2758 pHlp->pfnPrintf(pHlp, " S = %u entries (%u bytes)\n", cIrtEntries, cbIrt);
2759 }
2760 pHlp->pfnPrintf(pHlp, " PQH_REG = %#RX64\n", uPqhReg);
2761 pHlp->pfnPrintf(pHlp, " PQT_REG = %#RX64\n", uPqtReg);
2762 pHlp->pfnPrintf(pHlp, " PQA_REG = %#RX64\n", uPqaReg);
2763 pHlp->pfnPrintf(pHlp, " PRS_REG = %#RX32\n", uPrsReg);
2764 pHlp->pfnPrintf(pHlp, " PECTL_REG = %#RX32\n", uPectlReg);
2765 pHlp->pfnPrintf(pHlp, " PEDATA_REG = %#RX32\n", uPedataReg);
2766 pHlp->pfnPrintf(pHlp, " PEADDR_REG = %#RX32\n", uPeaddrReg);
2767 pHlp->pfnPrintf(pHlp, " PEUADDR_REG = %#RX32\n", uPeuaddrReg);
2768 pHlp->pfnPrintf(pHlp, " MTRRCAP_REG = %#RX64\n", uMtrrcapReg);
2769 pHlp->pfnPrintf(pHlp, " MTRRDEF_REG = %#RX64\n", uMtrrdefReg);
2770 pHlp->pfnPrintf(pHlp, "\n");
2771}
2772
2773
2774/**
2775 * Initializes all registers in the DMAR unit.
2776 *
2777 * @param pDevIns The IOMMU device instance.
2778 */
2779static void dmarR3RegsInit(PPDMDEVINS pDevIns)
2780{
2781 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2782
2783 /*
2784 * Wipe all registers (required on reset).
2785 */
2786 RT_ZERO(pThis->abRegs0);
2787 RT_ZERO(pThis->abRegs1);
2788
2789 /*
2790 * Initialize registers not mutable by software prior to initializing other registers.
2791 */
2792 /* VER_REG */
2793 {
2794 pThis->uVerReg = RT_BF_MAKE(VTD_BF_VER_REG_MIN, DMAR_VER_MINOR)
2795 | RT_BF_MAKE(VTD_BF_VER_REG_MAX, DMAR_VER_MAJOR);
2796 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_VER_REG, pThis->uVerReg);
2797 }
2798
2799 uint8_t const fFlts = 1; /* First-Level translation support. */
2800 uint8_t const fSlts = 1; /* Second-Level translation support. */
2801 uint8_t const fPt = 1; /* Pass-Through support. */
2802 uint8_t const fSmts = fFlts & fSlts & fPt; /* Scalable mode translation support.*/
2803 uint8_t const fNest = 0; /* Nested translation support. */
2804
2805 /* CAP_REG */
2806 {
2807 uint8_t cGstPhysAddrBits;
2808 uint8_t cGstLinearAddrBits;
2809 PDMDevHlpCpuGetGuestAddrWidths(pDevIns, &cGstPhysAddrBits, &cGstLinearAddrBits);
2810
2811 uint8_t const fFl1gp = 1; /* First-Level 1GB pages support. */
2812 uint8_t const fFl5lp = 1; /* First-level 5-level paging support (PML5E). */
2813 uint8_t const fSl2mp = fSlts & 1; /* Second-Level 2MB pages support. */
2814 uint8_t const fSl2gp = fSlts & 1; /* Second-Level 1GB pages support. */
2815 uint8_t const fSllps = fSl2mp /* Second-Level large page Support. */
2816 | ((fSl2mp & fFl1gp) & RT_BIT(1));
2817 uint8_t const fMamv = (fSl2gp ? X86_PAGE_1G_SHIFT /* Maximum address mask value (for 2nd-level invalidations). */
2818 : X86_PAGE_2M_SHIFT)
2819 - X86_PAGE_4K_SHIFT;
2820 uint8_t const fNd = 2; /* Number of domains supported (0=16, 1=64, 2=256, 3=1K, 4=4K,
2821 5=16K, 6=64K, 7=Reserved). */
2822 uint8_t const fPsi = 1; /* Page selective invalidation. */
2823 uint8_t const uMgaw = cGstPhysAddrBits - 1; /* Maximum guest address width. */
2824 uint8_t const uSagaw = vtdCapRegGetSagaw(uMgaw); /* Supported adjust guest address width. */
2825 uint16_t const offFro = DMAR_MMIO_OFF_FRCD_LO_REG >> 4; /* MMIO offset of FRCD registers. */
2826 uint8_t const fEsrtps = 1; /* Enhanced SRTPS (auto invalidate cache on SRTP). */
2827 uint8_t const fEsirtps = 1; /* Enhanced SIRTPS (auto invalidate cache on SIRTP). */
2828
2829 pThis->fCapReg = RT_BF_MAKE(VTD_BF_CAP_REG_ND, fNd)
2830 | RT_BF_MAKE(VTD_BF_CAP_REG_AFL, 0) /* Advanced fault logging not supported. */
2831 | RT_BF_MAKE(VTD_BF_CAP_REG_RWBF, 0) /* Software need not flush write-buffers. */
2832 | RT_BF_MAKE(VTD_BF_CAP_REG_PLMR, 0) /* Protected Low-Memory Region not supported. */
2833 | RT_BF_MAKE(VTD_BF_CAP_REG_PHMR, 0) /* Protected High-Memory Region not supported. */
2834 | RT_BF_MAKE(VTD_BF_CAP_REG_CM, 1) /* Software should invalidate on mapping structure changes. */
2835 | RT_BF_MAKE(VTD_BF_CAP_REG_SAGAW, fSlts & uSagaw)
2836 | RT_BF_MAKE(VTD_BF_CAP_REG_MGAW, uMgaw)
2837 | RT_BF_MAKE(VTD_BF_CAP_REG_ZLR, 1) /** @todo Figure out if/how to support zero-length reads. */
2838 | RT_BF_MAKE(VTD_BF_CAP_REG_FRO, offFro)
2839 | RT_BF_MAKE(VTD_BF_CAP_REG_SLLPS, fSlts & fSllps)
2840 | RT_BF_MAKE(VTD_BF_CAP_REG_PSI, fPsi)
2841 | RT_BF_MAKE(VTD_BF_CAP_REG_NFR, DMAR_FRCD_REG_COUNT - 1)
2842 | RT_BF_MAKE(VTD_BF_CAP_REG_MAMV, fPsi & fMamv)
2843 | RT_BF_MAKE(VTD_BF_CAP_REG_DWD, 1)
2844 | RT_BF_MAKE(VTD_BF_CAP_REG_DRD, 1)
2845 | RT_BF_MAKE(VTD_BF_CAP_REG_FL1GP, fFlts & fFl1gp)
2846 | RT_BF_MAKE(VTD_BF_CAP_REG_PI, 0) /* Posted Interrupts not supported. */
2847 | RT_BF_MAKE(VTD_BF_CAP_REG_FL5LP, fFlts & fFl5lp)
2848 | RT_BF_MAKE(VTD_BF_CAP_REG_ESIRTPS, fEsirtps)
2849 | RT_BF_MAKE(VTD_BF_CAP_REG_ESRTPS, fEsrtps);
2850 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_CAP_REG, pThis->fCapReg);
2851 }
2852
2853 /* ECAP_REG */
2854 {
2855 uint8_t const fQi = 1; /* Queued-invalidations. */
2856 uint8_t const fIr = !!(DMAR_ACPI_DMAR_FLAGS & ACPI_DMAR_F_INTR_REMAP); /* Interrupt remapping support. */
2857 uint8_t const fMhmv = 0xf; /* Maximum handle mask value. */
2858 uint16_t const offIro = DMAR_MMIO_OFF_IVA_REG >> 4; /* MMIO offset of IOTLB registers. */
2859 uint8_t const fEim = 1; /* Extended interrupt mode.*/
2860 uint8_t const fAdms = 1; /* Abort DMA mode support. */
2861
2862 pThis->fExtCapReg = RT_BF_MAKE(VTD_BF_ECAP_REG_C, 0) /* Accesses don't snoop CPU cache. */
2863 | RT_BF_MAKE(VTD_BF_ECAP_REG_QI, fQi)
2864 | RT_BF_MAKE(VTD_BF_ECAP_REG_DT, 0) /* Device-TLBs not supported. */
2865 | RT_BF_MAKE(VTD_BF_ECAP_REG_IR, fQi & fIr)
2866 | RT_BF_MAKE(VTD_BF_ECAP_REG_EIM, fIr & fEim)
2867 | RT_BF_MAKE(VTD_BF_ECAP_REG_PT, fPt)
2868 | RT_BF_MAKE(VTD_BF_ECAP_REG_SC, 0) /* Snoop control not supported. */
2869 | RT_BF_MAKE(VTD_BF_ECAP_REG_IRO, offIro)
2870 | RT_BF_MAKE(VTD_BF_ECAP_REG_MHMV, fIr & fMhmv)
2871 | RT_BF_MAKE(VTD_BF_ECAP_REG_MTS, 0) /* Memory type not supported. */
2872 | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST, fNest)
2873 | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS, 0) /* 0 as DT not supported. */
2874 | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS, 0) /* Execute request not supported. */
2875 | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS, 0) /* Supervisor request not supported. */
2876 | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS, 0) /* 0 as DT not supported. */
2877 | RT_BF_MAKE(VTD_BF_ECAP_REG_EAFS, 0) /** @todo figure out if EAFS is required? */
2878 | RT_BF_MAKE(VTD_BF_ECAP_REG_PSS, 0) /* 0 as PASID not supported. */
2879 | RT_BF_MAKE(VTD_BF_ECAP_REG_PASID, 0) /* PASID support. */
2880 | RT_BF_MAKE(VTD_BF_ECAP_REG_DIT, 0) /* 0 as DT not supported. */
2881 | RT_BF_MAKE(VTD_BF_ECAP_REG_PDS, 0) /* 0 as DT not supported. */
2882 | RT_BF_MAKE(VTD_BF_ECAP_REG_SMTS, fSmts)
2883 | RT_BF_MAKE(VTD_BF_ECAP_REG_VCS, 0) /* 0 as PASID not supported (commands seem PASID specific). */
2884 | RT_BF_MAKE(VTD_BF_ECAP_REG_SLADS, 0) /* Second-level accessed/dirty not supported. */
2885 | RT_BF_MAKE(VTD_BF_ECAP_REG_SLTS, fSlts)
2886 | RT_BF_MAKE(VTD_BF_ECAP_REG_FLTS, fFlts)
2887 | RT_BF_MAKE(VTD_BF_ECAP_REG_SMPWCS, 0) /* 0 as PASID not supported. */
2888 | RT_BF_MAKE(VTD_BF_ECAP_REG_RPS, 0) /* We don't support RID_PASID field in SM context entry. */
2889 | RT_BF_MAKE(VTD_BF_ECAP_REG_ADMS, fAdms)
2890 | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0); /* 0 as SRS not supported. */
2891 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCapReg);
2892 }
2893
2894 /*
2895 * Initialize registers mutable by software.
2896 */
2897 /* FECTL_REG */
2898 {
2899 uint32_t const uCtl = RT_BF_MAKE(VTD_BF_FECTL_REG_IM, 1);
2900 dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_FECTL_REG, uCtl);
2901 }
2902
2903 /* ICETL_REG */
2904 {
2905 uint32_t const uCtl = RT_BF_MAKE(VTD_BF_IECTL_REG_IM, 1);
2906 dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_IECTL_REG, uCtl);
2907 }
2908
2909#ifdef VBOX_STRICT
2910 Assert(!RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_PRS)); /* PECTL_REG - Reserved if don't support PRS. */
2911 Assert(!RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_MTS)); /* MTRRCAP_REG - Reserved if we don't support MTS. */
2912#endif
2913}
2914
2915
2916/**
2917 * @interface_method_impl{PDMDEVREG,pfnReset}
2918 */
2919static DECLCALLBACK(void) iommuIntelR3Reset(PPDMDEVINS pDevIns)
2920{
2921 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
2922 LogFlowFunc(("\n"));
2923
2924 DMAR_LOCK(pDevIns, pThisR3);
2925 dmarR3RegsInit(pDevIns);
2926 DMAR_UNLOCK(pDevIns, pThisR3);
2927}
2928
2929
2930/**
2931 * @interface_method_impl{PDMDEVREG,pfnDestruct}
2932 */
2933static DECLCALLBACK(int) iommuIntelR3Destruct(PPDMDEVINS pDevIns)
2934{
2935 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2936 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
2937 LogFlowFunc(("\n"));
2938
2939 DMAR_LOCK(pDevIns, pThisR3);
2940
2941 if (pThis->hEvtInvQueue != NIL_SUPSEMEVENT)
2942 {
2943 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEvtInvQueue);
2944 pThis->hEvtInvQueue = NIL_SUPSEMEVENT;
2945 }
2946
2947 DMAR_UNLOCK(pDevIns, pThisR3);
2948 return VINF_SUCCESS;
2949}
2950
2951
2952/**
2953 * @interface_method_impl{PDMDEVREG,pfnConstruct}
2954 */
2955static DECLCALLBACK(int) iommuIntelR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
2956{
2957 RT_NOREF(pCfg);
2958
2959 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2960 PDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PDMARR3);
2961 pThisR3->pDevInsR3 = pDevIns;
2962
2963 LogFlowFunc(("iInstance=%d\n", iInstance));
2964 NOREF(iInstance);
2965
2966 /*
2967 * Register the IOMMU with PDM.
2968 */
2969 PDMIOMMUREGR3 IommuReg;
2970 RT_ZERO(IommuReg);
2971 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
2972 IommuReg.pfnMemAccess = iommuIntelMemAccess;
2973 IommuReg.pfnMemBulkAccess = iommuIntelMemBulkAccess;
2974 IommuReg.pfnMsiRemap = iommuIntelMsiRemap;
2975 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
2976 int rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisR3->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
2977 if (RT_FAILURE(rc))
2978 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to register ourselves as an IOMMU device"));
2979 if (pThisR3->CTX_SUFF(pIommuHlp)->u32Version != PDM_IOMMUHLPR3_VERSION)
2980 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
2981 N_("IOMMU helper version mismatch; got %#x expected %#x"),
2982 pThisR3->CTX_SUFF(pIommuHlp)->u32Version, PDM_IOMMUHLPR3_VERSION);
2983 if (pThisR3->CTX_SUFF(pIommuHlp)->u32TheEnd != PDM_IOMMUHLPR3_VERSION)
2984 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
2985 N_("IOMMU helper end-version mismatch; got %#x expected %#x"),
2986 pThisR3->CTX_SUFF(pIommuHlp)->u32TheEnd, PDM_IOMMUHLPR3_VERSION);
2987 AssertPtr(pThisR3->pIommuHlpR3->pfnLock);
2988 AssertPtr(pThisR3->pIommuHlpR3->pfnUnlock);
2989 AssertPtr(pThisR3->pIommuHlpR3->pfnLockIsOwner);
2990 AssertPtr(pThisR3->pIommuHlpR3->pfnSendMsi);
2991
2992 /*
2993 * Use PDM's critical section (via helpers) for the IOMMU device.
2994 */
2995 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
2996 AssertRCReturn(rc, rc);
2997
2998 /*
2999 * Initialize PCI configuration registers.
3000 */
3001 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
3002 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
3003
3004 /* Header. */
3005 PDMPciDevSetVendorId(pPciDev, DMAR_PCI_VENDOR_ID); /* Intel */
3006 PDMPciDevSetDeviceId(pPciDev, DMAR_PCI_DEVICE_ID); /* VirtualBox DMAR device */
3007 PDMPciDevSetRevisionId(pPciDev, DMAR_PCI_REVISION_ID); /* VirtualBox specific device implementation revision */
3008 PDMPciDevSetClassBase(pPciDev, VBOX_PCI_CLASS_SYSTEM); /* System Base Peripheral */
3009 PDMPciDevSetClassSub(pPciDev, VBOX_PCI_SUB_SYSTEM_OTHER); /* Other */
3010 PDMPciDevSetHeaderType(pPciDev, 0); /* Single function, type 0 */
3011 PDMPciDevSetSubSystemId(pPciDev, DMAR_PCI_DEVICE_ID); /* VirtualBox DMAR device */
3012 PDMPciDevSetSubSystemVendorId(pPciDev, DMAR_PCI_VENDOR_ID); /* Intel */
3013
3014 /** @todo Chipset spec says PCI Express Capability Id. Relevant for us? */
3015 PDMPciDevSetStatus(pPciDev, 0);
3016 PDMPciDevSetCapabilityList(pPciDev, 0);
3017
3018 /** @todo VTBAR at 0x180? */
3019
3020 /*
3021 * Register the PCI function with PDM.
3022 */
3023 rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
3024 AssertLogRelRCReturn(rc, rc);
3025
3026 /** @todo Register MSI but what's the MSI capability offset? */
3027#if 0
3028 /*
3029 * Register MSI support for the PCI device.
3030 * This must be done -after- registering it as a PCI device!
3031 */
3032#endif
3033
3034 /*
3035 * Register MMIO region.
3036 */
3037 AssertCompile(!(DMAR_MMIO_BASE_PHYSADDR & X86_PAGE_4K_OFFSET_MASK));
3038 rc = PDMDevHlpMmioCreateAndMap(pDevIns, DMAR_MMIO_BASE_PHYSADDR, DMAR_MMIO_SIZE, dmarMmioWrite, dmarMmioRead,
3039 IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_DWORD_QWORD_ZEROED, "Intel-IOMMU",
3040 &pThis->hMmio);
3041 AssertLogRelRCReturn(rc, rc);
3042
3043 /*
3044 * Register debugger info items.
3045 */
3046 rc = PDMDevHlpDBGFInfoRegister(pDevIns, "iommu", "Display IOMMU state.", dmarR3DbgInfo);
3047 AssertLogRelRCReturn(rc, rc);
3048
3049#ifdef VBOX_WITH_STATISTICS
3050 /*
3051 * Statistics.
3052 */
3053 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadR3, STAMTYPE_COUNTER, "R3/MmioRead", STAMUNIT_OCCURENCES, "Number of MMIO reads in R3");
3054 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadRZ, STAMTYPE_COUNTER, "RZ/MmioRead", STAMUNIT_OCCURENCES, "Number of MMIO reads in RZ.");
3055
3056 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteR3, STAMTYPE_COUNTER, "R3/MmioWrite", STAMUNIT_OCCURENCES, "Number of MMIO writes in R3.");
3057 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteRZ, STAMTYPE_COUNTER, "RZ/MmioWrite", STAMUNIT_OCCURENCES, "Number of MMIO writes in RZ.");
3058
3059 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapCfiR3, STAMTYPE_COUNTER, "R3/MsiRemapCfi", STAMUNIT_OCCURENCES, "Number of compatibility-format interrupt remap requests in R3.");
3060 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapCfiRZ, STAMTYPE_COUNTER, "RZ/MsiRemapCfi", STAMUNIT_OCCURENCES, "Number of compatibility-format interrupt remap requests in RZ.");
3061 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapRfiR3, STAMTYPE_COUNTER, "R3/MsiRemapRfi", STAMUNIT_OCCURENCES, "Number of remappable-format interrupt remap requests in R3.");
3062 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapRfiRZ, STAMTYPE_COUNTER, "RZ/MsiRemapRfi", STAMUNIT_OCCURENCES, "Number of remappable-format interrupt remap requests in RZ.");
3063
3064 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemReadR3, STAMTYPE_COUNTER, "R3/MemRead", STAMUNIT_OCCURENCES, "Number of memory read translation requests in R3.");
3065 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemReadRZ, STAMTYPE_COUNTER, "RZ/MemRead", STAMUNIT_OCCURENCES, "Number of memory read translation requests in RZ.");
3066
3067 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemWriteR3, STAMTYPE_COUNTER, "R3/MemWrite", STAMUNIT_OCCURENCES, "Number of memory write translation requests in R3.");
3068 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemWriteRZ, STAMTYPE_COUNTER, "RZ/MemWrite", STAMUNIT_OCCURENCES, "Number of memory write translation requests in RZ.");
3069
3070 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkReadR3, STAMTYPE_COUNTER, "R3/MemBulkRead", STAMUNIT_OCCURENCES, "Number of memory bulk read translation requests in R3.");
3071 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkReadRZ, STAMTYPE_COUNTER, "RZ/MemBulkRead", STAMUNIT_OCCURENCES, "Number of memory bulk read translation requests in RZ.");
3072
3073 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkWriteR3, STAMTYPE_COUNTER, "R3/MemBulkWrite", STAMUNIT_OCCURENCES, "Number of memory bulk write translation requests in R3.");
3074 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkWriteRZ, STAMTYPE_COUNTER, "RZ/MemBulkWrite", STAMUNIT_OCCURENCES, "Number of memory bulk write translation requests in RZ.");
3075
3076 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCcInvDsc, STAMTYPE_COUNTER, "R3/QI/CcInv", STAMUNIT_OCCURENCES, "Number of cc_inv_dsc processed.");
3077 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbInvDsc, STAMTYPE_COUNTER, "R3/QI/IotlbInv", STAMUNIT_OCCURENCES, "Number of iotlb_inv_dsc processed.");
3078 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatDevtlbInvDsc, STAMTYPE_COUNTER, "R3/QI/DevtlbInv", STAMUNIT_OCCURENCES, "Number of dev_tlb_inv_dsc processed.");
3079 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIecInvDsc, STAMTYPE_COUNTER, "R3/QI/IecInv", STAMUNIT_OCCURENCES, "Number of iec_inv processed.");
3080 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatInvWaitDsc, STAMTYPE_COUNTER, "R3/QI/InvWait", STAMUNIT_OCCURENCES, "Number of inv_wait_dsc processed.");
3081 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPasidIotlbInvDsc, STAMTYPE_COUNTER, "R3/QI/PasidIotlbInv", STAMUNIT_OCCURENCES, "Number of p_iotlb_inv_dsc processed.");
3082 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPasidCacheInvDsc, STAMTYPE_COUNTER, "R3/QI/PasidCacheInv", STAMUNIT_OCCURENCES, "Number of pc_inv_dsc pprocessed.");
3083 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPasidDevtlbInvDsc, STAMTYPE_COUNTER, "R3/QI/PasidDevtlbInv", STAMUNIT_OCCURENCES, "Number of p_dev_tlb_inv_dsc processed.");
3084#endif
3085
3086 /*
3087 * Initialize registers.
3088 */
3089 dmarR3RegsInit(pDevIns);
3090
3091 /*
3092 * Create invalidation-queue thread and semaphore.
3093 */
3094 char szInvQueueThread[32];
3095 RT_ZERO(szInvQueueThread);
3096 RTStrPrintf(szInvQueueThread, sizeof(szInvQueueThread), "IOMMU-QI-%u", iInstance);
3097 rc = PDMDevHlpThreadCreate(pDevIns, &pThisR3->pInvQueueThread, pThis, dmarR3InvQueueThread, dmarR3InvQueueThreadWakeUp,
3098 0 /* cbStack */, RTTHREADTYPE_IO, szInvQueueThread);
3099 AssertLogRelRCReturn(rc, rc);
3100
3101 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEvtInvQueue);
3102 AssertLogRelRCReturn(rc, rc);
3103
3104 /*
3105 * Log some of the features exposed to software.
3106 */
3107 uint32_t const uVerReg = pThis->uVerReg;
3108 uint8_t const cMaxGstAddrBits = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_MGAW) + 1;
3109 uint8_t const cSupGstAddrBits = vtdCapRegGetSagawBits(RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SAGAW));
3110 uint16_t const offFrcd = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_FRO);
3111 uint16_t const offIva = RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_IRO);
3112 LogRel(("%s: VER=%u.%u CAP=%#RX64 ECAP=%#RX64 (MGAW=%u bits, SAGAW=%u bits, FRO=%#x, IRO=%#x) mapped at %#RGp\n",
3113 DMAR_LOG_PFX, RT_BF_GET(uVerReg, VTD_BF_VER_REG_MAX), RT_BF_GET(uVerReg, VTD_BF_VER_REG_MIN),
3114 pThis->fCapReg, pThis->fExtCapReg, cMaxGstAddrBits, cSupGstAddrBits, offFrcd, offIva, DMAR_MMIO_BASE_PHYSADDR));
3115
3116 return VINF_SUCCESS;
3117}
3118
3119#else
3120
3121/**
3122 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
3123 */
3124static DECLCALLBACK(int) iommuIntelRZConstruct(PPDMDEVINS pDevIns)
3125{
3126 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
3127 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
3128 PDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PDMARCC);
3129 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
3130
3131 /* We will use PDM's critical section (via helpers) for the IOMMU device. */
3132 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
3133 AssertRCReturn(rc, rc);
3134
3135 /* Set up the MMIO RZ handlers. */
3136 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, dmarMmioWrite, dmarMmioRead, NULL /* pvUser */);
3137 AssertRCReturn(rc, rc);
3138
3139 /* Set up the IOMMU RZ callbacks. */
3140 PDMIOMMUREGCC IommuReg;
3141 RT_ZERO(IommuReg);
3142 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
3143 IommuReg.idxIommu = pThis->idxIommu;
3144 IommuReg.pfnMemAccess = iommuIntelMemAccess;
3145 IommuReg.pfnMemBulkAccess = iommuIntelMemBulkAccess;
3146 IommuReg.pfnMsiRemap = iommuIntelMsiRemap;
3147 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
3148
3149 rc = PDMDevHlpIommuSetUpContext(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp));
3150 AssertRCReturn(rc, rc);
3151 AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp), VERR_IOMMU_IPE_1);
3152 AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32Version == CTX_MID(PDM_IOMMUHLP,_VERSION), VERR_VERSION_MISMATCH);
3153 AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd == CTX_MID(PDM_IOMMUHLP,_VERSION), VERR_VERSION_MISMATCH);
3154 AssertPtr(pThisCC->CTX_SUFF(pIommuHlp)->pfnLock);
3155 AssertPtr(pThisCC->CTX_SUFF(pIommuHlp)->pfnUnlock);
3156 AssertPtr(pThisCC->CTX_SUFF(pIommuHlp)->pfnLockIsOwner);
3157 AssertPtr(pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi);
3158
3159 return VINF_SUCCESS;
3160}
3161
3162#endif
3163
3164
/**
 * The device registration structure.
 *
 * One structure serves all contexts: the descriptive fields at the top are
 * common, while the callback table that follows is selected at compile time
 * via IN_RING3 / IN_RING0 / IN_RC (ring-0 and raw-mode share the same
 * constructor, iommuIntelRZConstruct).  The initializers are positional, so
 * the field-name comments below must stay in step with PDMDEVREG.
 */
PDMDEVREG const g_DeviceIommuIntel =
{
    /* .u32Version = */             PDM_DEVREG_VERSION,
    /* .uReserved0 = */             0,
    /* .szName = */                 "iommu-intel",
    /* .fFlags = */                 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
    /* .fClass = */                 PDM_DEVREG_CLASS_PCI_BUILTIN,
    /* .cMaxInstances = */          1,
    /* .uSharedVersion = */         42,
    /* .cbInstanceShared = */       sizeof(DMAR),
    /* .cbInstanceCC = */           sizeof(DMARCC),
    /* .cbInstanceRC = */           sizeof(DMARRC),
    /* .cMaxPciDevices = */         1,
    /* .cMaxMsixVectors = */        0,
    /* .pszDescription = */         "IOMMU (Intel)",
#if defined(IN_RING3)
    /* Ring-3 callbacks: full construct/destruct/reset lifecycle. */
    /* .pszRCMod = */               "VBoxDDRC.rc",
    /* .pszR0Mod = */               "VBoxDDR0.r0",
    /* .pfnConstruct = */           iommuIntelR3Construct,
    /* .pfnDestruct = */            iommuIntelR3Destruct,
    /* .pfnRelocate = */            NULL,
    /* .pfnMemSetup = */            NULL,
    /* .pfnPowerOn = */             NULL,
    /* .pfnReset = */               iommuIntelR3Reset,
    /* .pfnSuspend = */             NULL,
    /* .pfnResume = */              NULL,
    /* .pfnAttach = */              NULL,
    /* .pfnDetach = */              NULL,
    /* .pfnQueryInterface = */      NULL,
    /* .pfnInitComplete = */        NULL,
    /* .pfnPowerOff = */            NULL,
    /* .pfnSoftReset = */           NULL,
    /* .pfnReserved0 = */           NULL,
    /* .pfnReserved1 = */           NULL,
    /* .pfnReserved2 = */           NULL,
    /* .pfnReserved3 = */           NULL,
    /* .pfnReserved4 = */           NULL,
    /* .pfnReserved5 = */           NULL,
    /* .pfnReserved6 = */           NULL,
    /* .pfnReserved7 = */           NULL,
#elif defined(IN_RING0)
    /* Ring-0 callbacks: construction only. */
    /* .pfnEarlyConstruct = */      NULL,
    /* .pfnConstruct = */           iommuIntelRZConstruct,
    /* .pfnDestruct = */            NULL,
    /* .pfnFinalDestruct = */       NULL,
    /* .pfnRequest = */             NULL,
    /* .pfnReserved0 = */           NULL,
    /* .pfnReserved1 = */           NULL,
    /* .pfnReserved2 = */           NULL,
    /* .pfnReserved3 = */           NULL,
    /* .pfnReserved4 = */           NULL,
    /* .pfnReserved5 = */           NULL,
    /* .pfnReserved6 = */           NULL,
    /* .pfnReserved7 = */           NULL,
#elif defined(IN_RC)
    /* Raw-mode callbacks: construction only, shared with ring-0. */
    /* .pfnConstruct = */           iommuIntelRZConstruct,
    /* .pfnReserved0 = */           NULL,
    /* .pfnReserved1 = */           NULL,
    /* .pfnReserved2 = */           NULL,
    /* .pfnReserved3 = */           NULL,
    /* .pfnReserved4 = */           NULL,
    /* .pfnReserved5 = */           NULL,
    /* .pfnReserved6 = */           NULL,
    /* .pfnReserved7 = */           NULL,
#else
# error "Not in IN_RING3, IN_RING0 or IN_RC!"
#endif
    /* .u32VersionEnd = */          PDM_DEVREG_VERSION
};
3237
3238#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
3239
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette