VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp@90113

Last change on this file: r90113, checked in by vboxsync, 4 years ago

Intel IOMMU: bugref:9967 Fix return code when translating DMA addresses to non-contiguous physical regions.

1/* $Id: DevIommuIntel.cpp 90113 2021-07-09 10:50:09Z vboxsync $ */
2/** @file
3 * IOMMU - Input/Output Memory Management Unit - Intel implementation.
4 */
5
6/*
7 * Copyright (C) 2021 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_IOMMU
23#include "VBoxDD.h"
24#include "DevIommuIntel.h"
25
26#include <VBox/iommu-intel.h>
27#include <iprt/mem.h>
28#include <iprt/string.h>
29
30
31/*********************************************************************************************************************************
32* Defined Constants And Macros *
33*********************************************************************************************************************************/
34/** Gets the low uint32_t of a uint64_t or something equivalent.
35 *
36 * This is suitable for casting constants outside code (since RT_LO_U32 can't be
37 * used as it asserts for correctness when compiling on certain compilers). */
38#define DMAR_LO_U32(a) (uint32_t)(UINT32_MAX & (a))
39
40/** Gets the high uint32_t of a uint64_t or something equivalent.
41 *
42 * This is suitable for casting constants outside code (since RT_HI_U32 can't be
43 * used as it asserts for correctness when compiling on certain compilers). */
44#define DMAR_HI_U32(a) (uint32_t)((a) >> 32)
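/* Illustrative example (hypothetical constant, not taken from the code below): for the
 * 64-bit value UINT64_C(0x89abcdef01234567), DMAR_LO_U32 yields 0x01234567 and
 * DMAR_HI_U32 yields 0x89abcdef. This is how the 64-bit VTD_*_REG_RW_MASK constants are
 * split across the uint32_t mask tables further below. */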
45
46/** Asserts MMIO access' offset and size are valid or returns appropriate error
47 * code suitable for returning from MMIO access handlers. */
48#define DMAR_ASSERT_MMIO_ACCESS_RET(a_off, a_cb) \
49 do { \
50 AssertReturn((a_cb) == 4 || (a_cb) == 8, VINF_IOM_MMIO_UNUSED_FF); \
51 AssertReturn(!((a_off) & ((a_cb) - 1)), VINF_IOM_MMIO_UNUSED_FF); \
52 } while (0)
53
54/** Checks if the MMIO offset is valid. */
55#define DMAR_IS_MMIO_OFF_VALID(a_off) ( (a_off) < DMAR_MMIO_GROUP_0_OFF_END \
56 || (a_off) - (uint16_t)DMAR_MMIO_GROUP_1_OFF_FIRST < (uint16_t)DMAR_MMIO_GROUP_1_SIZE)
57
58/** Acquires the DMAR lock but returns with the given busy error code on failure. */
59#define DMAR_LOCK_RET(a_pDevIns, a_pThisCC, a_rcBusy) \
60 do { \
61 if ((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), (a_rcBusy)) == VINF_SUCCESS) \
62 { /* likely */ } \
63 else \
64 return (a_rcBusy); \
65 } while (0)
66
67/** Acquires the DMAR lock (not expected to fail). */
68#ifdef IN_RING3
69# define DMAR_LOCK(a_pDevIns, a_pThisCC) (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), VERR_IGNORED)
70#else
71# define DMAR_LOCK(a_pDevIns, a_pThisCC) \
72 do { \
73 int const rcLock = (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), VINF_SUCCESS); \
74 AssertRC(rcLock); \
75 } while (0)
76#endif
77
78/** Release the DMAR lock. */
79#define DMAR_UNLOCK(a_pDevIns, a_pThisCC) (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnUnlock(a_pDevIns)
80
81/** Asserts that the calling thread owns the DMAR lock. */
82#define DMAR_ASSERT_LOCK_IS_OWNER(a_pDevIns, a_pThisCC) \
83 do { \
84 Assert((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns)); \
85 RT_NOREF1(a_pThisCC); \
86 } while (0)
87
88/** Asserts that the calling thread does not own the DMAR lock. */
89#define DMAR_ASSERT_LOCK_IS_NOT_OWNER(a_pDevIns, a_pThisCC) \
90 do { \
91 Assert((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns) == false); \
92 RT_NOREF1(a_pThisCC); \
93 } while (0)
94
95/** The number of fault recording registers our implementation supports.
96 * Normal guest operation shouldn't trigger faults anyway, so we only support the
97 * minimum number of registers (which is 1).
98 *
99 * See Intel VT-d spec. 10.4.2 "Capability Register" (CAP_REG.NFR). */
100#define DMAR_FRCD_REG_COUNT UINT32_C(1)
101
102/** Number of register groups (used in saved states). */
103#define DMAR_MMIO_GROUP_COUNT 2
104/** Offset of first register in group 0. */
105#define DMAR_MMIO_GROUP_0_OFF_FIRST VTD_MMIO_OFF_VER_REG
106/** Offset of last register in group 0 (inclusive). */
107#define DMAR_MMIO_GROUP_0_OFF_LAST VTD_MMIO_OFF_MTRR_PHYSMASK9_REG
108/** End offset of group 0 (exclusive). */
109#define DMAR_MMIO_GROUP_0_OFF_END (DMAR_MMIO_GROUP_0_OFF_LAST + 8 /* sizeof MTRR_PHYSMASK9_REG */)
110/** Size of the group 0 (in bytes). */
111#define DMAR_MMIO_GROUP_0_SIZE (DMAR_MMIO_GROUP_0_OFF_END - DMAR_MMIO_GROUP_0_OFF_FIRST)
112/** Number of implementation-defined MMIO register offsets - IVA_REG and
113 * FRCD_LO_REG (used in saved state). IOTLB_REG and FRCD_HI_REG are derived from
114 * IVA_REG and FRCD_LO_REG respectively. */
115#define DMAR_MMIO_OFF_IMPL_COUNT 2
116/** Implementation-specific MMIO offset of IVA_REG (used in saved state). */
117#define DMAR_MMIO_OFF_IVA_REG 0xe50
118/** Implementation-specific MMIO offset of IOTLB_REG. */
119#define DMAR_MMIO_OFF_IOTLB_REG 0xe58
120/** Implementation-specific MMIO offset of FRCD_LO_REG (used in saved state). */
121#define DMAR_MMIO_OFF_FRCD_LO_REG 0xe70
122/** Implementation-specific MMIO offset of FRCD_HI_REG. */
123#define DMAR_MMIO_OFF_FRCD_HI_REG 0xe78
124AssertCompile(!(DMAR_MMIO_OFF_FRCD_LO_REG & 0xf));
125AssertCompile(DMAR_MMIO_OFF_IOTLB_REG == DMAR_MMIO_OFF_IVA_REG + 8);
126AssertCompile(DMAR_MMIO_OFF_FRCD_HI_REG == DMAR_MMIO_OFF_FRCD_LO_REG + 8);
127
128/** Offset of first register in group 1. */
129#define DMAR_MMIO_GROUP_1_OFF_FIRST VTD_MMIO_OFF_VCCAP_REG
130/** Offset of last register in group 1 (inclusive). */
131#define DMAR_MMIO_GROUP_1_OFF_LAST (DMAR_MMIO_OFF_FRCD_LO_REG + 8) * DMAR_FRCD_REG_COUNT
132/** End offset of group 1 (exclusive). */
133#define DMAR_MMIO_GROUP_1_OFF_END (DMAR_MMIO_GROUP_1_OFF_LAST + 8 /* sizeof FRCD_HI_REG */)
134/** Size of the group 1 (in bytes). */
135#define DMAR_MMIO_GROUP_1_SIZE (DMAR_MMIO_GROUP_1_OFF_END - DMAR_MMIO_GROUP_1_OFF_FIRST)
136
137/** DMAR implementation's major version number (exposed to software).
138 * We report 6 as the major version since we support queued-invalidations as
139 * software may make assumptions based on that.
140 *
141 * See Intel VT-d spec. 10.4.7 "Context Command Register" (CCMD_REG.CAIG). */
142#define DMAR_VER_MAJOR 6
143/** DMAR implementation's minor version number (exposed to software). */
144#define DMAR_VER_MINOR 0
145
146/** Number of domains supported (0=16, 1=64, 2=256, 3=1K, 4=4K, 5=16K, 6=64K,
147 * 7=Reserved). */
148#define DMAR_ND 6
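/* Illustrative sketch (not used by the device code): the ND encoding above maps to
 * 2^(4 + 2*ND) domain IDs, e.g.:
 *     uint32_t const cDomains = RT_BIT_32(4 + 2 * DMAR_ND);   (65536 for DMAR_ND=6)
 * This corresponds to the 0xffff entry used for DMAR_ND in g_auNdMask further below. */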
149
150/** @name DMAR_PERM_XXX: DMA request permissions.
151 * The order of R, W, X bits is important as it corresponds to those bits in
152 * page-table entries.
153 *
154 * @{ */
155/** DMA request permission: Read. */
156#define DMAR_PERM_READ RT_BIT(0)
157/** DMA request permission: Write. */
158#define DMAR_PERM_WRITE RT_BIT(1)
159/** DMA request permission: Execute (ER). */
160#define DMAR_PERM_EXE RT_BIT(2)
161/** DMA request permission: Supervisor privilege (PR). */
162#define DMAR_PERM_PRIV RT_BIT(3)
163/** DMA request permissions: All. */
164#define DMAR_PERM_ALL (DMAR_PERM_READ | DMAR_PERM_WRITE | DMAR_PERM_EXE | DMAR_PERM_PRIV)
165/** @} */
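/* Illustrative sketch (uPte is a hypothetical second-level PTE value): since the R/W/X
 * bit positions above mirror the low bits of a page-table entry, the page permissions
 * can be extracted with a plain mask, e.g.:
 *     uint8_t const fPtePerm = uPte & (DMAR_PERM_READ | DMAR_PERM_WRITE | DMAR_PERM_EXE);
 */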
166
167/** Release log prefix string. */
168#define DMAR_LOG_PFX "Intel-IOMMU"
169/** The current saved state version. */
170#define DMAR_SAVED_STATE_VERSION 1
171
172
173/*********************************************************************************************************************************
174* Structures and Typedefs *
175*********************************************************************************************************************************/
176/**
177 * DMAR error diagnostics.
178 * Sorted alphabetically so it's easier to add and locate items, no other reason.
179 *
180 * @note Members of this enum are used as array indices, so gaps in enum
181 * values are not allowed. Update g_apszDmarDiagDesc when you modify
182 * fields in this enum.
183 */
184typedef enum
185{
186 /* No error, this must be zero! */
187 kDmarDiag_None = 0,
188
189 /* Address Translation Faults. */
190 kDmarDiag_At_Lm_CtxEntry_Not_Present,
191 kDmarDiag_At_Lm_CtxEntry_Read_Failed,
192 kDmarDiag_At_Lm_CtxEntry_Rsvd,
193 kDmarDiag_At_Lm_Pt_At_Block,
194 kDmarDiag_At_Lm_Pt_Aw_Invalid,
195 kDmarDiag_At_Lm_RootEntry_Not_Present,
196 kDmarDiag_At_Lm_RootEntry_Read_Failed,
197 kDmarDiag_At_Lm_RootEntry_Rsvd,
198 kDmarDiag_At_Lm_Tt_Invalid,
199 kDmarDiag_At_Lm_Ut_At_Block,
200 kDmarDiag_At_Lm_Ut_Aw_Invalid,
201 kDmarDiag_At_Rta_Adms_Not_Supported,
202 kDmarDiag_At_Rta_Rsvd,
203 kDmarDiag_At_Rta_Smts_Not_Supported,
204 kDmarDiag_At_Xm_AddrIn_Invalid,
205 kDmarDiag_At_Xm_AddrOut_Invalid,
206 kDmarDiag_At_Xm_Perm_Denied,
207 kDmarDiag_At_Xm_Pte_Rsvd,
208 kDmarDiag_At_Xm_Pte_Sllps_Invalid,
209 kDmarDiag_At_Xm_Read_Pte_Failed,
210 kDmarDiag_At_Xm_Slpptr_Read_Failed,
211
212 /* CCMD_REG faults. */
213 kDmarDiag_CcmdReg_Not_Supported,
214 kDmarDiag_CcmdReg_Qi_Enabled,
215 kDmarDiag_CcmdReg_Ttm_Invalid,
216
217 /* IQA_REG faults. */
218 kDmarDiag_IqaReg_Dsc_Fetch_Error,
219 kDmarDiag_IqaReg_Dw_128_Invalid,
220 kDmarDiag_IqaReg_Dw_256_Invalid,
221
222 /* Invalidation Queue Error Info. */
223 kDmarDiag_Iqei_Dsc_Type_Invalid,
224 kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd,
225 kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd,
226 kDmarDiag_Iqei_Inv_Wait_Dsc_Invalid,
227 kDmarDiag_Iqei_Ttm_Rsvd,
228
229 /* IQT_REG faults. */
230 kDmarDiag_IqtReg_Qt_Invalid,
231 kDmarDiag_IqtReg_Qt_Not_Aligned,
232
233 /* Interrupt Remapping Faults. */
234 kDmarDiag_Ir_Cfi_Blocked,
235 kDmarDiag_Ir_Rfi_Intr_Index_Invalid,
236 kDmarDiag_Ir_Rfi_Irte_Mode_Invalid,
237 kDmarDiag_Ir_Rfi_Irte_Not_Present,
238 kDmarDiag_Ir_Rfi_Irte_Read_Failed,
239 kDmarDiag_Ir_Rfi_Irte_Rsvd,
240 kDmarDiag_Ir_Rfi_Irte_Svt_Bus,
241 kDmarDiag_Ir_Rfi_Irte_Svt_Masked,
242 kDmarDiag_Ir_Rfi_Irte_Svt_Rsvd,
243 kDmarDiag_Ir_Rfi_Rsvd,
244
245 /* Member for determining array index limit. */
246 kDmarDiag_End,
247
248 /* Usual 32-bit type size hack. */
249 kDmarDiag_32Bit_Hack = 0x7fffffff
250} DMARDIAG;
251AssertCompileSize(DMARDIAG, 4);
252
253#ifdef IN_RING3
254/** DMAR diagnostic enum description expansion.
255 * The below construct ensures typos in the input to this macro are caught
256 * at compile time. */
257# define DMARDIAG_DESC(a_Name) RT_CONCAT(kDmarDiag_, a_Name) < kDmarDiag_End ? RT_STR(a_Name) : "Ignored"
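/* For instance, DMARDIAG_DESC(At_Lm_Tt_Invalid) expands to
 * kDmarDiag_At_Lm_Tt_Invalid < kDmarDiag_End ? "At_Lm_Tt_Invalid" : "Ignored",
 * so a misspelled name fails to compile because the corresponding kDmarDiag_
 * enumerator does not exist. */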
258
259/** DMAR diagnostics description for members in DMARDIAG. */
260static const char *const g_apszDmarDiagDesc[] =
261{
262 DMARDIAG_DESC(None ),
263
264 /* Address Translation Faults. */
265 DMARDIAG_DESC(At_Lm_CtxEntry_Not_Present ),
266 DMARDIAG_DESC(At_Lm_CtxEntry_Read_Failed ),
267 DMARDIAG_DESC(At_Lm_CtxEntry_Rsvd ),
268 DMARDIAG_DESC(At_Lm_Pt_At_Block ),
269 DMARDIAG_DESC(At_Lm_Pt_Aw_Invalid ),
270 DMARDIAG_DESC(At_Lm_RootEntry_Not_Present),
271 DMARDIAG_DESC(At_Lm_RootEntry_Read_Failed),
272 DMARDIAG_DESC(At_Lm_RootEntry_Rsvd ),
273 DMARDIAG_DESC(At_Lm_Tt_Invalid ),
274 DMARDIAG_DESC(At_Lm_Ut_At_Block ),
275 DMARDIAG_DESC(At_Lm_Ut_Aw_Invalid ),
276 DMARDIAG_DESC(At_Rta_Adms_Not_Supported ),
277 DMARDIAG_DESC(At_Rta_Rsvd ),
278 DMARDIAG_DESC(At_Rta_Smts_Not_Supported ),
279 DMARDIAG_DESC(At_Xm_AddrIn_Invalid ),
280 DMARDIAG_DESC(At_Xm_AddrOut_Invalid ),
281 DMARDIAG_DESC(At_Xm_Perm_Denied ),
282 DMARDIAG_DESC(At_Xm_Pte_Rsvd ),
283 DMARDIAG_DESC(At_Xm_Pte_Sllps_Invalid ),
284 DMARDIAG_DESC(At_Xm_Read_Pte_Failed ),
285 DMARDIAG_DESC(At_Xm_Slpptr_Read_Failed ),
286
287 /* CCMD_REG faults. */
288 DMARDIAG_DESC(CcmdReg_Not_Supported ),
289 DMARDIAG_DESC(CcmdReg_Qi_Enabled ),
290 DMARDIAG_DESC(CcmdReg_Ttm_Invalid ),
291
292 /* IQA_REG faults. */
293 DMARDIAG_DESC(IqaReg_Dsc_Fetch_Error ),
294 DMARDIAG_DESC(IqaReg_Dw_128_Invalid ),
295 DMARDIAG_DESC(IqaReg_Dw_256_Invalid ),
296
297 /* Invalidation Queue Error Info. */
298 DMARDIAG_DESC(Iqei_Dsc_Type_Invalid ),
299 DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_0_1_Rsvd ),
300 DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_2_3_Rsvd ),
301 DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_Invalid ),
302 DMARDIAG_DESC(Iqei_Ttm_Rsvd ),
303
304 /* IQT_REG faults. */
305 DMARDIAG_DESC(IqtReg_Qt_Invalid ),
306 DMARDIAG_DESC(IqtReg_Qt_Not_Aligned ),
307
308 /* Interrupt remapping faults. */
309 DMARDIAG_DESC(Ir_Cfi_Blocked ),
310 DMARDIAG_DESC(Ir_Rfi_Intr_Index_Invalid ),
311 DMARDIAG_DESC(Ir_Rfi_Irte_Mode_Invalid ),
312 DMARDIAG_DESC(Ir_Rfi_Irte_Not_Present ),
313 DMARDIAG_DESC(Ir_Rfi_Irte_Read_Failed ),
314 DMARDIAG_DESC(Ir_Rfi_Irte_Rsvd ),
315 DMARDIAG_DESC(Ir_Rfi_Irte_Svt_Bus ),
316 DMARDIAG_DESC(Ir_Rfi_Irte_Svt_Masked ),
317 DMARDIAG_DESC(Ir_Rfi_Irte_Svt_Rsvd ),
318 DMARDIAG_DESC(Ir_Rfi_Rsvd ),
319 /* kDmarDiag_End */
320};
321AssertCompile(RT_ELEMENTS(g_apszDmarDiagDesc) == kDmarDiag_End);
322# undef DMARDIAG_DESC
323#endif /* IN_RING3 */
324
325/**
326 * The shared DMAR device state.
327 */
328typedef struct DMAR
329{
330 /** IOMMU device index. */
331 uint32_t idxIommu;
332 /** Padding. */
333 uint32_t u32Padding0;
334
335 /** Registers (group 0). */
336 uint8_t abRegs0[DMAR_MMIO_GROUP_0_SIZE];
337 /** Registers (group 1). */
338 uint8_t abRegs1[DMAR_MMIO_GROUP_1_SIZE];
339
340 /** @name Lazily activated registers.
341 * These are the active values for lazily activated registers. Software is free to
342 * modify the actual register values while remapping/translation is enabled but they
343 * take effect only when explicitly signaled by software, hence we need to hold the
344 * active values separately.
345 * @{ */
346 /** Currently active IRTA_REG. */
347 uint64_t uIrtaReg;
348 /** Currently active RTADDR_REG. */
349 uint64_t uRtaddrReg;
350 /** @} */
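    /* Behavioural sketch (per the VT-d programming model, not extra device code): the guest
       may write a new root-table address to RTADDR_REG at any time, but uRtaddrReg is only
       latched when GCMD_REG.SRTP is subsequently set; until then translation keeps using
       the previously latched value. */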
351
352 /** @name Register copies for a tiny bit faster and more convenient access.
353 * @{ */
354 /** Copy of VER_REG. */
355 uint8_t uVerReg;
356 /** Alignment. */
357 uint8_t abPadding0[7];
358 /** Copy of CAP_REG. */
359 uint64_t fCapReg;
360 /** Copy of ECAP_REG. */
361 uint64_t fExtCapReg;
362 /** @} */
363
364 /** Host-address width (HAW) base address mask. */
365 uint64_t fHawBaseMask;
366 /** Maximum guest-address width (MGAW) invalid address mask. */
367 uint64_t fMgawInvMask;
368 /** Context-entry qword-1 valid mask. */
369 uint64_t fCtxEntryQw1ValidMask;
370 /** Maximum supported paging level (3, 4 or 5). */
371 uint8_t cMaxPagingLevel;
372 /** DMA request valid permissions mask. */
373 uint8_t fPermValidMask;
374 /** Alignment. */
375 uint8_t abPadding1[6];
376
377 /** The event semaphore the invalidation-queue thread waits on. */
378 SUPSEMEVENT hEvtInvQueue;
379 /** Error diagnostic. */
380 DMARDIAG enmDiag;
381 /** Padding. */
382 uint32_t uPadding0;
383 /** The MMIO handle. */
384 IOMMMIOHANDLE hMmio;
385
386#ifdef VBOX_WITH_STATISTICS
387 STAMCOUNTER StatMmioReadR3; /**< Number of MMIO reads in R3. */
388 STAMCOUNTER StatMmioReadRZ; /**< Number of MMIO reads in RZ. */
389 STAMCOUNTER StatMmioWriteR3; /**< Number of MMIO writes in R3. */
390 STAMCOUNTER StatMmioWriteRZ; /**< Number of MMIO writes in RZ. */
391
392 STAMCOUNTER StatMsiRemapCfiR3; /**< Number of compatibility-format interrupts remap requests in R3. */
393 STAMCOUNTER StatMsiRemapCfiRZ; /**< Number of compatibility-format interrupts remap requests in RZ. */
394 STAMCOUNTER StatMsiRemapRfiR3; /**< Number of remappable-format interrupts remap requests in R3. */
395 STAMCOUNTER StatMsiRemapRfiRZ; /**< Number of remappable-format interrupts remap requests in RZ. */
396
397 STAMCOUNTER StatMemReadR3; /**< Number of memory read translation requests in R3. */
398 STAMCOUNTER StatMemReadRZ; /**< Number of memory read translation requests in RZ. */
399 STAMCOUNTER StatMemWriteR3; /**< Number of memory write translation requests in R3. */
400 STAMCOUNTER StatMemWriteRZ; /**< Number of memory write translation requests in RZ. */
401
402 STAMCOUNTER StatMemBulkReadR3; /**< Number of memory read bulk translation requests in R3. */
403 STAMCOUNTER StatMemBulkReadRZ; /**< Number of memory read bulk translation requests in RZ. */
404 STAMCOUNTER StatMemBulkWriteR3; /**< Number of memory write bulk translation requests in R3. */
405 STAMCOUNTER StatMemBulkWriteRZ; /**< Number of memory write bulk translation requests in RZ. */
406
407 STAMCOUNTER StatCcInvDsc; /**< Number of Context-cache descriptors processed. */
408 STAMCOUNTER StatIotlbInvDsc; /**< Number of IOTLB descriptors processed. */
409 STAMCOUNTER StatDevtlbInvDsc; /**< Number of Device-TLB descriptors processed. */
410 STAMCOUNTER StatIecInvDsc; /**< Number of Interrupt-Entry cache descriptors processed. */
411 STAMCOUNTER StatInvWaitDsc; /**< Number of Invalidation wait descriptors processed. */
412 STAMCOUNTER StatPasidIotlbInvDsc; /**< Number of PASID-based IOTLB descriptors processed. */
413 STAMCOUNTER StatPasidCacheInvDsc; /**< Number of PASID-cache descriptors processed. */
414 STAMCOUNTER StatPasidDevtlbInvDsc; /**< Number of PASID-based device-TLB descriptors processed. */
415#endif
416} DMAR;
417/** Pointer to the DMAR device state. */
418typedef DMAR *PDMAR;
419/** Pointer to the const DMAR device state. */
420typedef DMAR const *PCDMAR;
421AssertCompileMemberAlignment(DMAR, abRegs0, 8);
422AssertCompileMemberAlignment(DMAR, abRegs1, 8);
423
424/**
425 * The ring-3 DMAR device state.
426 */
427typedef struct DMARR3
428{
429 /** Device instance. */
430 PPDMDEVINSR3 pDevInsR3;
431 /** The IOMMU helper. */
432 R3PTRTYPE(PCPDMIOMMUHLPR3) pIommuHlpR3;
433 /** The invalidation-queue thread. */
434 R3PTRTYPE(PPDMTHREAD) pInvQueueThread;
435} DMARR3;
436/** Pointer to the ring-3 DMAR device state. */
437typedef DMARR3 *PDMARR3;
438/** Pointer to the const ring-3 DMAR device state. */
439typedef DMARR3 const *PCDMARR3;
440
441/**
442 * The ring-0 DMAR device state.
443 */
444typedef struct DMARR0
445{
446 /** Device instance. */
447 PPDMDEVINSR0 pDevInsR0;
448 /** The IOMMU helper. */
449 R0PTRTYPE(PCPDMIOMMUHLPR0) pIommuHlpR0;
450} DMARR0;
451/** Pointer to the ring-0 IOMMU device state. */
452typedef DMARR0 *PDMARR0;
453/** Pointer to the const ring-0 IOMMU device state. */
454typedef DMARR0 const *PCDMARR0;
455
456/**
457 * The raw-mode DMAR device state.
458 */
459typedef struct DMARRC
460{
461 /** Device instance. */
462 PPDMDEVINSRC pDevInsRC;
463 /** The IOMMU helper. */
464 RCPTRTYPE(PCPDMIOMMUHLPRC) pIommuHlpRC;
465} DMARRC;
466/** Pointer to the raw-mode DMAR device state. */
467typedef DMARRC *PDMARRC;
468/** Pointer to the const raw-mode DMAR device state. */
469typedef DMARRC const *PCDMARRC;
470
471/** The DMAR device state for the current context. */
472typedef CTX_SUFF(DMAR) DMARCC;
473/** Pointer to the DMAR device state for the current context. */
474typedef CTX_SUFF(PDMAR) PDMARCC;
475/** Pointer to the const DMAR device state for the current context. */
476typedef CTX_SUFF(PDMAR) const PCDMARCC;
477
478/**
479 * DMAR originated events that generate interrupts.
480 */
481typedef enum DMAREVENTTYPE
482{
483 /** Invalidation completion event. */
484 DMAREVENTTYPE_INV_COMPLETE = 0,
485 /** Fault event. */
486 DMAREVENTTYPE_FAULT
487} DMAREVENTTYPE;
488
489/**
490 * I/O Page.
491 */
492typedef struct DMARIOPAGE
493{
494 /** The base DMA address of a page. */
495 RTGCPHYS GCPhysBase;
496 /** The page shift. */
497 uint8_t cShift;
498 /** The permissions of this page (DMAR_PERM_XXX). */
499 uint8_t fPerm;
500} DMARIOPAGE;
501/** Pointer to an I/O page. */
502typedef DMARIOPAGE *PDMARIOPAGE;
503/** Pointer to a const I/O page. */
504typedef DMARIOPAGE const *PCDMARIOPAGE;
505
506/**
507 * I/O Address Range.
508 */
509typedef struct DMARIOADDRRANGE
510{
511 /** The starting DMA address of this range. */
512 uint64_t uAddr;
513 /** The size of the range (in bytes). */
514 size_t cb;
515 /** The permissions of this range (DMAR_PERM_XXX). */
516 uint8_t fPerm;
517} DMARIOADDRRANGE;
518/** Pointer to an I/O address range. */
519typedef DMARIOADDRRANGE *PDMARIOADDRRANGE;
520/** Pointer to a const I/O address range. */
521typedef DMARIOADDRRANGE const *PCDMARIOADDRRANGE;
522
523/**
524 * DMA Memory Request (Input).
525 */
526typedef struct DMARMEMREQIN
527{
528 /** The address range being accessed. */
529 DMARIOADDRRANGE AddrRange;
530 /** The source device ID (bus, device, function). */
531 uint16_t idDevice;
532 /** The PASID if present (can be NIL_PCIPASID). */
533 PCIPASID Pasid;
534 /** The address translation type. */
535 PCIADDRTYPE enmAddrType;
536 /** The request type. */
537 VTDREQTYPE enmReqType;
538} DMARMEMREQIN;
539/** Pointer to a DMA memory request input. */
540typedef DMARMEMREQIN *PDMARMEMREQIN;
541/** Pointer to a const DMA memory request input. */
542typedef DMARMEMREQIN const *PCDMARMEMREQIN;
543
544/**
545 * DMA Memory Request (Output).
546 */
547typedef struct DMARMEMREQOUT
548{
549 /** The address range of the translated region. */
550 DMARIOADDRRANGE AddrRange;
551 /** The domain ID of the translated region. */
552 uint16_t idDomain;
553} DMARMEMREQOUT;
554/** Pointer to a DMA memory request output. */
555typedef DMARMEMREQOUT *PDMARMEMREQOUT;
556/** Pointer to a const DMA memory request output. */
557typedef DMARMEMREQOUT const *PCDMARMEMREQOUT;
558
559/**
560 * DMA Memory Request (Auxiliary Info).
561 * These get updated and used as part of the translation process.
562 */
563typedef struct DMARMEMREQAUX
564{
565 /** The table translation mode (VTD_TTM_XXX). */
566 uint8_t fTtm;
567 /** The fault processing disabled (FPD) bit. */
568 uint8_t fFpd;
569 /** The paging level of the translation. */
570 uint8_t cPagingLevel;
571 uint8_t abPadding[5];
572 /** The address of the first-level page-table. */
573 uint64_t GCPhysFlPt;
574 /** The address of second-level page-table. */
575 uint64_t GCPhysSlPt;
576} DMARMEMREQAUX;
577/** Pointer to a DMA memory request auxiliary info. */
578typedef DMARMEMREQAUX *PDMARMEMREQAUX;
579/** Pointer to a const DMA memory request auxiliary info. */
580typedef DMARMEMREQAUX const *PCDMARMEMREQAUX;
581
582/**
583 * DMA Memory Request Remapping Information.
584 */
585typedef struct DMARMEMREQREMAP
586{
587 /** The DMA memory request input. */
588 DMARMEMREQIN In;
589 /** DMA memory request auxiliary information. */
590 DMARMEMREQAUX Aux;
591 /** The DMA memory request output. */
592 DMARMEMREQOUT Out;
593} DMARMEMREQREMAP;
594/** Pointer to a DMA remap info. */
595typedef DMARMEMREQREMAP *PDMARMEMREQREMAP;
596/** Pointer to a const DMA remap info. */
597typedef DMARMEMREQREMAP const *PCDMARMEMREQREMAP;
598
599/**
600 * Callback function to lookup a DMA address.
601 *
602 * @returns VBox status code.
603 * @param pDevIns The IOMMU device instance.
604 * @param pMemReqIn The DMA memory request input.
605 * @param pMemReqAux The DMA memory request auxiliary info.
606 * @param pIoPageOut Where to store the output of the lookup.
607 */
608typedef DECLCALLBACKTYPE(int, FNDMADDRLOOKUP,(PPDMDEVINS pDevIns, PCDMARMEMREQIN pMemReqIn, PCDMARMEMREQAUX pMemReqAux,
609 PDMARIOPAGE pIoPageOut));
610/** Pointer to a DMA address-lookup function. */
611typedef FNDMADDRLOOKUP *PFNDMADDRLOOKUP;
612
613
614/*********************************************************************************************************************************
615* Global Variables *
616*********************************************************************************************************************************/
617/**
618 * Read-write masks for DMAR registers (group 0).
619 */
620static uint32_t const g_au32RwMasks0[] =
621{
622 /* Offset Register Low High */
623 /* 0x000 VER_REG */ VTD_VER_REG_RW_MASK,
624 /* 0x004 Reserved */ 0,
625 /* 0x008 CAP_REG */ DMAR_LO_U32(VTD_CAP_REG_RW_MASK), DMAR_HI_U32(VTD_CAP_REG_RW_MASK),
626 /* 0x010 ECAP_REG */ DMAR_LO_U32(VTD_ECAP_REG_RW_MASK), DMAR_HI_U32(VTD_ECAP_REG_RW_MASK),
627 /* 0x018 GCMD_REG */ VTD_GCMD_REG_RW_MASK,
628 /* 0x01c GSTS_REG */ VTD_GSTS_REG_RW_MASK,
629 /* 0x020 RTADDR_REG */ DMAR_LO_U32(VTD_RTADDR_REG_RW_MASK), DMAR_HI_U32(VTD_RTADDR_REG_RW_MASK),
630 /* 0x028 CCMD_REG */ DMAR_LO_U32(VTD_CCMD_REG_RW_MASK), DMAR_HI_U32(VTD_CCMD_REG_RW_MASK),
631 /* 0x030 Reserved */ 0,
632 /* 0x034 FSTS_REG */ VTD_FSTS_REG_RW_MASK,
633 /* 0x038 FECTL_REG */ VTD_FECTL_REG_RW_MASK,
634 /* 0x03c FEDATA_REG */ VTD_FEDATA_REG_RW_MASK,
635 /* 0x040 FEADDR_REG */ VTD_FEADDR_REG_RW_MASK,
636 /* 0x044 FEUADDR_REG */ VTD_FEUADDR_REG_RW_MASK,
637 /* 0x048 Reserved */ 0, 0,
638 /* 0x050 Reserved */ 0, 0,
639 /* 0x058 AFLOG_REG */ DMAR_LO_U32(VTD_AFLOG_REG_RW_MASK), DMAR_HI_U32(VTD_AFLOG_REG_RW_MASK),
640 /* 0x060 Reserved */ 0,
641 /* 0x064 PMEN_REG */ 0, /* RO as we don't support PLMR and PHMR. */
642 /* 0x068 PLMBASE_REG */ 0, /* RO as we don't support PLMR. */
643 /* 0x06c PLMLIMIT_REG */ 0, /* RO as we don't support PLMR. */
644 /* 0x070 PHMBASE_REG */ 0, 0, /* RO as we don't support PHMR. */
645 /* 0x078 PHMLIMIT_REG */ 0, 0, /* RO as we don't support PHMR. */
646 /* 0x080 IQH_REG */ DMAR_LO_U32(VTD_IQH_REG_RW_MASK), DMAR_HI_U32(VTD_IQH_REG_RW_MASK),
647 /* 0x088 IQT_REG */ DMAR_LO_U32(VTD_IQT_REG_RW_MASK), DMAR_HI_U32(VTD_IQT_REG_RW_MASK),
648 /* 0x090 IQA_REG */ DMAR_LO_U32(VTD_IQA_REG_RW_MASK), DMAR_HI_U32(VTD_IQA_REG_RW_MASK),
649 /* 0x098 Reserved */ 0,
650 /* 0x09c ICS_REG */ VTD_ICS_REG_RW_MASK,
651 /* 0x0a0 IECTL_REG */ VTD_IECTL_REG_RW_MASK,
652 /* 0x0a4 IEDATA_REG */ VTD_IEDATA_REG_RW_MASK,
653 /* 0x0a8 IEADDR_REG */ VTD_IEADDR_REG_RW_MASK,
654 /* 0x0ac IEUADDR_REG */ VTD_IEUADDR_REG_RW_MASK,
655 /* 0x0b0 IQERCD_REG */ DMAR_LO_U32(VTD_IQERCD_REG_RW_MASK), DMAR_HI_U32(VTD_IQERCD_REG_RW_MASK),
656 /* 0x0b8 IRTA_REG */ DMAR_LO_U32(VTD_IRTA_REG_RW_MASK), DMAR_HI_U32(VTD_IRTA_REG_RW_MASK),
657 /* 0x0c0 PQH_REG */ DMAR_LO_U32(VTD_PQH_REG_RW_MASK), DMAR_HI_U32(VTD_PQH_REG_RW_MASK),
658 /* 0x0c8 PQT_REG */ DMAR_LO_U32(VTD_PQT_REG_RW_MASK), DMAR_HI_U32(VTD_PQT_REG_RW_MASK),
659 /* 0x0d0 PQA_REG */ DMAR_LO_U32(VTD_PQA_REG_RW_MASK), DMAR_HI_U32(VTD_PQA_REG_RW_MASK),
660 /* 0x0d8 Reserved */ 0,
661 /* 0x0dc PRS_REG */ VTD_PRS_REG_RW_MASK,
662 /* 0x0e0 PECTL_REG */ VTD_PECTL_REG_RW_MASK,
663 /* 0x0e4 PEDATA_REG */ VTD_PEDATA_REG_RW_MASK,
664 /* 0x0e8 PEADDR_REG */ VTD_PEADDR_REG_RW_MASK,
665 /* 0x0ec PEUADDR_REG */ VTD_PEUADDR_REG_RW_MASK,
666 /* 0x0f0 Reserved */ 0, 0,
667 /* 0x0f8 Reserved */ 0, 0,
668 /* 0x100 MTRRCAP_REG */ DMAR_LO_U32(VTD_MTRRCAP_REG_RW_MASK), DMAR_HI_U32(VTD_MTRRCAP_REG_RW_MASK),
669 /* 0x108 MTRRDEF_REG */ 0, 0, /* RO as we don't support MTS. */
670 /* 0x110 Reserved */ 0, 0,
671 /* 0x118 Reserved */ 0, 0,
672 /* 0x120 MTRR_FIX64_00000_REG */ 0, 0, /* RO as we don't support MTS. */
673 /* 0x128 MTRR_FIX16K_80000_REG */ 0, 0,
674 /* 0x130 MTRR_FIX16K_A0000_REG */ 0, 0,
675 /* 0x138 MTRR_FIX4K_C0000_REG */ 0, 0,
676 /* 0x140 MTRR_FIX4K_C8000_REG */ 0, 0,
677 /* 0x148 MTRR_FIX4K_D0000_REG */ 0, 0,
678 /* 0x150 MTRR_FIX4K_D8000_REG */ 0, 0,
679 /* 0x158 MTRR_FIX4K_E0000_REG */ 0, 0,
680 /* 0x160 MTRR_FIX4K_E8000_REG */ 0, 0,
681 /* 0x168 MTRR_FIX4K_F0000_REG */ 0, 0,
682 /* 0x170 MTRR_FIX4K_F8000_REG */ 0, 0,
683 /* 0x178 Reserved */ 0, 0,
684 /* 0x180 MTRR_PHYSBASE0_REG */ 0, 0, /* RO as we don't support MTS. */
685 /* 0x188 MTRR_PHYSMASK0_REG */ 0, 0,
686 /* 0x190 MTRR_PHYSBASE1_REG */ 0, 0,
687 /* 0x198 MTRR_PHYSMASK1_REG */ 0, 0,
688 /* 0x1a0 MTRR_PHYSBASE2_REG */ 0, 0,
689 /* 0x1a8 MTRR_PHYSMASK2_REG */ 0, 0,
690 /* 0x1b0 MTRR_PHYSBASE3_REG */ 0, 0,
691 /* 0x1b8 MTRR_PHYSMASK3_REG */ 0, 0,
692 /* 0x1c0 MTRR_PHYSBASE4_REG */ 0, 0,
693 /* 0x1c8 MTRR_PHYSMASK4_REG */ 0, 0,
694 /* 0x1d0 MTRR_PHYSBASE5_REG */ 0, 0,
695 /* 0x1d8 MTRR_PHYSMASK5_REG */ 0, 0,
696 /* 0x1e0 MTRR_PHYSBASE6_REG */ 0, 0,
697 /* 0x1e8 MTRR_PHYSMASK6_REG */ 0, 0,
698 /* 0x1f0 MTRR_PHYSBASE7_REG */ 0, 0,
699 /* 0x1f8 MTRR_PHYSMASK7_REG */ 0, 0,
700 /* 0x200 MTRR_PHYSBASE8_REG */ 0, 0,
701 /* 0x208 MTRR_PHYSMASK8_REG */ 0, 0,
702 /* 0x210 MTRR_PHYSBASE9_REG */ 0, 0,
703 /* 0x218 MTRR_PHYSMASK9_REG */ 0, 0,
704};
705AssertCompile(sizeof(g_au32RwMasks0) == DMAR_MMIO_GROUP_0_SIZE);
706
707/**
708 * Read-only Status, Write-1-to-clear masks for DMAR registers (group 0).
709 */
710static uint32_t const g_au32Rw1cMasks0[] =
711{
712 /* Offset Register Low High */
713 /* 0x000 VER_REG */ 0,
714 /* 0x004 Reserved */ 0,
715 /* 0x008 CAP_REG */ 0, 0,
716 /* 0x010 ECAP_REG */ 0, 0,
717 /* 0x018 GCMD_REG */ 0,
718 /* 0x01c GSTS_REG */ 0,
719 /* 0x020 RTADDR_REG */ 0, 0,
720 /* 0x028 CCMD_REG */ 0, 0,
721 /* 0x030 Reserved */ 0,
722 /* 0x034 FSTS_REG */ VTD_FSTS_REG_RW1C_MASK,
723 /* 0x038 FECTL_REG */ 0,
724 /* 0x03c FEDATA_REG */ 0,
725 /* 0x040 FEADDR_REG */ 0,
726 /* 0x044 FEUADDR_REG */ 0,
727 /* 0x048 Reserved */ 0, 0,
728 /* 0x050 Reserved */ 0, 0,
729 /* 0x058 AFLOG_REG */ 0, 0,
730 /* 0x060 Reserved */ 0,
731 /* 0x064 PMEN_REG */ 0,
732 /* 0x068 PLMBASE_REG */ 0,
733 /* 0x06c PLMLIMIT_REG */ 0,
734 /* 0x070 PHMBASE_REG */ 0, 0,
735 /* 0x078 PHMLIMIT_REG */ 0, 0,
736 /* 0x080 IQH_REG */ 0, 0,
737 /* 0x088 IQT_REG */ 0, 0,
738 /* 0x090 IQA_REG */ 0, 0,
739 /* 0x098 Reserved */ 0,
740 /* 0x09c ICS_REG */ VTD_ICS_REG_RW1C_MASK,
741 /* 0x0a0 IECTL_REG */ 0,
742 /* 0x0a4 IEDATA_REG */ 0,
743 /* 0x0a8 IEADDR_REG */ 0,
744 /* 0x0ac IEUADDR_REG */ 0,
745 /* 0x0b0 IQERCD_REG */ 0, 0,
746 /* 0x0b8 IRTA_REG */ 0, 0,
747 /* 0x0c0 PQH_REG */ 0, 0,
748 /* 0x0c8 PQT_REG */ 0, 0,
749 /* 0x0d0 PQA_REG */ 0, 0,
750 /* 0x0d8 Reserved */ 0,
751 /* 0x0dc PRS_REG */ 0,
752 /* 0x0e0 PECTL_REG */ 0,
753 /* 0x0e4 PEDATA_REG */ 0,
754 /* 0x0e8 PEADDR_REG */ 0,
755 /* 0x0ec PEUADDR_REG */ 0,
756 /* 0x0f0 Reserved */ 0, 0,
757 /* 0x0f8 Reserved */ 0, 0,
758 /* 0x100 MTRRCAP_REG */ 0, 0,
759 /* 0x108 MTRRDEF_REG */ 0, 0,
760 /* 0x110 Reserved */ 0, 0,
761 /* 0x118 Reserved */ 0, 0,
762 /* 0x120 MTRR_FIX64_00000_REG */ 0, 0,
763 /* 0x128 MTRR_FIX16K_80000_REG */ 0, 0,
764 /* 0x130 MTRR_FIX16K_A0000_REG */ 0, 0,
765 /* 0x138 MTRR_FIX4K_C0000_REG */ 0, 0,
766 /* 0x140 MTRR_FIX4K_C8000_REG */ 0, 0,
767 /* 0x148 MTRR_FIX4K_D0000_REG */ 0, 0,
768 /* 0x150 MTRR_FIX4K_D8000_REG */ 0, 0,
769 /* 0x158 MTRR_FIX4K_E0000_REG */ 0, 0,
770 /* 0x160 MTRR_FIX4K_E8000_REG */ 0, 0,
771 /* 0x168 MTRR_FIX4K_F0000_REG */ 0, 0,
772 /* 0x170 MTRR_FIX4K_F8000_REG */ 0, 0,
773 /* 0x178 Reserved */ 0, 0,
774 /* 0x180 MTRR_PHYSBASE0_REG */ 0, 0,
775 /* 0x188 MTRR_PHYSMASK0_REG */ 0, 0,
776 /* 0x190 MTRR_PHYSBASE1_REG */ 0, 0,
777 /* 0x198 MTRR_PHYSMASK1_REG */ 0, 0,
778 /* 0x1a0 MTRR_PHYSBASE2_REG */ 0, 0,
779 /* 0x1a8 MTRR_PHYSMASK2_REG */ 0, 0,
780 /* 0x1b0 MTRR_PHYSBASE3_REG */ 0, 0,
781 /* 0x1b8 MTRR_PHYSMASK3_REG */ 0, 0,
782 /* 0x1c0 MTRR_PHYSBASE4_REG */ 0, 0,
783 /* 0x1c8 MTRR_PHYSMASK4_REG */ 0, 0,
784 /* 0x1d0 MTRR_PHYSBASE5_REG */ 0, 0,
785 /* 0x1d8 MTRR_PHYSMASK5_REG */ 0, 0,
786 /* 0x1e0 MTRR_PHYSBASE6_REG */ 0, 0,
787 /* 0x1e8 MTRR_PHYSMASK6_REG */ 0, 0,
788 /* 0x1f0 MTRR_PHYSBASE7_REG */ 0, 0,
789 /* 0x1f8 MTRR_PHYSMASK7_REG */ 0, 0,
790 /* 0x200 MTRR_PHYSBASE8_REG */ 0, 0,
791 /* 0x208 MTRR_PHYSMASK8_REG */ 0, 0,
792 /* 0x210 MTRR_PHYSBASE9_REG */ 0, 0,
793 /* 0x218 MTRR_PHYSMASK9_REG */ 0, 0,
794};
795AssertCompile(sizeof(g_au32Rw1cMasks0) == DMAR_MMIO_GROUP_0_SIZE);
796
797/**
798 * Read-write masks for DMAR registers (group 1).
799 */
800static uint32_t const g_au32RwMasks1[] =
801{
802 /* Offset Register Low High */
803 /* 0xe00 VCCAP_REG */ DMAR_LO_U32(VTD_VCCAP_REG_RW_MASK), DMAR_HI_U32(VTD_VCCAP_REG_RW_MASK),
804 /* 0xe08 VCMD_EO_REG */ DMAR_LO_U32(VTD_VCMD_EO_REG_RW_MASK), DMAR_HI_U32(VTD_VCMD_EO_REG_RW_MASK),
805 /* 0xe10 VCMD_REG */ 0, 0, /* RO: VCS not supported. */
806 /* 0xe18 VCMDRSVD_REG */ 0, 0,
807 /* 0xe20 VCRSP_REG */ 0, 0, /* RO: VCS not supported. */
808 /* 0xe28 VCRSPRSVD_REG */ 0, 0,
809 /* 0xe30 Reserved */ 0, 0,
810 /* 0xe38 Reserved */ 0, 0,
811 /* 0xe40 Reserved */ 0, 0,
812 /* 0xe48 Reserved */ 0, 0,
813 /* 0xe50 IVA_REG */ DMAR_LO_U32(VTD_IVA_REG_RW_MASK), DMAR_HI_U32(VTD_IVA_REG_RW_MASK),
814 /* 0xe58 IOTLB_REG */ DMAR_LO_U32(VTD_IOTLB_REG_RW_MASK), DMAR_HI_U32(VTD_IOTLB_REG_RW_MASK),
815 /* 0xe60 Reserved */ 0, 0,
816 /* 0xe68 Reserved */ 0, 0,
817 /* 0xe70 FRCD_REG_LO */ DMAR_LO_U32(VTD_FRCD_REG_LO_RW_MASK), DMAR_HI_U32(VTD_FRCD_REG_LO_RW_MASK),
818 /* 0xe78 FRCD_REG_HI */ DMAR_LO_U32(VTD_FRCD_REG_HI_RW_MASK), DMAR_HI_U32(VTD_FRCD_REG_HI_RW_MASK),
819};
820AssertCompile(sizeof(g_au32RwMasks1) == DMAR_MMIO_GROUP_1_SIZE);
821AssertCompile((DMAR_MMIO_OFF_FRCD_LO_REG - DMAR_MMIO_GROUP_1_OFF_FIRST) + DMAR_FRCD_REG_COUNT * 2 * sizeof(uint64_t) == DMAR_MMIO_GROUP_1_SIZE);
822
823/**
824 * Read-only Status, Write-1-to-clear masks for DMAR registers (group 1).
825 */
826static uint32_t const g_au32Rw1cMasks1[] =
827{
828 /* Offset Register Low High */
829 /* 0xe00 VCCAP_REG */ 0, 0,
830 /* 0xe08 VCMD_EO_REG */ 0, 0,
831 /* 0xe10 VCMD_REG */ 0, 0,
832 /* 0xe18 VCMDRSVD_REG */ 0, 0,
833 /* 0xe20 VCRSP_REG */ 0, 0,
834 /* 0xe28 VCRSPRSVD_REG */ 0, 0,
835 /* 0xe30 Reserved */ 0, 0,
836 /* 0xe38 Reserved */ 0, 0,
837 /* 0xe40 Reserved */ 0, 0,
838 /* 0xe48 Reserved */ 0, 0,
839 /* 0xe50 IVA_REG */ 0, 0,
840 /* 0xe58 IOTLB_REG */ 0, 0,
841 /* 0xe60 Reserved */ 0, 0,
842 /* 0xe68 Reserved */ 0, 0,
843 /* 0xe70 FRCD_REG_LO */ DMAR_LO_U32(VTD_FRCD_REG_LO_RW1C_MASK), DMAR_HI_U32(VTD_FRCD_REG_LO_RW1C_MASK),
844 /* 0xe78 FRCD_REG_HI */ DMAR_LO_U32(VTD_FRCD_REG_HI_RW1C_MASK), DMAR_HI_U32(VTD_FRCD_REG_HI_RW1C_MASK),
845};
846AssertCompile(sizeof(g_au32Rw1cMasks1) == DMAR_MMIO_GROUP_1_SIZE);
847
848/** Array of RW masks for each register group. */
849static uint8_t const *g_apbRwMasks[] = { (uint8_t *)&g_au32RwMasks0[0], (uint8_t *)&g_au32RwMasks1[0] };
850
851/** Array of RW1C masks for each register group. */
852static uint8_t const *g_apbRw1cMasks[] = { (uint8_t *)&g_au32Rw1cMasks0[0], (uint8_t *)&g_au32Rw1cMasks1[0] };
853
854/* Mask arrays must be identical in size (even the bounds-checking code assumes this). */
855AssertCompile(sizeof(g_apbRw1cMasks) == sizeof(g_apbRwMasks));
856
857#ifdef IN_RING3
858/** Array of valid domain-ID bits. */
859static uint16_t const g_auNdMask[] = { 0xf, 0x3f, 0xff, 0x3ff, 0xfff, 0x3fff, 0xffff, 0 };
860AssertCompile(RT_ELEMENTS(g_auNdMask) >= DMAR_ND);
861#endif
862
863
864#ifndef VBOX_DEVICE_STRUCT_TESTCASE
865#ifdef IN_RING3
866/**
867 * Returns the supported adjusted guest-address width (SAGAW) given the maximum
868 * guest address width (MGAW).
869 *
870 * @returns The CAP_REG.SAGAW value.
871 * @param uMgaw The CAP_REG.MGAW value.
872 */
873static uint8_t vtdCapRegGetSagaw(uint8_t uMgaw)
874{
875 /*
876 * It doesn't make sense to me that a CPU (or IOMMU hardware) will ever support
877 * 5-level paging but not 4 or 3-level paging. So smaller page-table levels
878 * are always OR'ed in below.
879 *
880 * The bit values below (57, 48, 39 bits) represent the levels of page-table walks
881 * for 4KB base page size (5-level, 4-level and 3-level paging respectively).
882 *
883 * See Intel VT-d spec. 10.4.2 "Capability Register".
884 */
885 ++uMgaw;
886 uint8_t const fSagaw = uMgaw >= 57 ? RT_BIT(3) | RT_BIT(2) | RT_BIT(1)
887 : uMgaw >= 48 ? RT_BIT(2) | RT_BIT(1)
888 : uMgaw >= 39 ? RT_BIT(1)
889 : 0;
890 return fSagaw;
891}
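/* Worked example (values are illustrative): a CAP_REG.MGAW field value of 47 encodes a
 * 48-bit maximum guest-address width. With uMgaw=47 the increment yields 48, so the
 * function returns RT_BIT(2) | RT_BIT(1), i.e. 4-level (48-bit) and 3-level (39-bit)
 * translation are advertised while 5-level (57-bit) is not. */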
892
893
894/**
895 * Returns the maximum supported paging level given the supported adjusted
896 * guest-address width (SAGAW) field.
897 *
898 * @returns The highest paging level supported, 0 if invalid.
899 * @param fSagaw The CAP_REG.SAGAW value.
900 */
901static uint8_t vtdCapRegGetMaxPagingLevel(uint8_t fSagaw)
902{
903 uint8_t const cMaxPagingLevel = fSagaw & RT_BIT(3) ? 5
904 : fSagaw & RT_BIT(2) ? 4
905 : fSagaw & RT_BIT(1) ? 3
906 : 0;
907 return cMaxPagingLevel;
908}
909
910
911/**
912 * Returns table translation mode's descriptive name.
913 *
914 * @returns The descriptive name.
915 * @param uTtm The RTADDR_REG.TTM value.
916 */
917static const char* vtdRtaddrRegGetTtmDesc(uint8_t uTtm)
918{
919 Assert(!(uTtm & ~3));
920 static const char* s_apszTtmNames[] =
921 {
922 "Legacy Mode",
923 "Scalable Mode",
924 "Reserved",
925 "Abort-DMA Mode"
926 };
927 return s_apszTtmNames[uTtm & (RT_ELEMENTS(s_apszTtmNames) - 1)];
928}
929#endif /* IN_RING3 */
930
931
932/**
933 * Returns whether the interrupt remapping (IR) fault is qualified or not.
934 *
935 * @returns @c true if qualified, @c false otherwise.
936 * @param enmIrFault The interrupt remapping fault condition.
937 */
938static bool vtdIrFaultIsQualified(VTDIRFAULT enmIrFault)
939{
940 switch (enmIrFault)
941 {
942 case VTDIRFAULT_IRTE_NOT_PRESENT:
943 case VTDIRFAULT_IRTE_PRESENT_RSVD:
944 case VTDIRFAULT_IRTE_PRESENT_INVALID:
945 case VTDIRFAULT_PID_READ_FAILED:
946 case VTDIRFAULT_PID_RSVD:
947 return true;
948 default:
949 return false;
950 }
951}
952
953
954/**
955 * Gets the index of the group the register belongs to given its MMIO offset.
956 *
957 * @returns The group index.
958 * @param offReg The MMIO offset of the register.
959 * @param cbReg The size of the access being made (for bounds checking on
960 * debug builds).
961 */
962DECLINLINE(uint8_t) dmarRegGetGroupIndex(uint16_t offReg, uint8_t cbReg)
963{
964 uint16_t const offLast = offReg + cbReg - 1;
965 AssertCompile(DMAR_MMIO_GROUP_0_OFF_FIRST == 0);
966 AssertMsg(DMAR_IS_MMIO_OFF_VALID(offLast), ("off=%#x cb=%u\n", offReg, cbReg));
967 return !(offLast < DMAR_MMIO_GROUP_0_OFF_END);
968}
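/* Illustrative example (offsets taken from the register tables above): a 4-byte access
 * at offset 0x34 (FSTS_REG) gives offLast = 0x37, which is below DMAR_MMIO_GROUP_0_OFF_END,
 * so the function returns 0 (group 0); an 8-byte access at DMAR_MMIO_OFF_IVA_REG (0xe50)
 * gives offLast = 0xe57, which lies beyond group 0, so it returns 1 (group 1). */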
969
970
971/**
972 * Gets the group the register belongs to given its MMIO offset.
973 *
974 * @returns Pointer to the first element of the register group.
975 * @param pThis The shared DMAR device state.
976 * @param offReg The MMIO offset of the register.
977 * @param cbReg The size of the access being made (for bounds checking on
978 * debug builds).
979 * @param pIdxGroup Where to store the index of the register group the register
980 * belongs to.
981 */
982DECLINLINE(uint8_t *) dmarRegGetGroup(PDMAR pThis, uint16_t offReg, uint8_t cbReg, uint8_t *pIdxGroup)
983{
984 *pIdxGroup = dmarRegGetGroupIndex(offReg, cbReg);
985 uint8_t *apbRegs[] = { &pThis->abRegs0[0], &pThis->abRegs1[0] };
986 return apbRegs[*pIdxGroup];
987}
988
989
990/**
991 * Const/read-only version of dmarRegGetGroup.
992 *
993 * @copydoc dmarRegGetGroup
994 */
995DECLINLINE(uint8_t const*) dmarRegGetGroupRo(PCDMAR pThis, uint16_t offReg, uint8_t cbReg, uint8_t *pIdxGroup)
996{
997 *pIdxGroup = dmarRegGetGroupIndex(offReg, cbReg);
998 uint8_t const *apbRegs[] = { &pThis->abRegs0[0], &pThis->abRegs1[0] };
999 return apbRegs[*pIdxGroup];
1000}
1001
1002
1003/**
1004 * Writes a 32-bit register with exactly the supplied value.
1005 *
1006 * @param pThis The shared DMAR device state.
1007 * @param offReg The MMIO offset of the register.
1008 * @param uReg The 32-bit value to write.
1009 */
1010static void dmarRegWriteRaw32(PDMAR pThis, uint16_t offReg, uint32_t uReg)
1011{
1012 uint8_t idxGroup;
1013 uint8_t *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint32_t), &idxGroup);
1014 NOREF(idxGroup);
1015 *(uint32_t *)(pabRegs + offReg) = uReg;
1016}
1017
1018
1019/**
1020 * Writes a 64-bit register with exactly the supplied value.
1021 *
1022 * @param pThis The shared DMAR device state.
1023 * @param offReg The MMIO offset of the register.
1024 * @param uReg The 64-bit value to write.
1025 */
1026static void dmarRegWriteRaw64(PDMAR pThis, uint16_t offReg, uint64_t uReg)
1027{
1028 uint8_t idxGroup;
1029 uint8_t *pabRegs = dmarRegGetGroup(pThis, offReg, sizeof(uint64_t), &idxGroup);
1030 NOREF(idxGroup);
1031 *(uint64_t *)(pabRegs + offReg) = uReg;
1032}
1033
1034
1035/**
1036 * Reads a 32-bit register with exactly the value it contains.
1037 *
1038 * @returns The raw register value.
1039 * @param pThis The shared DMAR device state.
1040 * @param offReg The MMIO offset of the register.
1041 */
1042static uint32_t dmarRegReadRaw32(PCDMAR pThis, uint16_t offReg)
1043{
1044 uint8_t idxGroup;
1045 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint32_t), &idxGroup);
1046 NOREF(idxGroup);
1047 return *(uint32_t *)(pabRegs + offReg);
1048}
1049
1050
1051/**
1052 * Reads a 64-bit register with exactly the value it contains.
1053 *
1054 * @returns The raw register value.
1055 * @param pThis The shared DMAR device state.
1056 * @param offReg The MMIO offset of the register.
1057 */
1058static uint64_t dmarRegReadRaw64(PCDMAR pThis, uint16_t offReg)
1059{
1060 uint8_t idxGroup;
1061 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint64_t), &idxGroup);
1062 NOREF(idxGroup);
1063 return *(uint64_t *)(pabRegs + offReg);
1064}
1065
1066
1067/**
1068 * Reads a 32-bit register with exactly the value it contains along with its
1069 * corresponding masks.
1070 *
1071 * @param pThis The shared DMAR device state.
1072 * @param offReg The MMIO offset of the register.
1073 * @param puReg Where to store the raw 32-bit register value.
1074 * @param pfRwMask Where to store the RW mask corresponding to this register.
1075 * @param pfRw1cMask Where to store the RW1C mask corresponding to this register.
1076 */
1077static void dmarRegReadRaw32Ex(PCDMAR pThis, uint16_t offReg, uint32_t *puReg, uint32_t *pfRwMask, uint32_t *pfRw1cMask)
1078{
1079 uint8_t idxGroup;
1080 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint32_t), &idxGroup);
1081 Assert(idxGroup < RT_ELEMENTS(g_apbRwMasks));
1082 uint8_t const *pabRwMasks = g_apbRwMasks[idxGroup];
1083 uint8_t const *pabRw1cMasks = g_apbRw1cMasks[idxGroup];
1084 *puReg = *(uint32_t *)(pabRegs + offReg);
1085 *pfRwMask = *(uint32_t *)(pabRwMasks + offReg);
1086 *pfRw1cMask = *(uint32_t *)(pabRw1cMasks + offReg);
1087}
1088
1089
1090/**
1091 * Reads a 64-bit register with exactly the value it contains along with its
1092 * corresponding masks.
1093 *
1094 * @param pThis The shared DMAR device state.
1095 * @param offReg The MMIO offset of the register.
1096 * @param puReg Where to store the raw 64-bit register value.
1097 * @param pfRwMask Where to store the RW mask corresponding to this register.
1098 * @param pfRw1cMask Where to store the RW1C mask corresponding to this register.
1099 */
1100static void dmarRegReadRaw64Ex(PCDMAR pThis, uint16_t offReg, uint64_t *puReg, uint64_t *pfRwMask, uint64_t *pfRw1cMask)
1101{
1102 uint8_t idxGroup;
1103 uint8_t const *pabRegs = dmarRegGetGroupRo(pThis, offReg, sizeof(uint64_t), &idxGroup);
1104 Assert(idxGroup < RT_ELEMENTS(g_apbRwMasks));
1105 uint8_t const *pabRwMasks = g_apbRwMasks[idxGroup];
1106 uint8_t const *pabRw1cMasks = g_apbRw1cMasks[idxGroup];
1107 *puReg = *(uint64_t *)(pabRegs + offReg);
1108 *pfRwMask = *(uint64_t *)(pabRwMasks + offReg);
1109 *pfRw1cMask = *(uint64_t *)(pabRw1cMasks + offReg);
1110}
1111
1112
1113/**
1114 * Writes a 32-bit register as it would be when written by software.
1115 * This will preserve read-only bits, mask off reserved bits and clear RW1C bits.
1116 *
1117 * @returns The value that's actually written to the register.
1118 * @param pThis The shared DMAR device state.
1119 * @param offReg The MMIO offset of the register.
1120 * @param uReg The 32-bit value to write.
1121 * @param puPrev Where to store the register value prior to writing.
1122 */
1123static uint32_t dmarRegWrite32(PDMAR pThis, uint16_t offReg, uint32_t uReg, uint32_t *puPrev)
1124{
1125 /* Read current value from the 32-bit register. */
1126 uint32_t uCurReg;
1127 uint32_t fRwMask;
1128 uint32_t fRw1cMask;
1129 dmarRegReadRaw32Ex(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
1130 *puPrev = uCurReg;
1131
1132 uint32_t const fRoBits = uCurReg & ~fRwMask; /* Preserve current read-only and reserved bits. */
1133 uint32_t const fRwBits = uReg & fRwMask; /* Merge newly written read/write bits. */
1134 uint32_t const fRw1cBits = uReg & fRw1cMask; /* Clear 1s written to RW1C bits. */
1135 uint32_t const uNewReg = (fRoBits | fRwBits) & ~fRw1cBits;
1136
1137 /* Write new value to the 32-bit register. */
1138 dmarRegWriteRaw32(pThis, offReg, uNewReg);
1139 return uNewReg;
1140}
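/* Worked example with made-up masks (illustrative only): for a register whose current raw
 * value is 0xf3, with fRwMask = 0x0f and fRw1cMask = 0xc0, a software write of 0xc5 gives:
 *     fRoBits   = 0xf3 & ~0x0f = 0xf0   (read-only/reserved bits preserved)
 *     fRwBits   = 0xc5 &  0x0f = 0x05   (writable bits taken from the new value)
 *     fRw1cBits = 0xc5 &  0xc0 = 0xc0   (1s written to RW1C bits)
 *     uNewReg   = (0xf0 | 0x05) & ~0xc0 = 0x35
 * i.e. RW1C status bits are cleared by writing 1 and read-only bits are untouched. */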
1141
1142
1143/**
1144 * Writes a 64-bit register as it would be when written by software.
1145 * This will preserve read-only bits, mask off reserved bits and clear RW1C bits.
1146 *
1147 * @returns The value that's actually written to the register.
1148 * @param pThis The shared DMAR device state.
1149 * @param offReg The MMIO offset of the register.
1150 * @param uReg The 64-bit value to write.
1151 * @param puPrev Where to store the register value prior to writing.
1152 */
1153static uint64_t dmarRegWrite64(PDMAR pThis, uint16_t offReg, uint64_t uReg, uint64_t *puPrev)
1154{
1155 /* Read current value from the 64-bit register. */
1156 uint64_t uCurReg;
1157 uint64_t fRwMask;
1158 uint64_t fRw1cMask;
1159 dmarRegReadRaw64Ex(pThis, offReg, &uCurReg, &fRwMask, &fRw1cMask);
1160 *puPrev = uCurReg;
1161
1162 uint64_t const fRoBits = uCurReg & ~fRwMask; /* Preserve current read-only and reserved bits. */
1163 uint64_t const fRwBits = uReg & fRwMask; /* Merge newly written read/write bits. */
1164 uint64_t const fRw1cBits = uReg & fRw1cMask; /* Clear 1s written to RW1C bits. */
1165 uint64_t const uNewReg = (fRoBits | fRwBits) & ~fRw1cBits;
1166
1167 /* Write new value to the 64-bit register. */
1168 dmarRegWriteRaw64(pThis, offReg, uNewReg);
1169 return uNewReg;
1170}
1171
1172
1173/**
1174 * Reads a 32-bit register as it would be when read by software.
1175 *
1176 * @returns The register value.
1177 * @param pThis The shared DMAR device state.
1178 * @param offReg The MMIO offset of the register.
1179 */
1180static uint32_t dmarRegRead32(PCDMAR pThis, uint16_t offReg)
1181{
1182 return dmarRegReadRaw32(pThis, offReg);
1183}
1184
1185
1186/**
1187 * Reads a 64-bit register as it would be when read by software.
1188 *
1189 * @returns The register value.
1190 * @param pThis The shared DMAR device state.
1191 * @param offReg The MMIO offset of the register.
1192 */
1193static uint64_t dmarRegRead64(PCDMAR pThis, uint16_t offReg)
1194{
1195 return dmarRegReadRaw64(pThis, offReg);
1196}
1197
1198
1199/**
1200 * Modifies a 32-bit register.
1201 *
1202 * @param pThis The shared DMAR device state.
1203 * @param offReg The MMIO offset of the register.
1204 * @param fAndMask The AND mask (applied first).
1205 * @param fOrMask The OR mask.
1206 * @remarks This does NOT apply RO or RW1C masks while modifying the
1207 * register.
1208 */
1209static void dmarRegChangeRaw32(PDMAR pThis, uint16_t offReg, uint32_t fAndMask, uint32_t fOrMask)
1210{
1211 uint32_t uReg = dmarRegReadRaw32(pThis, offReg);
1212 uReg = (uReg & fAndMask) | fOrMask;
1213 dmarRegWriteRaw32(pThis, offReg, uReg);
1214}
1215
1216
1217/**
1218 * Modifies a 64-bit register.
1219 *
1220 * @param pThis The shared DMAR device state.
1221 * @param offReg The MMIO offset of the register.
1222 * @param fAndMask The AND mask (applied first).
1223 * @param fOrMask The OR mask.
1224 * @remarks This does NOT apply RO or RW1C masks while modifying the
1225 * register.
1226 */
1227static void dmarRegChangeRaw64(PDMAR pThis, uint16_t offReg, uint64_t fAndMask, uint64_t fOrMask)
1228{
1229 uint64_t uReg = dmarRegReadRaw64(pThis, offReg);
1230 uReg = (uReg & fAndMask) | fOrMask;
1231 dmarRegWriteRaw64(pThis, offReg, uReg);
1232}
1233
1234
1235/**
1236 * Checks if the invalidation-queue is empty.
1237 *
1238 * Extended version which optionally returns the current queue head and tail
1239 * offsets.
1240 *
1241 * @returns @c true if empty, @c false otherwise.
1242 * @param pThis The shared DMAR device state.
1243 * @param poffQh Where to store the queue head offset. Optional, can be NULL.
1244 * @param poffQt Where to store the queue tail offset. Optional, can be NULL.
1245 */
1246static bool dmarInvQueueIsEmptyEx(PCDMAR pThis, uint32_t *poffQh, uint32_t *poffQt)
1247{
1248 /* Read only the low 32 bits of the queue head and queue tail as the high bits are all RsvdZ. */
1249 uint32_t const uIqtReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IQT_REG);
1250 uint32_t const uIqhReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IQH_REG);
1251
1252 /* Don't bother masking QT, QH since other bits are RsvdZ. */
1253 Assert(!(uIqtReg & ~VTD_BF_IQT_REG_QT_MASK));
1254 Assert(!(uIqhReg & ~VTD_BF_IQH_REG_QH_MASK));
1255 if (poffQh)
1256 *poffQh = uIqhReg;
1257 if (poffQt)
1258 *poffQt = uIqtReg;
1259 return uIqtReg == uIqhReg;
1260}
1261
1262
1263/**
1264 * Checks if the invalidation-queue is empty.
1265 *
1266 * @returns @c true if empty, @c false otherwise.
1267 * @param pThis The shared DMAR device state.
1268 */
1269static bool dmarInvQueueIsEmpty(PCDMAR pThis)
1270{
1271 return dmarInvQueueIsEmptyEx(pThis, NULL /* poffQh */, NULL /* poffQt */);
1272}
1273
1274
1275/**
1276 * Checks if the invalidation-queue is capable of processing requests.
1277 *
1278 * @returns @c true if the invalidation-queue can process requests, @c false
1279 * otherwise.
1280 * @param pThis The shared DMAR device state.
1281 */
1282static bool dmarInvQueueCanProcessRequests(PCDMAR pThis)
1283{
1284 /* Check if queued-invalidation is enabled. */
1285 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
1286 if (uGstsReg & VTD_BF_GSTS_REG_QIES_MASK)
1287 {
1288 /* Check if there are no invalidation-queue or timeout errors. */
1289 uint32_t const uFstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FSTS_REG);
1290 if (!(uFstsReg & (VTD_BF_FSTS_REG_IQE_MASK | VTD_BF_FSTS_REG_ITE_MASK)))
1291 return true;
1292 }
1293 return false;
1294}
1295
1296
1297/**
1298 * Wakes up the invalidation-queue thread if there are requests to be processed.
1299 *
1300 * @param pDevIns The IOMMU device instance.
1301 */
1302static void dmarInvQueueThreadWakeUpIfNeeded(PPDMDEVINS pDevIns)
1303{
1304 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1305 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1306 LogFlowFunc(("\n"));
1307
1308 DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisCC);
1309
1310 if ( dmarInvQueueCanProcessRequests(pThis)
1311 && !dmarInvQueueIsEmpty(pThis))
1312 {
1313 Log4Func(("Signaling the invalidation-queue thread\n"));
1314 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtInvQueue);
1315 }
1316}
1317
1318
1319/**
1320 * Raises an event on behalf of the DMAR.
1321 *
1322 * These are events that are generated by the DMAR itself (like faults and
1323 * invalidation completion notifications).
1324 *
1325 * @param pDevIns The IOMMU device instance.
1326 * @param enmEventType The DMAR event type.
1327 *
1328 * @remarks The DMAR lock must be held while calling this function.
1329 */
1330static void dmarEventRaiseInterrupt(PPDMDEVINS pDevIns, DMAREVENTTYPE enmEventType)
1331{
1332 uint16_t offCtlReg;
1333 uint32_t fIntrMaskedMask;
1334 uint32_t fIntrPendingMask;
1335 uint16_t offMsiAddrLoReg;
1336 uint16_t offMsiAddrHiReg;
1337 uint16_t offMsiDataReg;
1338 switch (enmEventType)
1339 {
1340 case DMAREVENTTYPE_INV_COMPLETE:
1341 {
1342 offCtlReg = VTD_MMIO_OFF_IECTL_REG;
1343 fIntrMaskedMask = VTD_BF_IECTL_REG_IM_MASK;
1344 fIntrPendingMask = VTD_BF_IECTL_REG_IP_MASK;
1345 offMsiAddrLoReg = VTD_MMIO_OFF_IEADDR_REG;
1346 offMsiAddrHiReg = VTD_MMIO_OFF_IEUADDR_REG;
1347 offMsiDataReg = VTD_MMIO_OFF_IEDATA_REG;
1348 break;
1349 }
1350
1351 case DMAREVENTTYPE_FAULT:
1352 {
1353 offCtlReg = VTD_MMIO_OFF_FECTL_REG;
1354 fIntrMaskedMask = VTD_BF_FECTL_REG_IM_MASK;
1355 fIntrPendingMask = VTD_BF_FECTL_REG_IP_MASK;
1356 offMsiAddrLoReg = VTD_MMIO_OFF_FEADDR_REG;
1357 offMsiAddrHiReg = VTD_MMIO_OFF_FEUADDR_REG;
1358 offMsiDataReg = VTD_MMIO_OFF_FEDATA_REG;
1359 break;
1360 }
1361
1362 default:
1363 {
1364 /* Shouldn't ever happen. */
1365 AssertMsgFailedReturnVoid(("DMAR event type %#x unknown!\n", enmEventType));
1366 }
1367 }
1368
1369 /* Check if software has masked the interrupt. */
1370 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1371 uint32_t uCtlReg = dmarRegReadRaw32(pThis, offCtlReg);
1372 if (!(uCtlReg & fIntrMaskedMask))
1373 {
1374 /*
1375 * Interrupt is unmasked, raise it.
1376 * Interrupts generated by the DMAR have trigger mode and level as 0.
1377 * See Intel spec. 5.1.6 "Remapping Hardware Event Interrupt Programming".
1378 */
1379 MSIMSG Msi;
1380 Msi.Addr.au32[0] = dmarRegReadRaw32(pThis, offMsiAddrLoReg);
1381 Msi.Addr.au32[1] = (pThis->fExtCapReg & VTD_BF_ECAP_REG_EIM_MASK) ? dmarRegReadRaw32(pThis, offMsiAddrHiReg) : 0;
1382 Msi.Data.u32 = dmarRegReadRaw32(pThis, offMsiDataReg);
1383 Assert(Msi.Data.n.u1Level == 0);
1384 Assert(Msi.Data.n.u1TriggerMode == 0);
1385
1386 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1387 pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi(pDevIns, &Msi, 0 /* uTagSrc */);
1388
1389 /* Clear interrupt pending bit. */
1390 uCtlReg &= ~fIntrPendingMask;
1391 dmarRegWriteRaw32(pThis, offCtlReg, uCtlReg);
1392 }
1393 else
1394 {
1395 /* Interrupt is masked, set the interrupt pending bit. */
1396 uCtlReg |= fIntrPendingMask;
1397 dmarRegWriteRaw32(pThis, offCtlReg, uCtlReg);
1398 }
1399}
1400
1401
1402/**
1403 * Raises an interrupt in response to a fault event.
1404 *
1405 * @param pDevIns The IOMMU device instance.
1406 *
1407 * @remarks This assumes the caller has already set the required status bits in the
1408 * FSTS_REG (namely one or more of PPF, PFO, IQE, ICE or ITE bits).
1409 */
1410static void dmarFaultEventRaiseInterrupt(PPDMDEVINS pDevIns)
1411{
1412 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1413 DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisCC);
1414
1415#ifdef RT_STRICT
1416 {
1417 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR);
1418 uint32_t const uFstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FSTS_REG);
1419 uint32_t const fFaultMask = VTD_BF_FSTS_REG_PPF_MASK | VTD_BF_FSTS_REG_PFO_MASK
1420 /* | VTD_BF_FSTS_REG_APF_MASK | VTD_BF_FSTS_REG_AFO_MASK */ /* AFL not supported */
1421 /* | VTD_BF_FSTS_REG_ICE_MASK | VTD_BF_FSTS_REG_ITE_MASK */ /* Device-TLBs not supported */
1422 | VTD_BF_FSTS_REG_IQE_MASK;
1423 Assert(uFstsReg & fFaultMask);
1424 }
1425#endif
1426 dmarEventRaiseInterrupt(pDevIns, DMAREVENTTYPE_FAULT);
1427}
1428
1429
1430#ifdef IN_RING3
1431/**
1432 * Raises an interrupt in response to an invalidation (complete) event.
1433 *
1434 * @param pDevIns The IOMMU device instance.
1435 */
1436static void dmarR3InvEventRaiseInterrupt(PPDMDEVINS pDevIns)
1437{
1438 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1439 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1440 DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisCC);
1441
1442 uint32_t const uIcsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_ICS_REG);
1443 if (!(uIcsReg & VTD_BF_ICS_REG_IWC_MASK))
1444 {
1445 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_ICS_REG, UINT32_MAX, VTD_BF_ICS_REG_IWC_MASK);
1446 dmarEventRaiseInterrupt(pDevIns, DMAREVENTTYPE_INV_COMPLETE);
1447 }
1448}
1449#endif /* IN_RING3 */
1450
1451
1452/**
1453 * Checks if a primary fault can be recorded.
1454 *
1455 * @returns @c true if the fault can be recorded, @c false otherwise.
1456 * @param pDevIns The IOMMU device instance.
1457 * @param pThis The shared DMAR device state.
1458 *
1459 * @remarks Warning: This function has side-effects wrt the DMAR register state. Do
1460 * NOT call it unless there is a fault condition!
1461 */
1462static bool dmarPrimaryFaultCanRecord(PPDMDEVINS pDevIns, PDMAR pThis)
1463{
1464 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1465 DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisCC);
1466
1467 uint32_t uFstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FSTS_REG);
1468 if (uFstsReg & VTD_BF_FSTS_REG_PFO_MASK)
1469 return false;
1470
1471 /*
1472 * If we add more FRCD registers, we'll have to loop through them here.
1473 * Since we support only one FRCD_REG, we don't support "compression of multiple faults",
1474 * nor do we need to increment FRI.
1475 *
1476 * See Intel VT-d spec. 7.2.1 "Primary Fault Logging".
1477 */
1478 AssertCompile(DMAR_FRCD_REG_COUNT == 1);
1479 uint64_t const uFrcdRegHi = dmarRegReadRaw64(pThis, DMAR_MMIO_OFF_FRCD_HI_REG);
1480 if (uFrcdRegHi & VTD_BF_1_FRCD_REG_F_MASK)
1481 {
1482 uFstsReg |= VTD_BF_FSTS_REG_PFO_MASK;
1483 dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_FSTS_REG, uFstsReg);
1484 return false;
1485 }
1486
1487 return true;
1488}
1489
1490
1491/**
1492 * Records a primary fault.
1493 *
1494 * @param pDevIns The IOMMU device instance.
1495 * @param uFrcdHi The FRCD_HI_REG value for this fault.
1496 * @param uFrcdLo The FRCD_LO_REG value for this fault.
1497 */
1498static void dmarPrimaryFaultRecord(PPDMDEVINS pDevIns, uint64_t uFrcdHi, uint64_t uFrcdLo)
1499{
1500 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1501 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1502
1503 DMAR_LOCK(pDevIns, pThisCC);
1504
1505 /* We don't support advanced fault logging. */
1506 Assert(!(dmarRegRead32(pThis, VTD_MMIO_OFF_GSTS_REG) & VTD_BF_GSTS_REG_AFLS_MASK));
1507
1508 if (dmarPrimaryFaultCanRecord(pDevIns, pThis))
1509 {
1510 /* Update the fault recording registers with the fault information. */
1511 dmarRegWriteRaw64(pThis, DMAR_MMIO_OFF_FRCD_HI_REG, uFrcdHi);
1512 dmarRegWriteRaw64(pThis, DMAR_MMIO_OFF_FRCD_LO_REG, uFrcdLo);
1513
1514 /* Set the Pending Primary Fault (PPF) field in the status register. */
1515 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FSTS_REG, UINT32_MAX, VTD_BF_FSTS_REG_PPF_MASK);
1516
1517 /* Raise interrupt if necessary. */
1518 dmarFaultEventRaiseInterrupt(pDevIns);
1519 }
1520
1521 DMAR_UNLOCK(pDevIns, pThisCC);
1522}
1523
1524
1525/**
1526 * Records an interrupt request fault.
1527 *
1528 * @param pDevIns The IOMMU device instance.
1529 * @param enmDiag The diagnostic reason.
1530 * @param idDevice The device ID (bus, device, function).
1531 * @param idxIntr The interrupt index.
1532 * @param pIrte The IRTE that caused this fault. Can be NULL if the fault is
1533 * not qualified.
1534 */
1535static void dmarIrFaultRecord(PPDMDEVINS pDevIns, DMARDIAG enmDiag, uint16_t idDevice, uint16_t idxIntr, PCVTD_IRTE_T pIrte)
1536{
1537 /*
1538 * Update the diagnostic reason (even if software wants to suppress faults).
1539 */
1540 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1541 pThis->enmDiag = enmDiag;
1542
1543 /*
1544 * Figure out the fault reason to report to software from our diagnostic code.
1545 * The case labels below are sorted alphabetically for convenience.
1546 */
1547 VTDIRFAULT enmIrFault;
1548 switch (enmDiag)
1549 {
1550 case kDmarDiag_Ir_Cfi_Blocked: enmIrFault = VTDIRFAULT_CFI_BLOCKED; break;
1551 case kDmarDiag_Ir_Rfi_Intr_Index_Invalid: enmIrFault = VTDIRFAULT_INTR_INDEX_INVALID; break;
1552 case kDmarDiag_Ir_Rfi_Irte_Mode_Invalid: enmIrFault = VTDIRFAULT_IRTE_PRESENT_RSVD; break;
1553 case kDmarDiag_Ir_Rfi_Irte_Not_Present: enmIrFault = VTDIRFAULT_IRTE_NOT_PRESENT; break;
1554 case kDmarDiag_Ir_Rfi_Irte_Read_Failed: enmIrFault = VTDIRFAULT_IRTE_READ_FAILED; break;
1555 case kDmarDiag_Ir_Rfi_Irte_Rsvd:
1556 case kDmarDiag_Ir_Rfi_Irte_Svt_Bus:
1557 case kDmarDiag_Ir_Rfi_Irte_Svt_Masked:
1558 case kDmarDiag_Ir_Rfi_Irte_Svt_Rsvd: enmIrFault = VTDIRFAULT_IRTE_PRESENT_RSVD; break;
1559 case kDmarDiag_Ir_Rfi_Rsvd: enmIrFault = VTDIRFAULT_REMAPPABLE_INTR_RSVD; break;
1560
1561 /* Shouldn't ever happen. */
1562 default:
1563 {
1564 AssertLogRelMsgFailedReturnVoid(("%s: Invalid interrupt remapping fault diagnostic code %#x\n", DMAR_LOG_PFX,
1565 enmDiag));
1566 }
1567 }
1568
1569 /*
1570 * Qualified faults are those that can be suppressed by software using the FPD bit
1571 * in the interrupt-remapping table entry.
1572 */
1573 bool fFpd;
1574 bool const fQualifiedFault = vtdIrFaultIsQualified(enmIrFault);
1575 if (fQualifiedFault)
1576 {
1577 AssertReturnVoid(pIrte);
1578 fFpd = RT_BOOL(pIrte->au64[0] & VTD_BF_0_IRTE_FPD_MASK);
1579 }
1580 else
1581 fFpd = false;
1582
1583 if (!fFpd)
1584 {
1585 /* Construct and record the error. */
1586 uint64_t const uFrcdHi = RT_BF_MAKE(VTD_BF_1_FRCD_REG_SID, idDevice)
1587 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_FR, enmIrFault)
1588 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_F, 1);
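        /* For interrupt-remapping faults the interrupt index is reported in bits 63:48 of the low FRCD qword. */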
1589 uint64_t const uFrcdLo = (uint64_t)idxIntr << 48;
1590 dmarPrimaryFaultRecord(pDevIns, uFrcdHi, uFrcdLo);
1591 }
1592}
1593
1594
1595/**
1596 * Records an address translation fault.
1597 *
1598 * @param pDevIns The IOMMU device instance.
1599 * @param enmDiag The diagnostic reason.
1600 * @param pMemReqIn The DMA memory request input.
1601 * @param pMemReqAux The DMA memory request auxiliary info.
1602 */
1603static void dmarAtFaultRecord(PPDMDEVINS pDevIns, DMARDIAG enmDiag, PCDMARMEMREQIN pMemReqIn, PCDMARMEMREQAUX pMemReqAux)
1604{
1605 /*
1606 * Update the diagnostic reason (even if software wants to suppress faults).
1607 */
1608 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1609 pThis->enmDiag = enmDiag;
1610
1611 /*
1612 * Qualified faults are those that can be suppressed by software using the FPD bit
1613 * in the context entry, scalable-mode context entry etc.
1614 */
1615 if (!pMemReqAux->fFpd)
1616 {
1617 /*
1618 * Figure out the fault reason to report to software from our diagnostic code.
1619 * The case labels below are sorted alphabetically for convenience.
1620 */
1621 VTDATFAULT enmAtFault;
1622 bool const fLm = pMemReqAux->fTtm == VTD_TTM_LEGACY_MODE;
1623 switch (enmDiag)
1624 {
1625 /* LM (Legacy Mode) faults. */
1626 case kDmarDiag_At_Lm_CtxEntry_Not_Present: enmAtFault = VTDATFAULT_LCT_2; break;
1627 case kDmarDiag_At_Lm_CtxEntry_Read_Failed: enmAtFault = VTDATFAULT_LCT_1; break;
1628 case kDmarDiag_At_Lm_CtxEntry_Rsvd: enmAtFault = VTDATFAULT_LCT_3; break;
1629 case kDmarDiag_At_Lm_Pt_At_Block: enmAtFault = VTDATFAULT_LCT_5; break;
1630 case kDmarDiag_At_Lm_Pt_Aw_Invalid: enmAtFault = VTDATFAULT_LGN_1_3; break;
1631 case kDmarDiag_At_Lm_RootEntry_Not_Present: enmAtFault = VTDATFAULT_LRT_2; break;
1632 case kDmarDiag_At_Lm_RootEntry_Read_Failed: enmAtFault = VTDATFAULT_LRT_1; break;
1633 case kDmarDiag_At_Lm_RootEntry_Rsvd: enmAtFault = VTDATFAULT_LRT_3; break;
1634 case kDmarDiag_At_Lm_Tt_Invalid: enmAtFault = VTDATFAULT_LCT_4_2; break;
1635 case kDmarDiag_At_Lm_Ut_At_Block: enmAtFault = VTDATFAULT_LCT_5; break;
1636 case kDmarDiag_At_Lm_Ut_Aw_Invalid: enmAtFault = VTDATFAULT_LCT_4_1; break;
1637
1638 /* RTA (Root Table Address) faults. */
1639 case kDmarDiag_At_Rta_Adms_Not_Supported: enmAtFault = VTDATFAULT_RTA_1_1; break;
1640 case kDmarDiag_At_Rta_Rsvd: enmAtFault = VTDATFAULT_RTA_1_2; break;
1641 case kDmarDiag_At_Rta_Smts_Not_Supported: enmAtFault = VTDATFAULT_RTA_1_3; break;
1642
1643 /* XM (Legacy mode or Scalable Mode) faults. */
1644 case kDmarDiag_At_Xm_AddrIn_Invalid: enmAtFault = fLm ? VTDATFAULT_LGN_1_1 : VTDATFAULT_SGN_5; break;
1645 case kDmarDiag_At_Xm_AddrOut_Invalid: enmAtFault = fLm ? VTDATFAULT_LGN_4 : VTDATFAULT_SGN_8; break;
1646 case kDmarDiag_At_Xm_Perm_Denied: enmAtFault = fLm ? VTDATFAULT_LSL_2 : VTDATFAULT_SSL_2; break;
1647 case kDmarDiag_At_Xm_Pte_Rsvd:
1648 case kDmarDiag_At_Xm_Pte_Sllps_Invalid: enmAtFault = fLm ? VTDATFAULT_LSL_2 : VTDATFAULT_SSL_3; break;
1649 case kDmarDiag_At_Xm_Read_Pte_Failed: enmAtFault = fLm ? VTDATFAULT_LSL_1 : VTDATFAULT_SSL_1; break;
1650 case kDmarDiag_At_Xm_Slpptr_Read_Failed: enmAtFault = fLm ? VTDATFAULT_LCT_4_3 : VTDATFAULT_SSL_4; break;
1651
1652 /* Shouldn't ever happen. */
1653 default:
1654 {
1655 AssertLogRelMsgFailedReturnVoid(("%s: Invalid address translation fault diagnostic code %#x\n",
1656 DMAR_LOG_PFX, enmDiag));
1657 }
1658 }
1659
1660 /* Construct and record the error. */
1661 uint16_t const idDevice = pMemReqIn->idDevice;
1662 uint8_t const fType1 = pMemReqIn->enmReqType & RT_BIT(1);
1663 uint8_t const fType2 = pMemReqIn->enmReqType & RT_BIT(0);
1664 uint8_t const fExec = pMemReqIn->AddrRange.fPerm & DMAR_PERM_EXE;
1665 uint8_t const fPriv = pMemReqIn->AddrRange.fPerm & DMAR_PERM_PRIV;
1666 bool const fHasPasid = PCIPASID_IS_VALID(pMemReqIn->Pasid);
1667 uint32_t const uPasid = PCIPASID_VAL(pMemReqIn->Pasid);
1668 PCIADDRTYPE const enmAt = pMemReqIn->enmAddrType;
1669
1670 uint64_t const uFrcdHi = RT_BF_MAKE(VTD_BF_1_FRCD_REG_SID, idDevice)
1671 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_T2, fType2)
1672 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_PP, fHasPasid)
1673 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_EXE, fExec)
1674 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_PRIV, fPriv)
1675 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_FR, enmAtFault)
1676 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_PV, uPasid)
1677 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_AT, enmAt)
1678 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_T1, fType1)
1679 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_F, 1);
1680 uint64_t const uFrcdLo = pMemReqIn->AddrRange.uAddr & X86_PAGE_BASE_MASK;
1681 dmarPrimaryFaultRecord(pDevIns, uFrcdHi, uFrcdLo);
1682 }
1683}
1684
1685
1686/**
1687 * Records an IQE fault.
1688 *
1689 * @param pDevIns The IOMMU device instance.
1690 * @param enmDiag The diagnostic reason.
1691 * @param enmIqei The IQE information.
1692 */
1693static void dmarIqeFaultRecord(PPDMDEVINS pDevIns, DMARDIAG enmDiag, VTDIQEI enmIqei)
1694{
1695 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1696 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
1697
1698 DMAR_LOCK(pDevIns, pThisCC);
1699
1700 /* Update the diagnostic reason. */
1701 pThis->enmDiag = enmDiag;
1702
1703 /* Set the error bit. */
1704 uint32_t const fIqe = RT_BF_MAKE(VTD_BF_FSTS_REG_IQE, 1);
1705 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FSTS_REG, UINT32_MAX, fIqe);
1706
1707 /* Set the error information. */
1708 uint64_t const fIqei = RT_BF_MAKE(VTD_BF_IQERCD_REG_IQEI, enmIqei);
1709 dmarRegChangeRaw64(pThis, VTD_MMIO_OFF_IQERCD_REG, UINT64_MAX, fIqei);
1710
1711 dmarFaultEventRaiseInterrupt(pDevIns);
1712
1713 DMAR_UNLOCK(pDevIns, pThisCC);
1714}
1715
1716
1717/**
1718 * Handles writes to GCMD_REG.
1719 *
1720 * @returns Strict VBox status code.
1721 * @param pDevIns The IOMMU device instance.
1722 * @param uGcmdReg The value written to GCMD_REG.
1723 */
1724static VBOXSTRICTRC dmarGcmdRegWrite(PPDMDEVINS pDevIns, uint32_t uGcmdReg)
1725{
1726 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1727 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
1728 uint32_t const fChanged = uGstsReg ^ uGcmdReg;
1729 uint64_t const fExtCapReg = pThis->fExtCapReg;
1730
1731 /* Queued-invalidation. */
1732 if ( (fExtCapReg & VTD_BF_ECAP_REG_QI_MASK)
1733 && (fChanged & VTD_BF_GCMD_REG_QIE_MASK))
1734 {
1735 if (uGcmdReg & VTD_BF_GCMD_REG_QIE_MASK)
1736 {
1737 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_QIES_MASK);
1738 dmarInvQueueThreadWakeUpIfNeeded(pDevIns);
1739 }
1740 else
1741 {
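            /* Disabling queued invalidation: clear QIES and reset the invalidation queue head to 0. */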
1742 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, ~VTD_BF_GSTS_REG_QIES_MASK, 0 /* fOrMask */);
1743 dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_IQH_REG, 0);
1744 }
1745 }
1746
1747 if (fExtCapReg & VTD_BF_ECAP_REG_IR_MASK)
1748 {
1749 /* Set Interrupt Remapping Table Pointer (SIRTP). */
1750 if (uGcmdReg & VTD_BF_GCMD_REG_SIRTP_MASK)
1751 {
1752 /** @todo Perform global invalidation of the interrupt-entry cache when ESIRTPS is
1753 * supported. */
1754 pThis->uIrtaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IRTA_REG);
1755 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_IRTPS_MASK);
1756 }
1757
1758 /* Interrupt remapping. */
1759 if (fChanged & VTD_BF_GCMD_REG_IRE_MASK)
1760 {
1761 if (uGcmdReg & VTD_BF_GCMD_REG_IRE_MASK)
1762 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_IRES_MASK);
1763 else
1764 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, ~VTD_BF_GSTS_REG_IRES_MASK, 0 /* fOrMask */);
1765 }
1766
1767 /* Compatibility format interrupts. */
1768 if (fChanged & VTD_BF_GCMD_REG_CFI_MASK)
1769 {
1770 if (uGcmdReg & VTD_BF_GCMD_REG_CFI_MASK)
1771 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_CFIS_MASK);
1772 else
1773 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, ~VTD_BF_GSTS_REG_CFIS_MASK, 0 /* fOrMask */);
1774 }
1775 }
1776
1777 /* Set Root Table Pointer (SRTP). */
1778 if (uGcmdReg & VTD_BF_GCMD_REG_SRTP_MASK)
1779 {
1780 /** @todo Perform global invalidation of all remapping translation caches when
1781 * ESRTPS is supported. */
1782 pThis->uRtaddrReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_RTADDR_REG);
1783 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_RTPS_MASK);
1784 }
1785
1786 /* Translation (DMA remapping). */
1787 if (fChanged & VTD_BF_GCMD_REG_TE_MASK)
1788 {
1789 if (uGcmdReg & VTD_BF_GCMD_REG_TE_MASK)
1790 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX, VTD_BF_GSTS_REG_TES_MASK);
1791 else
1792 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, ~VTD_BF_GSTS_REG_TES_MASK, 0 /* fOrMask */);
1793 }
1794
1795 return VINF_SUCCESS;
1796}
1797
1798
1799/**
1800 * Handles writes to CCMD_REG.
1801 *
1802 * @returns Strict VBox status code.
1803 * @param pDevIns The IOMMU device instance.
1804 * @param offReg The MMIO register offset.
1805 * @param cbReg The size of the MMIO access (in bytes).
1806 * @param uCcmdReg The value written to CCMD_REG.
1807 */
1808static VBOXSTRICTRC dmarCcmdRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint8_t cbReg, uint64_t uCcmdReg)
1809{
1810 /* At present, we only care about responding to writes of the high 32-bits; the low 32-bits are data. */
1811 if (offReg + cbReg > VTD_MMIO_OFF_CCMD_REG + 4)
1812 {
1813 /* Check if we need to invalidate the context-cache. */
1814 bool const fIcc = RT_BF_GET(uCcmdReg, VTD_BF_CCMD_REG_ICC);
1815 if (fIcc)
1816 {
1817 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1818 uint8_t const uMajorVersion = RT_BF_GET(pThis->uVerReg, VTD_BF_VER_REG_MAX);
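            /* Register-based invalidation is presumably only valid on older architecture versions; newer
               versions are expected to use queued invalidation exclusively. */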
1819 if (uMajorVersion < 6)
1820 {
1821 /* Register-based invalidation can only be used when queued invalidation is not enabled. */
1822 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
1823 if (!(uGstsReg & VTD_BF_GSTS_REG_QIES_MASK))
1824 {
1825 /* Verify table translation mode is legacy. */
1826 uint8_t const fTtm = RT_BF_GET(pThis->uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
1827 if (fTtm == VTD_TTM_LEGACY_MODE)
1828 {
1829 /** @todo Invalidate. */
1830 return VINF_SUCCESS;
1831 }
1832 pThis->enmDiag = kDmarDiag_CcmdReg_Ttm_Invalid;
1833 }
1834 else
1835 pThis->enmDiag = kDmarDiag_CcmdReg_Qi_Enabled;
1836 }
1837 else
1838 pThis->enmDiag = kDmarDiag_CcmdReg_Not_Supported;
1839 dmarRegChangeRaw64(pThis, VTD_MMIO_OFF_CCMD_REG, ~VTD_BF_CCMD_REG_CAIG_MASK, 0 /* fOrMask */);
1840 }
1841 }
1842 return VINF_SUCCESS;
1843}
1844
1845
1846/**
1847 * Handles writes to FECTL_REG.
1848 *
1849 * @returns Strict VBox status code.
1850 * @param pDevIns The IOMMU device instance.
1851 * @param uFectlReg The value written to FECTL_REG.
1852 */
1853static VBOXSTRICTRC dmarFectlRegWrite(PPDMDEVINS pDevIns, uint32_t uFectlReg)
1854{
1855 /*
1856 * If software unmasks the interrupt when the interrupt is pending, we must raise
1857 * the interrupt now (which will consequently clear the interrupt pending (IP) bit).
1858 */
1859 if ( (uFectlReg & VTD_BF_FECTL_REG_IP_MASK)
1860 && !(uFectlReg & VTD_BF_FECTL_REG_IM_MASK))
1861 dmarEventRaiseInterrupt(pDevIns, DMAREVENTTYPE_FAULT);
1862 return VINF_SUCCESS;
1863}
1864
1865
1866/**
1867 * Handles writes to FSTS_REG.
1868 *
1869 * @returns Strict VBox status code.
1870 * @param pDevIns The IOMMU device instance.
1871 * @param uFstsReg The value written to FSTS_REG.
1872 * @param uPrev The value in FSTS_REG prior to writing it.
1873 */
1874static VBOXSTRICTRC dmarFstsRegWrite(PPDMDEVINS pDevIns, uint32_t uFstsReg, uint32_t uPrev)
1875{
1876 /*
1877 * If software clears other status bits in FSTS_REG (pertaining to primary fault logging),
1878 * the interrupt pending (IP) bit must be cleared.
1879 *
1880 * See Intel VT-d spec. 10.4.10 "Fault Event Control Register".
1881 */
1882 uint32_t const fChanged = uPrev ^ uFstsReg;
1883 if (fChanged & ( VTD_BF_FSTS_REG_ICE_MASK | VTD_BF_FSTS_REG_ITE_MASK
1884 | VTD_BF_FSTS_REG_IQE_MASK | VTD_BF_FSTS_REG_PFO_MASK))
1885 {
1886 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1887 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FECTL_REG, ~VTD_BF_FECTL_REG_IP_MASK, 0 /* fOrMask */);
1888 }
1889 return VINF_SUCCESS;
1890}
1891
1892
1893/**
1894 * Handles writes to IQT_REG.
1895 *
1896 * @returns Strict VBox status code.
1897 * @param pDevIns The IOMMU device instance.
1898 * @param offReg The MMIO register offset.
1899 * @param uIqtReg The value written to IQT_REG.
1900 */
1901static VBOXSTRICTRC dmarIqtRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint64_t uIqtReg)
1902{
1903 /* We only care about the low 32-bits; the high 32-bits are reserved. */
1904 Assert(offReg == VTD_MMIO_OFF_IQT_REG);
1905 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1906
1907 /* Paranoia. */
1908 Assert(!(uIqtReg & ~VTD_BF_IQT_REG_QT_MASK));
1909
1910 uint32_t const offQt = uIqtReg;
1911 uint64_t const uIqaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
1912 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
1913
1914 /* If the descriptor width is 256-bits, the queue tail offset must be aligned accordingly. */
1915 if ( fDw != VTD_IQA_REG_DW_256_BIT
1916 || !(offQt & RT_BIT(4)))
1917 dmarInvQueueThreadWakeUpIfNeeded(pDevIns);
1918 else
1919 {
1920 /* Hardware treats bit 4 as RsvdZ in this situation, so clear it. */
1921 dmarRegChangeRaw32(pThis, offReg, ~RT_BIT(4), 0 /* fOrMask */);
1922 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_Not_Aligned, VTDIQEI_QUEUE_TAIL_MISALIGNED);
1923 }
1924 return VINF_SUCCESS;
1925}
1926
1927
1928/**
1929 * Handles writes to IQA_REG.
1930 *
1931 * @returns Strict VBox status code.
1932 * @param pDevIns The IOMMU device instance.
1933 * @param offReg The MMIO register offset.
1934 * @param uIqaReg The value written to IQA_REG.
1935 */
1936static VBOXSTRICTRC dmarIqaRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint64_t uIqaReg)
1937{
1938 /* At present, we only care about the low 32-bits; the high 32-bits are data. */
1939 Assert(offReg == VTD_MMIO_OFF_IQA_REG); NOREF(offReg);
1940
1941 /** @todo What happens if IQA_REG is written when dmarInvQueueCanProcessRequests
1942 * returns true? The Intel VT-d spec. doesn't state anywhere that it
1943 * cannot happen or that it's ignored when it does happen. */
1944
1945 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1946 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
1947 if (fDw == VTD_IQA_REG_DW_256_BIT)
1948 {
1949 bool const fSupports256BitDw = (pThis->fExtCapReg & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK));
1950 if (fSupports256BitDw)
1951 { /* likely */ }
1952 else
1953 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_256_Invalid, VTDIQEI_INVALID_DESCRIPTOR_WIDTH);
1954 }
1955 /* else: 128-bit descriptor width is validated lazily, see explanation in dmarR3InvQueueProcessRequests. */
1956
1957 return VINF_SUCCESS;
1958}
1959
1960
1961/**
1962 * Handles writes to ICS_REG.
1963 *
1964 * @returns Strict VBox status code.
1965 * @param pDevIns The IOMMU device instance.
1966 * @param uIcsReg The value written to ICS_REG.
1967 */
1968static VBOXSTRICTRC dmarIcsRegWrite(PPDMDEVINS pDevIns, uint32_t uIcsReg)
1969{
1970 /*
1971 * If the IP field is set when software services the interrupt condition
1972 * (by clearing the IWC field), the IP field must be cleared.
1973 */
1974 if (!(uIcsReg & VTD_BF_ICS_REG_IWC_MASK))
1975 {
1976 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
1977 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_IECTL_REG, ~VTD_BF_IECTL_REG_IP_MASK, 0 /* fOrMask */);
1978 }
1979 return VINF_SUCCESS;
1980}
1981
1982
1983/**
1984 * Handles writes to IECTL_REG.
1985 *
1986 * @returns Strict VBox status code.
1987 * @param pDevIns The IOMMU device instance.
1988 * @param uIectlReg The value written to IECTL_REG.
1989 */
1990static VBOXSTRICTRC dmarIectlRegWrite(PPDMDEVINS pDevIns, uint32_t uIectlReg)
1991{
1992 /*
1993 * If software unmasks the interrupt when the interrupt is pending, we must raise
1994 * the interrupt now (which will consequently clear the interrupt pending (IP) bit).
1995 */
1996 if ( (uIectlReg & VTD_BF_IECTL_REG_IP_MASK)
1997 && !(uIectlReg & VTD_BF_IECTL_REG_IM_MASK))
1998 dmarEventRaiseInterrupt(pDevIns, DMAREVENTTYPE_INV_COMPLETE);
1999 return VINF_SUCCESS;
2000}
2001
2002
2003/**
2004 * Handles writes to FRCD_REG (High 64-bits).
2005 *
2006 * @returns Strict VBox status code.
2007 * @param pDevIns The IOMMU device instance.
2008 * @param offReg The MMIO register offset.
2009 * @param cbReg The size of the MMIO access (in bytes).
2010 * @param uFrcdHiReg The value written to FRCD_REG.
2011 * @param uPrev The value in FRCD_REG prior to writing it.
2012 */
2013static VBOXSTRICTRC dmarFrcdHiRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint8_t cbReg, uint64_t uFrcdHiReg, uint64_t uPrev)
2014{
2015 /* We only care about responding to writes of the high 32-bits; the low 32-bits are read-only. */
2016 if (offReg + cbReg > DMAR_MMIO_OFF_FRCD_HI_REG + 4)
2017 {
2018 /*
2019 * If software cleared the RW1C F (fault) bit in all FRCD_REGs, hardware clears the
2020 * Primary Pending Fault (PPF) and the interrupt pending (IP) bits. Our implementation
2021 * has only 1 FRCD register.
2022 *
2023 * See Intel VT-d spec. 10.4.10 "Fault Event Control Register".
2024 */
2025 AssertCompile(DMAR_FRCD_REG_COUNT == 1);
2026 uint64_t const fChanged = uPrev ^ uFrcdHiReg;
2027 if (fChanged & VTD_BF_1_FRCD_REG_F_MASK)
2028 {
2029 Assert(!(uFrcdHiReg & VTD_BF_1_FRCD_REG_F_MASK)); /* Software should only ever be able to clear this bit. */
2030 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2031 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FSTS_REG, ~VTD_BF_FSTS_REG_PPF_MASK, 0 /* fOrMask */);
2032 dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_FECTL_REG, ~VTD_BF_FECTL_REG_IP_MASK, 0 /* fOrMask */);
2033 }
2034 }
2035 return VINF_SUCCESS;
2036}
2037
2038
2039/**
2040 * Performs a PCI target abort for a DMA remapping (DR) operation.
2041 *
2042 * @param pDevIns The IOMMU device instance.
2043 */
2044static void dmarDrTargetAbort(PPDMDEVINS pDevIns)
2045{
2046 /** @todo r=ramshankar: I don't know for sure if a PCI target abort is caused or not
2047 * as the Intel VT-d spec. is vague. Wording seems to suggest it does, but
2048 * who knows. */
2049 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
2050 uint16_t const u16Status = PDMPciDevGetStatus(pPciDev) | VBOX_PCI_STATUS_SIG_TARGET_ABORT;
2051 PDMPciDevSetStatus(pPciDev, u16Status);
2052}
2053
2054
2055/**
2056 * Checks whether the address width (AW) is supported by our hardware
2057 * implementation for legacy mode address translation.
2058 *
2059 * @returns @c true if it's supported, @c false otherwise.
2060 * @param pThis The shared DMAR device state.
2061 * @param pCtxEntry The context entry.
2062 * @param pcPagingLevel Where to store the paging level. Optional, can be NULL.
2063 */
2064static bool dmarDrLegacyModeIsAwValid(PCDMAR pThis, PCVTD_CONTEXT_ENTRY_T pCtxEntry, uint8_t *pcPagingLevel)
2065{
2066 uint8_t const fTt = RT_BF_GET(pCtxEntry->au64[0], VTD_BF_0_CONTEXT_ENTRY_TT);
2067 uint8_t const fAw = RT_BF_GET(pCtxEntry->au64[1], VTD_BF_1_CONTEXT_ENTRY_AW);
2068 uint8_t const fAwMask = RT_BIT(fAw);
2069 uint8_t const fSagaw = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SAGAW);
2070 Assert(!(fSagaw & ~(RT_BIT(1) | RT_BIT(2) | RT_BIT(3))));
2071
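    /* The AW field encodes the AGAW: 1 implies 39-bit (3-level), 2 implies 48-bit (4-level) and
       3 implies 57-bit (5-level) paging, hence paging level = AW + 2. */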
2072 uint8_t const cPagingLevel = fAw + 2;
2073 if (pcPagingLevel)
2074 *pcPagingLevel = cPagingLevel;
2075
2076 /* With pass-through, the address width must be the largest AGAW supported by hardware. */
2077 if (fTt == VTD_TT_UNTRANSLATED_PT)
2078 {
2079 Assert(pThis->cMaxPagingLevel >= 3 && pThis->cMaxPagingLevel <= 5); /* Paranoia. */
2080 return cPagingLevel == pThis->cMaxPagingLevel;
2081 }
2082
2083 /* The address width must be one of those supported by hardware. */
2084 if (fAw < 4)
2085 return (fSagaw & fAwMask) != 0;
2086
2087 return false;
2088}
2089
2090
2091/**
2092 * Reads a root entry from guest memory.
2093 *
2094 * @returns VBox status code.
2095 * @param pDevIns The IOMMU device instance.
2096 * @param uRtaddrReg The current RTADDR_REG value.
2097 * @param idxRootEntry The index of the root entry to read.
2098 * @param pRootEntry Where to store the read root entry.
2099 */
2100static int dmarDrReadRootEntry(PPDMDEVINS pDevIns, uint64_t uRtaddrReg, uint8_t idxRootEntry, PVTD_ROOT_ENTRY_T pRootEntry)
2101{
2102 size_t const cbRootEntry = sizeof(*pRootEntry);
2103 RTGCPHYS const GCPhysRootEntry = (uRtaddrReg & VTD_BF_RTADDR_REG_RTA_MASK) + (idxRootEntry * cbRootEntry);
2104 return PDMDevHlpPhysReadMeta(pDevIns, GCPhysRootEntry, pRootEntry, cbRootEntry);
2105}
2106
2107
2108/**
2109 * Reads a context entry from guest memory.
2110 *
2111 * @returns VBox status code.
2112 * @param pDevIns The IOMMU device instance.
2113 * @param GCPhysCtxTable The physical address of the context table.
2114 * @param idxCtxEntry The index of the context entry to read.
2115 * @param pCtxEntry Where to store the read context entry.
2116 */
2117static int dmarDrReadCtxEntry(PPDMDEVINS pDevIns, RTGCPHYS GCPhysCtxTable, uint8_t idxCtxEntry, PVTD_CONTEXT_ENTRY_T pCtxEntry)
2118{
2119 /* We don't verify bits 63:HAW of GCPhysCtxTable are 0 since reading from such an address should fail anyway. */
2120 size_t const cbCtxEntry = sizeof(*pCtxEntry);
2121 RTGCPHYS const GCPhysCtxEntry = GCPhysCtxTable + (idxCtxEntry * cbCtxEntry);
2122 return PDMDevHlpPhysReadMeta(pDevIns, GCPhysCtxEntry, pCtxEntry, cbCtxEntry);
2123}
2124
2125
2126/**
2127 * Validates and updates the output I/O page of a translation.
2128 *
2129 * @returns VBox status code.
2130 * @param pDevIns The IOMMU device instance.
2131 * @param GCPhysBase The output address of the translation.
2132 * @param cShift The page shift of the translated address.
2133 * @param fPerm The permissions granted for the translated region.
2134 * @param pMemReqIn The DMA memory request input.
2135 * @param pMemReqAux The DMA memory request auxiliary info.
2136 * @param pIoPageOut Where to store the output of the translation.
2137 */
2138static int dmarDrUpdateIoPageOut(PPDMDEVINS pDevIns, RTGCPHYS GCPhysBase, uint8_t cShift, uint8_t fPerm,
2139 PCDMARMEMREQIN pMemReqIn, PCDMARMEMREQAUX pMemReqAux, PDMARIOPAGE pIoPageOut)
2140{
2141 Assert(!(GCPhysBase & X86_PAGE_4K_OFFSET_MASK));
2142
2143 /* Ensure the output address is not in the interrupt address range. */
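    /* The unsigned subtraction below wraps for addresses below VBOX_MSI_ADDR_BASE, so a single compare
       covers both bounds of the interrupt address range. */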
2144 if (GCPhysBase - VBOX_MSI_ADDR_BASE >= VBOX_MSI_ADDR_SIZE)
2145 {
2146 pIoPageOut->GCPhysBase = GCPhysBase;
2147 pIoPageOut->cShift = cShift;
2148 pIoPageOut->fPerm = fPerm;
2149 return VINF_SUCCESS;
2150 }
2151
2152 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_AddrOut_Invalid, pMemReqIn, pMemReqAux);
2153 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2154}
2155
2156
2157/**
2158 * Performs second level translation by walking the I/O page tables.
2159 *
2160 * This is a DMA address-lookup callback function which performs the translation
2161 * (and access control) as part of the lookup.
2162 *
2163 * @returns VBox status code.
2164 * @param pDevIns The IOMMU device instance.
2165 * @param pMemReqIn The DMA memory request input.
2166 * @param pMemReqAux The DMA memory request auxiliary info.
2167 * @param pIoPageOut Where to store the output of the translation.
2168 */
2169static DECLCALLBACK(int) dmarDrSecondLevelTranslate(PPDMDEVINS pDevIns, PCDMARMEMREQIN pMemReqIn, PCDMARMEMREQAUX pMemReqAux,
2170 PDMARIOPAGE pIoPageOut)
2171{
2172 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR);
2173
2174 /* Sanity. */
2175 Assert(pIoPageOut);
2176 Assert(pMemReqIn->AddrRange.fPerm & (DMAR_PERM_READ | DMAR_PERM_WRITE));
2177 Assert( pMemReqAux->fTtm == VTD_TTM_LEGACY_MODE
2178 || pMemReqAux->fTtm == VTD_TTM_SCALABLE_MODE);
2179 Assert(!(pMemReqAux->GCPhysSlPt & X86_PAGE_4K_OFFSET_MASK));
2180
2181 /* Masks of reserved paging entry bits, indexed by paging level. */
2182 static uint64_t const s_auPtEntityInvMasks[] = { ~VTD_SL_PTE_VALID_MASK,
2183 ~VTD_SL_PDE_VALID_MASK,
2184 ~VTD_SL_PDPE_VALID_MASK,
2185 ~VTD_SL_PML4E_VALID_MASK,
2186 ~VTD_SL_PML5E_VALID_MASK };
2187
2188 /* Paranoia. */
2189 Assert(pMemReqAux->cPagingLevel >= 3 && pMemReqAux->cPagingLevel <= 5);
2190 AssertCompile(RT_ELEMENTS(s_auPtEntityInvMasks) == 5);
2191
2192 /* Second-level translation restricts the input address to an implementation-specific MGAW. */
2193 uint64_t const uAddrIn = pMemReqIn->AddrRange.uAddr;
2194 if (!(uAddrIn & pThis->fMgawInvMask))
2195 { /* likely */ }
2196 else
2197 {
2198 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_AddrIn_Invalid, pMemReqIn, pMemReqAux);
2199 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2200 }
2201
2202 /*
2203 * Traverse the I/O page table starting with the SLPTPTR (second-level page table pointer).
2204 * Unlike AMD IOMMU paging, here there is no feature for "skipping" levels.
2205 */
2206 uint64_t uPtEntity = pMemReqAux->GCPhysSlPt;
2207 for (int8_t idxLevel = pMemReqAux->cPagingLevel - 1; idxLevel >= 0; idxLevel--)
2208 {
2209 /*
2210 * Read the paging entry for the current level.
2211 */
2212 uint8_t const cLevelShift = X86_PAGE_4K_SHIFT + (idxLevel * 9);
2213 {
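            /* Each paging level indexes 512 (2^9) entries of 8 bytes each; extract this level's 9-bit
               index from the input address and convert it to a byte offset into the table. */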
2214 uint16_t const idxPte = (uAddrIn >> cLevelShift) & UINT64_C(0x1ff);
2215 uint16_t const offPte = idxPte << 3;
2216 RTGCPHYS const GCPhysPtEntity = (uPtEntity & X86_PAGE_4K_BASE_MASK) | offPte;
2217 int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysPtEntity, &uPtEntity, sizeof(uPtEntity));
2218 if (RT_SUCCESS(rc))
2219 { /* likely */ }
2220 else
2221 {
2222 if ((GCPhysPtEntity & X86_PAGE_BASE_MASK) == pMemReqAux->GCPhysSlPt)
2223 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Slpptr_Read_Failed, pMemReqIn, pMemReqAux);
2224 else
2225 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Read_Pte_Failed, pMemReqIn, pMemReqAux);
2226 break;
2227 }
2228 }
2229
2230 /*
2231 * Check I/O permissions.
2232 * This must be done prior to checking reserved bits for properly reporting errors SSL.2 and SSL.3.
2233 * See Intel spec. 7.1.3 "Fault conditions and Remapping hardware behavior for various request".
2234 */
2235 uint8_t const fReqPerm = pMemReqIn->AddrRange.fPerm & pThis->fPermValidMask;
2236 uint8_t const fPtPerm = uPtEntity & pThis->fPermValidMask;
2237 Assert(!(fReqPerm & DMAR_PERM_EXE)); /* No Execute-requests support yet. */
2238 Assert(!(pThis->fExtCapReg & VTD_BF_ECAP_REG_SLADS_MASK)); /* No Second-level access/dirty support. */
2239 if ((fPtPerm & fReqPerm) == fReqPerm)
2240 { /* likely */ }
2241 else
2242 {
2243 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Perm_Denied, pMemReqIn, pMemReqAux);
2244 break;
2245 }
2246
2247 /*
2248 * Validate reserved bits of the current paging entry.
2249 */
2250 if (!(uPtEntity & s_auPtEntityInvMasks[idxLevel]))
2251 { /* likely */ }
2252 else
2253 {
2254 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Pte_Rsvd, pMemReqIn, pMemReqAux);
2255 break;
2256 }
2257
2258 /*
2259 * Check if this is a 1GB page or a 2MB page.
2260 */
2261 AssertCompile(VTD_BF_SL_PDE_PS_MASK == VTD_BF_SL_PDPE_PS_MASK);
2262 uint8_t const fLargePage = RT_BF_GET(uPtEntity, VTD_BF_SL_PDE_PS);
2263 if (fLargePage && idxLevel > 0)
2264 {
2265 Assert(idxLevel == 1 || idxLevel == 2); /* Is guaranteed by the reserved bits check above. */
2266 uint8_t const fSllpsMask = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SLLPS);
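            /* SLLPS bit 0 advertises 2M (level 1) and bit 1 advertises 1G (level 2) large-page support,
               hence the (idxLevel - 1) bit test below. */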
2267 if (fSllpsMask & RT_BIT(idxLevel - 1))
2268 {
2269 /*
2270 * We don't support MTS (asserted below), hence IPAT and EMT fields of the paging entity are ignored.
2271 * All other reserved bits are identical to the regular page-size paging entity which we've already
2272 * checked above.
2273 */
2274 Assert(!(pThis->fExtCapReg & VTD_BF_ECAP_REG_MTS_MASK));
2275
2276 RTGCPHYS const GCPhysBase = uPtEntity & X86_GET_PAGE_BASE_MASK(cLevelShift);
2277 return dmarDrUpdateIoPageOut(pDevIns, GCPhysBase, cLevelShift, fPtPerm, pMemReqIn, pMemReqAux, pIoPageOut);
2278 }
2279
2280 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Pte_Sllps_Invalid, pMemReqIn, pMemReqAux);
2281 break;
2282 }
2283
2284 /*
2285 * If this is the final PTE, compute the translation address and we're done.
2286 */
2287 if (idxLevel == 0)
2288 {
2289 RTGCPHYS const GCPhysBase = uPtEntity & X86_GET_PAGE_BASE_MASK(cLevelShift);
2290 return dmarDrUpdateIoPageOut(pDevIns, GCPhysBase, cLevelShift, fPtPerm, pMemReqIn, pMemReqAux, pIoPageOut);
2291 }
2292 }
2293
2294 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2295}
2296
2297
2298/**
2299 * Looks up the range of addresses for a DMA memory request remapping.
2300 *
2301 * @returns VBox status code.
2302 * @param pDevIns The IOMMU device instance.
2303 * @param pfnLookup The DMA address lookup function.
2304 * @param pMemReqRemap The DMA memory request remapping info.
2305 */
2306static int dmarDrMemRangeLookup(PPDMDEVINS pDevIns, PFNDMADDRLOOKUP pfnLookup, PDMARMEMREQREMAP pMemReqRemap)
2307{
2308 AssertPtr(pfnLookup);
2309
2310 RTGCPHYS GCPhysAddr = NIL_RTGCPHYS;
2311 DMARMEMREQIN MemReqIn = pMemReqRemap->In;
2312 uint64_t const uAddrIn = MemReqIn.AddrRange.uAddr;
2313 size_t const cbAddrIn = MemReqIn.AddrRange.cb;
2314 uint64_t uAddrInBase = MemReqIn.AddrRange.uAddr & X86_PAGE_4K_BASE_MASK;
2315 uint64_t offAddrIn = MemReqIn.AddrRange.uAddr & X86_PAGE_4K_OFFSET_MASK;
2316 size_t cbRemaining = cbAddrIn;
2317 size_t const cbPage = X86_PAGE_4K_SIZE;
2318
2319 int rc;
2320 DMARIOPAGE IoPagePrev;
2321 RT_ZERO(IoPagePrev);
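    /* Translate one 4K input page at a time; stop early if the translated pages are not physically
       contiguous so the caller can split the access. */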
2322 for (;;)
2323 {
2324 /* Update the input memory request with the next address in our range that needs translation. */
2325 MemReqIn.AddrRange.uAddr = uAddrInBase;
2326 MemReqIn.AddrRange.cb = cbRemaining; /* Not currently accessed by pfnLookup, but keep things consistent. */
2327
2328 /* Lookup the physical page corresponding to the I/O virtual address. */
2329 DMARIOPAGE IoPage;
2330 rc = pfnLookup(pDevIns, &MemReqIn, &pMemReqRemap->Aux, &IoPage);
2331 if (RT_SUCCESS(rc))
2332 {
2333 /* Validate results of the translation. */
2334 Assert(IoPage.cShift >= X86_PAGE_4K_SHIFT && IoPage.cShift <= X86_PAGE_1G_SHIFT);
2335 Assert(!(IoPage.GCPhysBase & X86_GET_PAGE_OFFSET_MASK(IoPage.cShift)));
2336 Assert((IoPage.fPerm & MemReqIn.AddrRange.fPerm) == MemReqIn.AddrRange.fPerm);
2337
2338 /* Store the translated address before continuing to access more pages. */
2339 if (cbRemaining == cbAddrIn)
2340 {
2341 uint64_t const fOffMask = X86_GET_PAGE_OFFSET_MASK(IoPage.cShift);
2342 uint64_t const offAddrOut = uAddrIn & fOffMask;
2343 Assert(!(IoPage.GCPhysBase & fOffMask));
2344 GCPhysAddr = IoPage.GCPhysBase | offAddrOut;
2345 }
2346 /* Check if addresses translated so far result in a physically contiguous region. */
2347 /** @todo Ensure permissions are identical as well if we implement IOTLB caching
2348 * that relies on it being so. */
2349 else if (IoPagePrev.GCPhysBase + cbPage == IoPage.GCPhysBase)
2350 { /* likely */ }
2351 else
2352 {
2353 rc = VERR_OUT_OF_RANGE;
2354 break;
2355 }
2356
2357 /* Store the I/O page lookup from the first/previous access. */
2358 IoPagePrev = IoPage;
2359
2360 /* Check if we need to access more pages. */
2361 if (cbRemaining > cbPage - offAddrIn)
2362 {
2363 cbRemaining -= (cbPage - offAddrIn); /* Calculate how much more we need to access. */
2364 uAddrInBase += cbPage; /* Update address of the next access. */
2365 offAddrIn = 0; /* After first page, all pages are accessed from offset 0. */
2366 }
2367 else
2368 {
2369 /* Caller (PDM) doesn't expect more data accessed than what was requested. */
2370 cbRemaining = 0;
2371 break;
2372 }
2373 }
2374 else
2375 break;
2376 }
2377
2378 pMemReqRemap->Out.AddrRange.uAddr = GCPhysAddr;
2379 pMemReqRemap->Out.AddrRange.cb = cbAddrIn - cbRemaining;
2380 pMemReqRemap->Out.AddrRange.fPerm = IoPagePrev.fPerm;
2381 return rc;
2382}
2383
2384
2385/**
2386 * Handles legacy mode DMA address remapping.
2387 *
2388 * @returns VBox status code.
2389 * @param pDevIns The IOMMU device instance.
2390 * @param uRtaddrReg The current RTADDR_REG value.
2391 * @param pMemReqRemap The DMA memory request remapping info.
2392 */
2393static int dmarDrLegacyModeRemapAddr(PPDMDEVINS pDevIns, uint64_t uRtaddrReg, PDMARMEMREQREMAP pMemReqRemap)
2394{
2395 PCDMARMEMREQIN pMemReqIn = &pMemReqRemap->In;
2396 PDMARMEMREQAUX pMemReqAux = &pMemReqRemap->Aux;
2397 PDMARMEMREQOUT pMemReqOut = &pMemReqRemap->Out;
2398 Assert(pMemReqAux->fTtm == VTD_TTM_LEGACY_MODE); /* Paranoia. */
2399
2400 /* Read the root-entry from guest memory. */
2401 uint8_t const idxRootEntry = RT_HI_U8(pMemReqIn->idDevice);
2402 VTD_ROOT_ENTRY_T RootEntry;
2403 int rc = dmarDrReadRootEntry(pDevIns, uRtaddrReg, idxRootEntry, &RootEntry);
2404 if (RT_SUCCESS(rc))
2405 {
2406 /* Check if the root entry is present (must be done before validating reserved bits). */
2407 uint64_t const uRootEntryQword0 = RootEntry.au64[0];
2408 uint64_t const uRootEntryQword1 = RootEntry.au64[1];
2409 bool const fRootEntryPresent = RT_BF_GET(uRootEntryQword0, VTD_BF_0_ROOT_ENTRY_P);
2410 if (fRootEntryPresent)
2411 {
2412 /* Validate reserved bits in the root entry. */
2413 if ( !(uRootEntryQword0 & ~VTD_ROOT_ENTRY_0_VALID_MASK)
2414 && !(uRootEntryQword1 & ~VTD_ROOT_ENTRY_1_VALID_MASK))
2415 {
2416 /* Read the context-entry from guest memory. */
2417 RTGCPHYS const GCPhysCtxTable = uRootEntryQword0 & VTD_BF_0_ROOT_ENTRY_CTP_MASK;
2418 uint8_t const idxCtxEntry = RT_LO_U8(pMemReqIn->idDevice);
2419 VTD_CONTEXT_ENTRY_T CtxEntry;
2420 rc = dmarDrReadCtxEntry(pDevIns, GCPhysCtxTable, idxCtxEntry, &CtxEntry);
2421 if (RT_SUCCESS(rc))
2422 {
2423 uint64_t const uCtxEntryQword0 = CtxEntry.au64[0];
2424 uint64_t const uCtxEntryQword1 = CtxEntry.au64[1];
2425
2426 /* Note the FPD bit which software can use to suppress translation faults from here on. */
2427 pMemReqAux->fFpd = RT_BF_GET(uCtxEntryQword0, VTD_BF_0_CONTEXT_ENTRY_FPD);
2428
2429 /* Check if the context-entry is present (must be done before validating reserved bits). */
2430 bool const fCtxEntryPresent = RT_BF_GET(uCtxEntryQword0, VTD_BF_0_CONTEXT_ENTRY_P);
2431 if (fCtxEntryPresent)
2432 {
2433 /* Validate reserved bits in the context-entry. */
2434 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR);
2435 if ( !(uCtxEntryQword0 & ~VTD_CONTEXT_ENTRY_0_VALID_MASK)
2436 && !(uCtxEntryQword1 & ~pThis->fCtxEntryQw1ValidMask))
2437 {
2438 /* Get the domain ID for this mapping. */
2439 pMemReqOut->idDomain = RT_BF_GET(uCtxEntryQword1, VTD_BF_1_CONTEXT_ENTRY_DID);
2440
2441 /* Validate the translation type (TT). */
2442 uint8_t const fTt = RT_BF_GET(uCtxEntryQword0, VTD_BF_0_CONTEXT_ENTRY_TT);
2443 switch (fTt)
2444 {
2445 case VTD_TT_UNTRANSLATED_SLP:
2446 {
2447 /*
2448 * Untranslated requests are translated using second-level paging structures referenced
2449 * through SLPTPTR. Translated requests and Translation Requests are blocked.
2450 */
2451 if (pMemReqIn->enmAddrType == PCIADDRTYPE_UNTRANSLATED)
2452 {
2453 /* Validate the address width and get the paging level. */
2454 uint8_t cPagingLevel;
2455 if (dmarDrLegacyModeIsAwValid(pThis, &CtxEntry, &cPagingLevel))
2456 {
2457 /*
2458 * The second-level page table is located at the physical address specified
2459 * in the context entry with which we can finally perform second-level translation.
2460 */
2461 pMemReqAux->cPagingLevel = cPagingLevel;
2462 pMemReqAux->GCPhysSlPt = uCtxEntryQword0 & VTD_BF_0_CONTEXT_ENTRY_SLPTPTR_MASK;
2463 rc = dmarDrMemRangeLookup(pDevIns, dmarDrSecondLevelTranslate, pMemReqRemap);
2464 if (rc == VERR_OUT_OF_RANGE)
2465 rc = VINF_SUCCESS;
2466 return rc;
2467 }
2468 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_Ut_Aw_Invalid, pMemReqIn, pMemReqAux);
2469 }
2470 else
2471 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_Ut_At_Block, pMemReqIn, pMemReqAux);
2472 break;
2473 }
2474
2475 case VTD_TT_UNTRANSLATED_PT:
2476 {
2477 /*
2478 * Untranslated requests are processed as pass-through (PT) if PT is supported.
2479 * Translated and translation requests are blocked. If PT isn't supported this TT value
2480 * is reserved which I assume raises a fault (hence fallthru below).
2481 */
2482 if (pThis->fExtCapReg & VTD_BF_ECAP_REG_PT_MASK)
2483 {
2484 if (pMemReqRemap->In.enmAddrType == PCIADDRTYPE_UNTRANSLATED)
2485 {
2486 if (dmarDrLegacyModeIsAwValid(pThis, &CtxEntry, NULL /* pcPagingLevel */))
2487 {
2488 PDMARMEMREQOUT pOut = &pMemReqRemap->Out;
2489 PCDMARMEMREQIN pIn = &pMemReqRemap->In;
2490 pOut->AddrRange.uAddr = pIn->AddrRange.uAddr;
2491 pOut->AddrRange.cb = pIn->AddrRange.cb;
2492 pOut->AddrRange.fPerm = DMAR_PERM_ALL;
2493 return VINF_SUCCESS;
2494 }
2495 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_Pt_Aw_Invalid, pMemReqIn, pMemReqAux);
2496 }
2497 else
2498 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_Pt_At_Block, pMemReqIn, pMemReqAux);
2499 break;
2500 }
2501 RT_FALL_THRU();
2502 }
2503
2504 case VTD_TT_UNTRANSLATED_DEV_TLB:
2505 {
2506 /*
2507 * Untranslated, translated and translation requests are supported but require
2508 * device-TLB support. We don't support device-TLBs, so it's treated as reserved.
2509 */
2510 Assert(!(pThis->fExtCapReg & VTD_BF_ECAP_REG_DT_MASK));
2511 RT_FALL_THRU();
2512 }
2513
2514 default:
2515 {
2516 /* Any other TT value is reserved. */
2517 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_Tt_Invalid, pMemReqIn, pMemReqAux);
2518 break;
2519 }
2520 }
2521 }
2522 else
2523 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_CtxEntry_Rsvd, pMemReqIn, pMemReqAux);
2524 }
2525 else
2526 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_CtxEntry_Not_Present, pMemReqIn, pMemReqAux);
2527 }
2528 else
2529 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_CtxEntry_Read_Failed, pMemReqIn, pMemReqAux);
2530 }
2531 else
2532 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_RootEntry_Rsvd, pMemReqIn, pMemReqAux);
2533 }
2534 else
2535 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_RootEntry_Not_Present, pMemReqIn, pMemReqAux);
2536 }
2537 else
2538 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Lm_RootEntry_Read_Failed, pMemReqIn, pMemReqAux);
2539 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2540}
2541
2542
2543/**
2544 * Handles remapping of DMA address requests in scalable mode.
2545 *
2546 * @returns VBox status code.
2547 * @param pDevIns The IOMMU device instance.
2548 * @param uRtaddrReg The current RTADDR_REG value.
2549 * @param pMemReqRemap The DMA memory request remapping info.
2550 */
2551static int dmarDrScalableModeRemapAddr(PPDMDEVINS pDevIns, uint64_t uRtaddrReg, PDMARMEMREQREMAP pMemReqRemap)
2552{
2553 RT_NOREF3(pDevIns, uRtaddrReg, pMemReqRemap);
2554 return VERR_NOT_IMPLEMENTED;
2555}
2556
2557
2558/**
2559 * Gets the DMA access permissions and the address-translation request
2560 * type given the PDM IOMMU memory access flags.
2561 *
2562 * @param pDevIns The IOMMU device instance.
2563 * @param fFlags The access flags, see PDMIOMMU_MEM_F_XXX.
2564 * @param fBulk Whether this is a bulk memory access (used for
2565 * statistics).
2566 * @param penmReqType Where to store the address-translation request type.
2567 * @param pfReqPerm Where to store the DMA access permissions.
2568 */
2569static void dmarDrGetPermAndReqType(PPDMDEVINS pDevIns, uint32_t fFlags, bool fBulk, PVTDREQTYPE penmReqType, uint8_t *pfReqPerm)
2570{
2571 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2572 if (fFlags & PDMIOMMU_MEM_F_READ)
2573 {
2574 *penmReqType = VTDREQTYPE_READ;
2575 *pfReqPerm = DMAR_PERM_READ;
2576#ifdef VBOX_WITH_STATISTICS
2577 if (!fBulk)
2578 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemRead));
2579 else
2580 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemBulkRead));
2581#else
2582 RT_NOREF2(pThis, fBulk);
2583#endif
2584 }
2585 else
2586 {
2587 *penmReqType = VTDREQTYPE_WRITE;
2588 *pfReqPerm = DMAR_PERM_WRITE;
2589#ifdef VBOX_WITH_STATISTICS
2590 if (!fBulk)
2591 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemWrite));
2592 else
2593 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemBulkWrite));
2594#else
2595 RT_NOREF2(pThis, fBulk);
2596#endif
2597 }
2598}
2599
2600
2601/**
2602 * Handles DMA remapping based on the table translation mode (TTM).
2603 *
2604 * @returns VBox status code.
2605 * @param pDevIns The IOMMU device instance.
2606 * @param uRtaddrReg The current RTADDR_REG value.
2607 * @param pMemReqRemap The DMA memory request remapping info.
2608 */
2609static int dmarDrMemReqRemap(PPDMDEVINS pDevIns, uint64_t uRtaddrReg, PDMARMEMREQREMAP pMemReqRemap)
2610{
2611 int rc;
2612 switch (pMemReqRemap->Aux.fTtm)
2613 {
2614 case VTD_TTM_LEGACY_MODE:
2615 {
2616 rc = dmarDrLegacyModeRemapAddr(pDevIns, uRtaddrReg, pMemReqRemap);
2617 break;
2618 }
2619
2620 case VTD_TTM_SCALABLE_MODE:
2621 {
2622 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR);
2623 if (pThis->fExtCapReg & VTD_BF_ECAP_REG_SMTS_MASK)
2624 rc = dmarDrScalableModeRemapAddr(pDevIns, uRtaddrReg, pMemReqRemap);
2625 else
2626 {
2627 rc = VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2628 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Rta_Smts_Not_Supported, &pMemReqRemap->In, &pMemReqRemap->Aux);
2629 }
2630 break;
2631 }
2632
2633 case VTD_TTM_ABORT_DMA_MODE:
2634 {
2635 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR);
2636 if (pThis->fExtCapReg & VTD_BF_ECAP_REG_ADMS_MASK)
2637 dmarDrTargetAbort(pDevIns);
2638 else
2639 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Rta_Adms_Not_Supported, &pMemReqRemap->In, &pMemReqRemap->Aux);
2640 rc = VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2641 break;
2642 }
2643
2644 default:
2645 {
2646 rc = VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2647 dmarAtFaultRecord(pDevIns, kDmarDiag_At_Rta_Rsvd, &pMemReqRemap->In, &pMemReqRemap->Aux);
2648 break;
2649 }
2650 }
2651 return rc;
2652}
2653
2654
2655/**
2656 * Memory access bulk (one or more 4K pages) request from a device.
2657 *
2658 * @returns VBox status code.
2659 * @param pDevIns The IOMMU device instance.
2660 * @param idDevice The device ID (bus, device, function).
2661 * @param cIovas The number of addresses being accessed.
2662 * @param pauIovas The I/O virtual addresses for each page being accessed.
2663 * @param fFlags The access flags, see PDMIOMMU_MEM_F_XXX.
2664 * @param paGCPhysSpa Where to store the translated physical addresses.
2665 *
2666 * @thread Any.
2667 */
2668static DECLCALLBACK(int) iommuIntelMemBulkAccess(PPDMDEVINS pDevIns, uint16_t idDevice, size_t cIovas, uint64_t const *pauIovas,
2669 uint32_t fFlags, PRTGCPHYS paGCPhysSpa)
2670{
2671 /* Validate. */
2672 AssertPtr(pDevIns);
2673 Assert(cIovas > 0);
2674 AssertPtr(pauIovas);
2675 AssertPtr(paGCPhysSpa);
2676 Assert(!(fFlags & ~PDMIOMMU_MEM_F_VALID_MASK));
2677
2678 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2679 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
2680
2681 DMAR_LOCK(pDevIns, pThisCC);
2682 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
2683 uint64_t const uRtaddrReg = pThis->uRtaddrReg;
2684 DMAR_UNLOCK(pDevIns, pThisCC);
2685
2686 if (uGstsReg & VTD_BF_GSTS_REG_TES_MASK)
2687 {
2688 VTDREQTYPE enmReqType;
2689 uint8_t fReqPerm;
2690 dmarDrGetPermAndReqType(pDevIns, fFlags, true /* fBulk */, &enmReqType, &fReqPerm);
2691
2692 DMARMEMREQREMAP MemReqRemap;
2693 RT_ZERO(MemReqRemap);
2694 MemReqRemap.In.AddrRange.cb = X86_PAGE_SIZE;
2695 MemReqRemap.In.AddrRange.fPerm = fReqPerm;
2696 MemReqRemap.In.idDevice = idDevice;
2697 MemReqRemap.In.Pasid = NIL_PCIPASID;
2698 MemReqRemap.In.enmAddrType = PCIADDRTYPE_UNTRANSLATED;
2699 MemReqRemap.In.enmReqType = enmReqType;
2700 MemReqRemap.Aux.fTtm = RT_BF_GET(uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
2701 MemReqRemap.Out.AddrRange.uAddr = NIL_RTGCPHYS;
2702
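        /* Remap each I/O virtual address separately; each bulk entry covers a single 4K page. */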
2703 for (size_t i = 0; i < cIovas; i++)
2704 {
2705 MemReqRemap.In.AddrRange.uAddr = pauIovas[i] & X86_PAGE_BASE_MASK;
2706 int const rc = dmarDrMemReqRemap(pDevIns, uRtaddrReg, &MemReqRemap);
2707 if (RT_SUCCESS(rc))
2708 {
2709 paGCPhysSpa[i] = MemReqRemap.Out.AddrRange.uAddr | (pauIovas[i] & X86_PAGE_OFFSET_MASK);
2710 Assert(MemReqRemap.Out.AddrRange.cb == MemReqRemap.In.AddrRange.cb);
2711 }
2712 else
2713 {
2714 LogFlowFunc(("idDevice=%#x uIova=%#RX64 fPerm=%#x rc=%Rrc\n", idDevice, pauIovas[i], fReqPerm, rc));
2715 return rc;
2716 }
2717 }
2718 }
2719 else
2720 {
2721 /* Addresses are forwarded without translation when the translation is disabled. */
2722 for (size_t i = 0; i < cIovas; i++)
2723 paGCPhysSpa[i] = pauIovas[i];
2724 }
2725
2726 return VINF_SUCCESS;
2727}
2728
2729
2730/**
2731 * Memory access transaction from a device.
2732 *
2733 * @returns VBox status code.
2734 * @param pDevIns The IOMMU device instance.
2735 * @param idDevice The device ID (bus, device, function).
2736 * @param uIova The I/O virtual address being accessed.
2737 * @param cbIova The size of the access.
2738 * @param fFlags The access flags, see PDMIOMMU_MEM_F_XXX.
2739 * @param pGCPhysSpa Where to store the translated system physical address.
2740 * @param pcbContiguous Where to store the number of contiguous bytes translated
2741 * and permission-checked.
2742 *
2743 * @thread Any.
2744 */
2745static DECLCALLBACK(int) iommuIntelMemAccess(PPDMDEVINS pDevIns, uint16_t idDevice, uint64_t uIova, size_t cbIova,
2746 uint32_t fFlags, PRTGCPHYS pGCPhysSpa, size_t *pcbContiguous)
2747{
2748 /* Validate. */
2749 AssertPtr(pDevIns);
2750 AssertPtr(pGCPhysSpa);
2751 AssertPtr(pcbContiguous);
2752 Assert(cbIova > 0); /** @todo Are we going to support ZLR (zero-length reads to write-only pages)? */
2753 Assert(!(fFlags & ~PDMIOMMU_MEM_F_VALID_MASK));
2754
2755 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2756 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
2757
2758 DMAR_LOCK(pDevIns, pThisCC);
2759 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
2760 uint64_t const uRtaddrReg = pThis->uRtaddrReg;
2761 DMAR_UNLOCK(pDevIns, pThisCC);
2762
2763 if (uGstsReg & VTD_BF_GSTS_REG_TES_MASK)
2764 {
2765 VTDREQTYPE enmReqType;
2766 uint8_t fReqPerm;
2767 dmarDrGetPermAndReqType(pDevIns, fFlags, false /* fBulk */, &enmReqType, &fReqPerm);
2768
2769 DMARMEMREQREMAP MemReqRemap;
2770 RT_ZERO(MemReqRemap);
2771 MemReqRemap.In.AddrRange.uAddr = uIova;
2772 MemReqRemap.In.AddrRange.cb = cbIova;
2773 MemReqRemap.In.AddrRange.fPerm = fReqPerm;
2774 MemReqRemap.In.idDevice = idDevice;
2775 MemReqRemap.In.Pasid = NIL_PCIPASID;
2776 MemReqRemap.In.enmAddrType = PCIADDRTYPE_UNTRANSLATED;
2777 MemReqRemap.In.enmReqType = enmReqType;
2778 MemReqRemap.Aux.fTtm = RT_BF_GET(uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
2779 MemReqRemap.Out.AddrRange.uAddr = NIL_RTGCPHYS;
2780
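        /* The lookup may translate fewer bytes than requested when the target pages are not physically
           contiguous; pcbContiguous reports how much was actually translated. */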
2781 int const rc = dmarDrMemReqRemap(pDevIns, uRtaddrReg, &MemReqRemap);
2782 *pGCPhysSpa = MemReqRemap.Out.AddrRange.uAddr;
2783 *pcbContiguous = MemReqRemap.Out.AddrRange.cb;
2784 return rc;
2785 }
2786
2787 *pGCPhysSpa = uIova;
2788 *pcbContiguous = cbIova;
2789 return VINF_SUCCESS;
2790}
2791
2792
2793/**
2794 * Reads an IRTE from guest memory.
2795 *
2796 * @returns VBox status code.
2797 * @param pDevIns The IOMMU device instance.
2798 * @param uIrtaReg The IRTA_REG.
2799 * @param idxIntr The interrupt index.
2800 * @param pIrte Where to store the read IRTE.
2801 */
2802static int dmarIrReadIrte(PPDMDEVINS pDevIns, uint64_t uIrtaReg, uint16_t idxIntr, PVTD_IRTE_T pIrte)
2803{
2804 Assert(idxIntr < VTD_IRTA_REG_GET_ENTRY_COUNT(uIrtaReg));
2805
2806 size_t const cbIrte = sizeof(*pIrte);
2807 RTGCPHYS const GCPhysIrte = (uIrtaReg & VTD_BF_IRTA_REG_IRTA_MASK) + (idxIntr * cbIrte);
2808 return PDMDevHlpPhysReadMeta(pDevIns, GCPhysIrte, pIrte, cbIrte);
2809}
2810
2811
2812/**
2813 * Remaps the source MSI to the destination MSI given the IRTE.
2814 *
2815 * @param fExtIntrMode Whether extended interrupt mode is enabled (i.e
2816 * IRTA_REG.EIME).
2817 * @param pIrte The IRTE used for the remapping.
2818 * @param pMsiIn The source MSI (currently unused).
2819 * @param pMsiOut Where to store the remapped MSI.
2820 */
2821static void dmarIrRemapFromIrte(bool fExtIntrMode, PCVTD_IRTE_T pIrte, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
2822{
2823 NOREF(pMsiIn);
2824 uint64_t const uIrteQword0 = pIrte->au64[0];
2825
2826 /*
2827 * Let's start with a clean slate and preserve unspecified bits if the need arises.
2828 * For instance, address bits 1:0 are supposed to be "ignored" by remapping hardware,
2829 * but it's not clear if hardware zeroes out these bits in the remapped MSI or if
2830 * it copies them from the source MSI.
2831 */
2832 RT_ZERO(*pMsiOut);
2833 pMsiOut->Addr.n.u1DestMode = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_DM);
2834 pMsiOut->Addr.n.u1RedirHint = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_RH);
2835 pMsiOut->Addr.n.u12Addr = VBOX_MSI_ADDR_BASE >> VBOX_MSI_ADDR_SHIFT;
2836 if (fExtIntrMode)
2837 {
2838 /*
2839 * Apparently the DMAR stuffs the high 24-bits of the destination ID into the
2840 * high 24-bits of the upper 32-bits of the message address, see @bugref{9967#c22}.
2841 */
2842 uint32_t const idDest = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_DST);
2843 pMsiOut->Addr.n.u8DestId = idDest;
2844 pMsiOut->Addr.n.u32Rsvd0 = idDest & UINT32_C(0xffffff00);
2845 }
2846 else
2847 pMsiOut->Addr.n.u8DestId = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_DST_XAPIC);
2848
2849 pMsiOut->Data.n.u8Vector = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_V);
2850 pMsiOut->Data.n.u3DeliveryMode = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_DLM);
2851 pMsiOut->Data.n.u1Level = 1;
2852 pMsiOut->Data.n.u1TriggerMode = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_TM);
2853}
2854
2855
2856/**
2857 * Handles remapping of interrupts in remappable interrupt format.
2858 *
2859 * @returns VBox status code.
2860 * @param pDevIns The IOMMU device instance.
2861 * @param uIrtaReg The IRTA_REG.
2862 * @param idDevice The device ID (bus, device, function).
2863 * @param pMsiIn The source MSI.
2864 * @param pMsiOut Where to store the remapped MSI.
2865 */
2866static int dmarIrRemapIntr(PPDMDEVINS pDevIns, uint64_t uIrtaReg, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
2867{
2868 Assert(pMsiIn->Addr.dmar_remap.fIntrFormat == VTD_INTR_FORMAT_REMAPPABLE);
2869
2870 /* Validate reserved bits in the interrupt request. */
2871 AssertCompile(VTD_REMAPPABLE_MSI_ADDR_VALID_MASK == UINT32_MAX);
2872 if (!(pMsiIn->Data.u32 & ~VTD_REMAPPABLE_MSI_DATA_VALID_MASK))
2873 {
2874 /* Compute the index into the interrupt remap table. */
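        /* The 16-bit handle is split across the MSI address (bits 19:5 hold handle[14:0], bit 2 holds
           handle[15]); when SHV is set, the subhandle from the MSI data is added to it. */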
2875 uint16_t const uHandleHi = RT_BF_GET(pMsiIn->Addr.au32[0], VTD_BF_REMAPPABLE_MSI_ADDR_HANDLE_HI);
2876 uint16_t const uHandleLo = RT_BF_GET(pMsiIn->Addr.au32[0], VTD_BF_REMAPPABLE_MSI_ADDR_HANDLE_LO);
2877 uint16_t const uHandle = uHandleLo | (uHandleHi << 15);
2878 bool const fSubHandleValid = RT_BF_GET(pMsiIn->Addr.au32[0], VTD_BF_REMAPPABLE_MSI_ADDR_SHV);
2879 uint16_t const idxIntr = fSubHandleValid
2880 ? uHandle + RT_BF_GET(pMsiIn->Data.u32, VTD_BF_REMAPPABLE_MSI_DATA_SUBHANDLE)
2881 : uHandle;
2882
2883 /* Validate the index. */
2884 uint32_t const cEntries = VTD_IRTA_REG_GET_ENTRY_COUNT(uIrtaReg);
2885 if (idxIntr < cEntries)
2886 {
2887 /** @todo Implement and read IRTE from interrupt-entry cache here. */
2888
2889 /* Read the interrupt remap table entry (IRTE) at the index. */
2890 VTD_IRTE_T Irte;
2891 int rc = dmarIrReadIrte(pDevIns, uIrtaReg, idxIntr, &Irte);
2892 if (RT_SUCCESS(rc))
2893 {
2894 /* Check if the IRTE is present (this must be done -before- checking reserved bits). */
2895 uint64_t const uIrteQword0 = Irte.au64[0];
2896 uint64_t const uIrteQword1 = Irte.au64[1];
2897 bool const fPresent = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_P);
2898 if (fPresent)
2899 {
2900 /* Validate reserved bits in the IRTE. */
2901 bool const fExtIntrMode = RT_BF_GET(uIrtaReg, VTD_BF_IRTA_REG_EIME);
2902 uint64_t const fQw0ValidMask = fExtIntrMode ? VTD_IRTE_0_X2APIC_VALID_MASK : VTD_IRTE_0_XAPIC_VALID_MASK;
2903 if ( !(uIrteQword0 & ~fQw0ValidMask)
2904 && !(uIrteQword1 & ~VTD_IRTE_1_VALID_MASK))
2905 {
2906 /* Validate requester id (the device ID) as configured in the IRTE. */
2907 bool fSrcValid;
2908 DMARDIAG enmIrDiag;
2909 uint8_t const fSvt = RT_BF_GET(uIrteQword1, VTD_BF_1_IRTE_SVT);
2910 switch (fSvt)
2911 {
2912 case VTD_IRTE_SVT_NONE:
2913 {
2914 fSrcValid = true;
2915 enmIrDiag = kDmarDiag_None;
2916 break;
2917 }
2918
2919 case VTD_IRTE_SVT_VALIDATE_MASK:
2920 {
2921 static uint16_t const s_afValidMasks[] = { 0xffff, 0xfffb, 0xfff9, 0xfff8 };
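                                /* SQ selects how many low function-number bits of the source-id to ignore:
                                   SQ=0 compares all bits, SQ=1 ignores bit 2, SQ=2 ignores bits 2:1 and
                                   SQ=3 ignores bits 2:0 (hence the masks above). */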
2922 uint8_t const idxMask = RT_BF_GET(uIrteQword1, VTD_BF_1_IRTE_SQ) & 3;
2923 uint16_t const fValidMask = s_afValidMasks[idxMask];
2924 uint16_t const idSource = RT_BF_GET(uIrteQword1, VTD_BF_1_IRTE_SID);
2925 fSrcValid = (idDevice & fValidMask) == (idSource & fValidMask);
2926 enmIrDiag = kDmarDiag_Ir_Rfi_Irte_Svt_Masked;
2927 break;
2928 }
2929
2930 case VTD_IRTE_SVT_VALIDATE_BUS_RANGE:
2931 {
2932 uint16_t const idSource = RT_BF_GET(uIrteQword1, VTD_BF_1_IRTE_SID);
2933 uint8_t const uBusFirst = RT_HI_U8(idSource);
2934 uint8_t const uBusLast = RT_LO_U8(idSource);
2935 uint8_t const idDeviceBus = idDevice >> VBOX_PCI_BUS_SHIFT;
2936 fSrcValid = (idDeviceBus >= uBusFirst && idDeviceBus <= uBusLast);
2937 enmIrDiag = kDmarDiag_Ir_Rfi_Irte_Svt_Bus;
2938 break;
2939 }
2940
2941 default:
2942 {
2943 fSrcValid = false;
2944 enmIrDiag = kDmarDiag_Ir_Rfi_Irte_Svt_Rsvd;
2945 break;
2946 }
2947 }
2948
2949 if (fSrcValid)
2950 {
2951 uint8_t const fPostedMode = RT_BF_GET(uIrteQword0, VTD_BF_0_IRTE_IM);
2952 if (!fPostedMode)
2953 {
2954 dmarIrRemapFromIrte(fExtIntrMode, &Irte, pMsiIn, pMsiOut);
2955 return VINF_SUCCESS;
2956 }
2957 dmarIrFaultRecord(pDevIns, kDmarDiag_Ir_Rfi_Irte_Mode_Invalid, idDevice, idxIntr, &Irte);
2958 }
2959 else
2960 dmarIrFaultRecord(pDevIns, enmIrDiag, idDevice, idxIntr, &Irte);
2961 }
2962 else
2963 dmarIrFaultRecord(pDevIns, kDmarDiag_Ir_Rfi_Irte_Rsvd, idDevice, idxIntr, &Irte);
2964 }
2965 else
2966 dmarIrFaultRecord(pDevIns, kDmarDiag_Ir_Rfi_Irte_Not_Present, idDevice, idxIntr, &Irte);
2967 }
2968 else
2969 dmarIrFaultRecord(pDevIns, kDmarDiag_Ir_Rfi_Irte_Read_Failed, idDevice, idxIntr, NULL /* pIrte */);
2970 }
2971 else
2972 dmarIrFaultRecord(pDevIns, kDmarDiag_Ir_Rfi_Intr_Index_Invalid, idDevice, idxIntr, NULL /* pIrte */);
2973 }
2974 else
2975 dmarIrFaultRecord(pDevIns, kDmarDiag_Ir_Rfi_Rsvd, idDevice, 0 /* idxIntr */, NULL /* pIrte */);
2976 return VERR_IOMMU_INTR_REMAP_DENIED;
2977}
2978
2979
2980/**
2981 * Interrupt remap request from a device.
2982 *
2983 * @returns VBox status code.
2984 * @param pDevIns The IOMMU device instance.
2985 * @param idDevice The device ID (bus, device, function).
2986 * @param pMsiIn The source MSI.
2987 * @param pMsiOut Where to store the remapped MSI.
2988 */
2989static DECLCALLBACK(int) iommuIntelMsiRemap(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
2990{
2991 /* Validate. */
2992 Assert(pDevIns);
2993 Assert(pMsiIn);
2994 Assert(pMsiOut);
2995 RT_NOREF1(idDevice);
2996
2997 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
2998 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
2999
3000 /* Lock and read all registers required for interrupt remapping up-front. */
3001 DMAR_LOCK(pDevIns, pThisCC);
3002 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
3003 uint64_t const uIrtaReg = pThis->uIrtaReg;
3004 DMAR_UNLOCK(pDevIns, pThisCC);
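/* The remapping logic below operates on this snapshot of GSTS_REG and the cached IRTA_REG
   without holding the DMAR lock. */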
3005
3006 /* Check if interrupt remapping is enabled. */
3007 if (uGstsReg & VTD_BF_GSTS_REG_IRES_MASK)
3008 {
3009 bool const fIsRemappable = RT_BF_GET(pMsiIn->Addr.au32[0], VTD_BF_REMAPPABLE_MSI_ADDR_INTR_FMT);
3010 if (!fIsRemappable)
3011 {
3012 /* Handle compatibility format interrupts. */
3013 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMsiRemapCfi));
3014
3015 /* If EIME is enabled or CFIs are disabled, block the interrupt. */
3016 if ( (uIrtaReg & VTD_BF_IRTA_REG_EIME_MASK)
3017 || !(uGstsReg & VTD_BF_GSTS_REG_CFIS_MASK))
3018 {
3019 dmarIrFaultRecord(pDevIns, kDmarDiag_Ir_Cfi_Blocked, VTDIRFAULT_CFI_BLOCKED, idDevice, 0 /* idxIntr */);
3020 return VERR_IOMMU_INTR_REMAP_DENIED;
3021 }
3022
3023 /* Interrupt isn't subject to remapping, pass-through the interrupt. */
3024 *pMsiOut = *pMsiIn;
3025 return VINF_SUCCESS;
3026 }
3027
3028 /* Handle remappable format interrupts. */
3029 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMsiRemapRfi));
3030 return dmarIrRemapIntr(pDevIns, uIrtaReg, idDevice, pMsiIn, pMsiOut);
3031 }
3032
3033 /* Interrupt-remapping isn't enabled, all interrupts are pass-through. */
3034 *pMsiOut = *pMsiIn;
3035 return VINF_SUCCESS;
3036}
3037
3038
3039/**
3040 * @callback_method_impl{FNIOMMMIONEWWRITE}
3041 */
3042static DECLCALLBACK(VBOXSTRICTRC) dmarMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3043{
3044 RT_NOREF1(pvUser);
3045 DMAR_ASSERT_MMIO_ACCESS_RET(off, cb);
3046
3047 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
3048 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioWrite));
3049
3050 uint16_t const offReg = off;
3051 uint16_t const offLast = offReg + cb - 1;
3052 if (DMAR_IS_MMIO_OFF_VALID(offLast))
3053 {
3054 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
3055 DMAR_LOCK_RET(pDevIns, pThisCC, VINF_IOM_R3_MMIO_WRITE);
3056
3057 uint64_t uPrev = 0;
3058 uint64_t const uRegWritten = cb == 8 ? dmarRegWrite64(pThis, offReg, *(uint64_t *)pv, &uPrev)
3059 : dmarRegWrite32(pThis, offReg, *(uint32_t *)pv, (uint32_t *)&uPrev);
3060 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
3061 switch (off)
3062 {
3063 case VTD_MMIO_OFF_GCMD_REG: /* 32-bit */
3064 {
3065 rcStrict = dmarGcmdRegWrite(pDevIns, uRegWritten);
3066 break;
3067 }
3068
3069 case VTD_MMIO_OFF_CCMD_REG: /* 64-bit */
3070 case VTD_MMIO_OFF_CCMD_REG + 4:
3071 {
3072 rcStrict = dmarCcmdRegWrite(pDevIns, offReg, cb, uRegWritten);
3073 break;
3074 }
3075
3076 case VTD_MMIO_OFF_FSTS_REG: /* 32-bit */
3077 {
3078 rcStrict = dmarFstsRegWrite(pDevIns, uRegWritten, uPrev);
3079 break;
3080 }
3081
3082 case VTD_MMIO_OFF_FECTL_REG: /* 32-bit */
3083 {
3084 rcStrict = dmarFectlRegWrite(pDevIns, uRegWritten);
3085 break;
3086 }
3087
3088 case VTD_MMIO_OFF_IQT_REG: /* 64-bit */
3089 /* VTD_MMIO_OFF_IQT_REG + 4: */ /* High 32-bits reserved. */
3090 {
3091 rcStrict = dmarIqtRegWrite(pDevIns, offReg, uRegWritten);
3092 break;
3093 }
3094
3095 case VTD_MMIO_OFF_IQA_REG: /* 64-bit */
3096 /* VTD_MMIO_OFF_IQA_REG + 4: */ /* High 32-bits data. */
3097 {
3098 rcStrict = dmarIqaRegWrite(pDevIns, offReg, uRegWritten);
3099 break;
3100 }
3101
3102 case VTD_MMIO_OFF_ICS_REG: /* 32-bit */
3103 {
3104 rcStrict = dmarIcsRegWrite(pDevIns, uRegWritten);
3105 break;
3106 }
3107
3108 case VTD_MMIO_OFF_IECTL_REG: /* 32-bit */
3109 {
3110 rcStrict = dmarIectlRegWrite(pDevIns, uRegWritten);
3111 break;
3112 }
3113
3114 case DMAR_MMIO_OFF_FRCD_HI_REG: /* 64-bit */
3115 case DMAR_MMIO_OFF_FRCD_HI_REG + 4:
3116 {
3117 rcStrict = dmarFrcdHiRegWrite(pDevIns, offReg, cb, uRegWritten, uPrev);
3118 break;
3119 }
3120 }
3121
3122 DMAR_UNLOCK(pDevIns, pThisCC);
3123 LogFlowFunc(("offReg=%#x uRegWritten=%#RX64 rc=%Rrc\n", offReg, uRegWritten, VBOXSTRICTRC_VAL(rcStrict)));
3124 return rcStrict;
3125 }
3126
3127 return VINF_IOM_MMIO_UNUSED_FF;
3128}
3129
3130
3131/**
3132 * @callback_method_impl{FNIOMMMIONEWREAD}
3133 */
3134static DECLCALLBACK(VBOXSTRICTRC) dmarMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3135{
3136 RT_NOREF1(pvUser);
3137 DMAR_ASSERT_MMIO_ACCESS_RET(off, cb);
3138
3139 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
3140 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioRead));
3141
3142 uint16_t const offReg = off;
3143 uint16_t const offLast = offReg + cb - 1;
3144 if (DMAR_IS_MMIO_OFF_VALID(offLast))
3145 {
3146 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
3147 DMAR_LOCK_RET(pDevIns, pThisCC, VINF_IOM_R3_MMIO_READ);
3148
3149 if (cb == 8)
3150 {
3151 *(uint64_t *)pv = dmarRegRead64(pThis, offReg);
3152 LogFlowFunc(("offReg=%#x pv=%#RX64\n", offReg, *(uint64_t *)pv));
3153 }
3154 else
3155 {
3156 *(uint32_t *)pv = dmarRegRead32(pThis, offReg);
3157 LogFlowFunc(("offReg=%#x pv=%#RX32\n", offReg, *(uint32_t *)pv));
3158 }
3159
3160 DMAR_UNLOCK(pDevIns, pThisCC);
3161 return VINF_SUCCESS;
3162 }
3163
3164 return VINF_IOM_MMIO_UNUSED_FF;
3165}
3166
3167
3168#ifdef IN_RING3
3169/**
3170 * Process requests in the invalidation queue.
3171 *
3172 * @param pDevIns The IOMMU device instance.
3173 * @param pvRequests The requests to process.
3174 * @param cbRequests The size of all requests (in bytes).
3175 * @param fDw The descriptor width (VTD_IQA_REG_DW_128_BIT or
3176 * VTD_IQA_REG_DW_256_BIT).
3177 * @param fTtm The table translation mode. Must not be VTD_TTM_RSVD.
3178 */
3179static void dmarR3InvQueueProcessRequests(PPDMDEVINS pDevIns, void const *pvRequests, uint32_t cbRequests, uint8_t fDw,
3180 uint8_t fTtm)
3181{
3182#define DMAR_IQE_FAULT_RECORD_RET(a_enmDiag, a_enmIqei) \
3183 do \
3184 { \
3185 dmarIqeFaultRecord(pDevIns, (a_enmDiag), (a_enmIqei)); \
3186 return; \
3187 } while (0)
3188
3189 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
3190 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
3191
3192 DMAR_ASSERT_LOCK_IS_NOT_OWNER(pDevIns, pThisR3);
3193 Assert(fTtm != VTD_TTM_RSVD); /* Should've been handled by caller. */
3194
3195 /*
3196 * The below check is redundant since we check both TTM and DW for each
3197 * descriptor type we process. However, the order of errors reported by hardware
3198 * may differ; hence this is kept commented out (but not removed) in case we need to
3199 * change this in the future.
3200 *
3201 * In our implementation, we would report the descriptor type as invalid,
3202 * while on real hardware it may report descriptor width as invalid.
3203 * The Intel VT-d spec. is not clear which error takes precedence.
3204 */
3205#if 0
3206 /*
3207 * Verify that 128-bit descriptors are not used when operating in scalable mode.
3208 * We don't check this while software writes IQA_REG but defer it until now because
3209 * RTADDR_REG can be updated lazily (via GCMD_REG.SRTP). The 256-bit descriptor check
3210 * -IS- performed when software writes IQA_REG since it only requires checking against
3211 * immutable hardware features.
3212 */
3213 if ( fTtm != VTD_TTM_SCALABLE_MODE
3214 || fDw != VTD_IQA_REG_DW_128_BIT)
3215 { /* likely */ }
3216 else
3217 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_IqaReg_Dw_128_Invalid, VTDIQEI_INVALID_DESCRIPTOR_WIDTH);
3218#endif
3219
3220 /*
3221 * Process requests in FIFO order.
3222 */
3223 uint8_t const cbDsc = fDw == VTD_IQA_REG_DW_256_BIT ? 32 : 16;
3224 for (uint32_t offDsc = 0; offDsc < cbRequests; offDsc += cbDsc)
3225 {
3226 uint64_t const *puDscQwords = (uint64_t const *)((uintptr_t)pvRequests + offDsc);
3227 uint64_t const uQword0 = puDscQwords[0];
3228 uint64_t const uQword1 = puDscQwords[1];
3229 uint8_t const fDscType = VTD_GENERIC_INV_DSC_GET_TYPE(uQword0);
3230 switch (fDscType)
3231 {
3232 case VTD_INV_WAIT_DSC_TYPE:
3233 {
3234 /* Validate descriptor type. */
3235 if ( fTtm == VTD_TTM_LEGACY_MODE
3236 || fDw == VTD_IQA_REG_DW_256_BIT)
3237 { /* likely */ }
3238 else
3239 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_Invalid, VTDIQEI_INVALID_DESCRIPTOR_TYPE);
3240
3241 /* Validate reserved bits. */
3242 uint64_t const fValidMask0 = !(pThis->fExtCapReg & VTD_BF_ECAP_REG_PDS_MASK)
3243 ? VTD_INV_WAIT_DSC_0_VALID_MASK & ~VTD_BF_0_INV_WAIT_DSC_PD_MASK
3244 : VTD_INV_WAIT_DSC_0_VALID_MASK;
3245 if ( !(uQword0 & ~fValidMask0)
3246 && !(uQword1 & ~VTD_INV_WAIT_DSC_1_VALID_MASK))
3247 { /* likely */ }
3248 else
3249 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd, VTDIQEI_RSVD_FIELD_VIOLATION);
3250
3251 if (fDw == VTD_IQA_REG_DW_256_BIT)
3252 {
3253 if ( !puDscQwords[2]
3254 && !puDscQwords[3])
3255 { /* likely */ }
3256 else
3257 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd, VTDIQEI_RSVD_FIELD_VIOLATION);
3258 }
3259
3260 /* Perform status write (this must be done prior to generating the completion interrupt). */
3261 bool const fSw = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_SW);
3262 if (fSw)
3263 {
3264 uint32_t const uStatus = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_STDATA);
3265 RTGCPHYS const GCPhysStatus = uQword1 & VTD_BF_1_INV_WAIT_DSC_STADDR_MASK;
3266 int const rc = PDMDevHlpPhysWrite(pDevIns, GCPhysStatus, (void const*)&uStatus, sizeof(uStatus));
3267 AssertRC(rc);
3268 }
3269
3270 /* Generate invalidation event interrupt. */
3271 bool const fIf = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_IF);
3272 if (fIf)
3273 {
3274 DMAR_LOCK(pDevIns, pThisR3);
3275 dmarR3InvEventRaiseInterrupt(pDevIns);
3276 DMAR_UNLOCK(pDevIns, pThisR3);
3277 }
3278
3279 STAM_COUNTER_INC(&pThis->StatInvWaitDsc);
3280 break;
3281 }
3282
3283 case VTD_CC_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatCcInvDsc); break;
3284 case VTD_IOTLB_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatIotlbInvDsc); break;
3285 case VTD_DEV_TLB_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatDevtlbInvDsc); break;
3286 case VTD_IEC_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatIecInvDsc); break;
3287 case VTD_P_IOTLB_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatPasidIotlbInvDsc); break;
3288 case VTD_PC_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatPasidCacheInvDsc); break;
3289 case VTD_P_DEV_TLB_INV_DSC_TYPE: STAM_COUNTER_INC(&pThis->StatPasidDevtlbInvDsc); break;
3290 default:
3291 {
3292 /* Stop processing further requests. */
3293 LogFunc(("Invalid descriptor type: %#x\n", fDscType));
3294 DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Dsc_Type_Invalid, VTDIQEI_INVALID_DESCRIPTOR_TYPE);
3295 }
3296 }
3297 }
3298#undef DMAR_IQE_FAULT_RECORD_RET
3299}
3300
3301
3302/**
3303 * The invalidation-queue thread.
3304 *
3305 * @returns VBox status code.
3306 * @param pDevIns The IOMMU device instance.
3307 * @param pThread The invalidation-queue thread.
3308 */
3309static DECLCALLBACK(int) dmarR3InvQueueThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
3310{
3311 NOREF(pThread);
3312 LogFlowFunc(("\n"));
3313
3314 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
3315 return VINF_SUCCESS;
3316
3317 /*
3318 * Pre-allocate the maximum size of the invalidation queue allowed by the spec.
3319 * This prevents trashing the heap and deals with out-of-memory situations
3320 * up-front while starting the VM. It also spares the code from having to
3321 * dynamically grow/shrink the allocation based on how software sizes the queue.
3322 * Guests normally don't alter the queue size all the time, but that's not an
3323 * assumption we can make.
3324 */
3325 uint8_t const cMaxPages = 1 << VTD_BF_IQA_REG_QS_MASK;
3326 size_t const cbMaxQs = cMaxPages << X86_PAGE_SHIFT;
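/* IQA_REG.QS is a 3-bit field, so the largest encodable queue is 1 << 7 = 128 pages (512 KB). */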
3327 void *pvRequests = RTMemAllocZ(cbMaxQs);
3328 AssertPtrReturn(pvRequests, VERR_NO_MEMORY);
3329
3330 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
3331 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
3332
3333 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
3334 {
3335 /*
3336 * Sleep until we are woken up.
3337 */
3338 {
3339 int const rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEvtInvQueue, RT_INDEFINITE_WAIT);
3340 AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc), rc);
3341 if (RT_UNLIKELY(pThread->enmState != PDMTHREADSTATE_RUNNING))
3342 break;
3343 }
3344
3345 DMAR_LOCK(pDevIns, pThisR3);
3346 if (dmarInvQueueCanProcessRequests(pThis))
3347 {
3348 uint32_t offQueueHead;
3349 uint32_t offQueueTail;
3350 bool const fIsEmpty = dmarInvQueueIsEmptyEx(pThis, &offQueueHead, &offQueueTail);
3351 if (!fIsEmpty)
3352 {
3353 /*
3354 * Get the current queue size, descriptor width, queue base address and the
3355 * table translation mode while the lock is still held.
3356 */
3357 uint64_t const uIqaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
3358 uint8_t const cQueuePages = 1 << (uIqaReg & VTD_BF_IQA_REG_QS_MASK);
3359 uint32_t const cbQueue = cQueuePages << X86_PAGE_SHIFT;
3360 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
3361 uint8_t const fTtm = RT_BF_GET(pThis->uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
3362 RTGCPHYS const GCPhysRequests = (uIqaReg & VTD_BF_IQA_REG_IQA_MASK) + offQueueHead;
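/* GCPhysRequests is the guest-physical address of the first descriptor to fetch: the
   4K-aligned queue base from IQA_REG plus the current head offset. */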
3363
3364 /* Paranoia. */
3365 Assert(cbQueue <= cbMaxQs);
3366 Assert(!(offQueueTail & ~VTD_BF_IQT_REG_QT_MASK));
3367 Assert(!(offQueueHead & ~VTD_BF_IQH_REG_QH_MASK));
3368 Assert(fDw != VTD_IQA_REG_DW_256_BIT || !(offQueueTail & RT_BIT(4)));
3369 Assert(fDw != VTD_IQA_REG_DW_256_BIT || !(offQueueHead & RT_BIT(4)));
3370 Assert(offQueueHead < cbQueue);
3371
3372 /*
3373 * A table translation mode of "reserved" isn't valid for any descriptor type.
3374 * However, RTADDR_REG can be modified in parallel to invalidation-queue processing,
3375 * but if ESRTPS is supported, we will perform a global invalidation when software
3376 * changes RTADDR_REG; otherwise it's the responsibility of software to do it explicitly.
3377 * So caching TTM while reading all descriptors should not be a problem.
3378 *
3379 * Also, validate the queue tail offset as it's mutable by software.
3380 */
3381 if ( fTtm != VTD_TTM_RSVD
3382 && offQueueTail < cbQueue)
3383 {
3384 /* Don't hold the lock while reading (a potentially large amount of) requests. */
3385 DMAR_UNLOCK(pDevIns, pThisR3);
3386
3387 int rc;
3388 uint32_t cbRequests;
3389 if (offQueueTail > offQueueHead)
3390 {
3391 /* The requests have not wrapped around, read them in one go. */
3392 cbRequests = offQueueTail - offQueueHead;
3393 rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysRequests, pvRequests, cbRequests);
3394 }
3395 else
3396 {
3397 /* The requests have wrapped around, read forward and wrapped-around. */
3398 uint32_t const cbForward = cbQueue - offQueueHead;
3399 rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysRequests, pvRequests, cbForward);
3400
3401 uint32_t const cbWrapped = offQueueTail;
3402 if ( RT_SUCCESS(rc)
3403 && cbWrapped > 0)
3404 {
3405 rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysRequests + cbForward,
3406 (void *)((uintptr_t)pvRequests + cbForward), cbWrapped);
3407 }
3408 cbRequests = cbForward + cbWrapped;
3409 }
3410
3411 /* Re-acquire the lock since we need to update device state. */
3412 DMAR_LOCK(pDevIns, pThisR3);
3413
3414 if (RT_SUCCESS(rc))
3415 {
3416 /* Indicate to software we've fetched all requests. */
3417 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_IQH_REG, offQueueTail);
3418
3419 /* Don't hold the lock while processing requests. */
3420 DMAR_UNLOCK(pDevIns, pThisR3);
3421
3422 /* Process all requests. */
3423 Assert(cbRequests <= cbQueue);
3424 dmarR3InvQueueProcessRequests(pDevIns, pvRequests, cbRequests, fDw, fTtm);
3425
3426 /*
3427 * We've processed all requests and the lock shouldn't be held at this point.
3428 * Using 'continue' here allows us to skip re-acquiring the lock just to release
3429 * it again before going back to the thread loop. It's a bit ugly but it certainly
3430 * helps with performance.
3431 */
3432 DMAR_ASSERT_LOCK_IS_NOT_OWNER(pDevIns, pThisR3);
3433 continue;
3434 }
3435 else
3436 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dsc_Fetch_Error, VTDIQEI_FETCH_DESCRIPTOR_ERR);
3437 }
3438 else
3439 {
3440 if (fTtm == VTD_TTM_RSVD)
3441 dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Ttm_Rsvd, VTDIQEI_INVALID_TTM);
3442 else
3443 {
3444 Assert(offQueueTail >= cbQueue);
3445 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_Invalid, VTDIQEI_INVALID_TAIL_PTR);
3446 }
3447 }
3448 }
3449 }
3450 DMAR_UNLOCK(pDevIns, pThisR3);
3451 }
3452
3453 RTMemFree(pvRequests);
3454 pvRequests = NULL;
3455
3456 LogFlowFunc(("Invalidation-queue thread terminating\n"));
3457 return VINF_SUCCESS;
3458}
3459
3460
3461/**
3462 * Wakes up the invalidation-queue thread so it can respond to a state
3463 * change.
3464 *
3465 * @returns VBox status code.
3466 * @param pDevIns The IOMMU device instance.
3467 * @param pThread The invalidation-queue thread.
3468 *
3469 * @thread EMT.
3470 */
3471static DECLCALLBACK(int) dmarR3InvQueueThreadWakeUp(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
3472{
3473 RT_NOREF(pThread);
3474 LogFlowFunc(("\n"));
3475 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
3476 return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtInvQueue);
3477}
3478
3479
3480/**
3481 * @callback_method_impl{FNDBGFHANDLERDEV}
3482 */
3483static DECLCALLBACK(void) dmarR3DbgInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
3484{
3485 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
3486 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
3487 bool const fVerbose = RTStrCmp(pszArgs, "verbose") == 0;
3488
3489 /*
3490 * We lock the device to get a consistent register state. Since it is
3491 * ASSUMED pHlp->pfnPrintf is expensive, we copy the registers (the
3492 * ones we care about here) into temporaries and release the lock ASAP.
3493 *
3494 * The order in which registers are read and output is in accordance with
3495 * the spec. for no particular reason.
3496 * See Intel VT-d spec. 10.4 "Register Descriptions".
3497 */
3498 DMAR_LOCK(pDevIns, pThisR3);
3499
3500 DMARDIAG const enmDiag = pThis->enmDiag;
3501 uint32_t const uVerReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_VER_REG);
3502 uint64_t const uCapReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_CAP_REG);
3503 uint64_t const uEcapReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_ECAP_REG);
3504 uint32_t const uGcmdReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GCMD_REG);
3505 uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
3506 uint64_t const uRtaddrReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_RTADDR_REG);
3507 uint64_t const uCcmdReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_CCMD_REG);
3508 uint32_t const uFstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FSTS_REG);
3509 uint32_t const uFectlReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FECTL_REG);
3510 uint32_t const uFedataReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FEDATA_REG);
3511 uint32_t const uFeaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FEADDR_REG);
3512 uint32_t const uFeuaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_FEUADDR_REG);
3513 uint64_t const uAflogReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_AFLOG_REG);
3514 uint32_t const uPmenReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PMEN_REG);
3515 uint32_t const uPlmbaseReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PLMBASE_REG);
3516 uint32_t const uPlmlimitReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PLMLIMIT_REG);
3517 uint64_t const uPhmbaseReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PHMBASE_REG);
3518 uint64_t const uPhmlimitReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PHMLIMIT_REG);
3519 uint64_t const uIqhReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQH_REG);
3520 uint64_t const uIqtReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQT_REG);
3521 uint64_t const uIqaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
3522 uint32_t const uIcsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_ICS_REG);
3523 uint32_t const uIectlReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IECTL_REG);
3524 uint32_t const uIedataReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IEDATA_REG);
3525 uint32_t const uIeaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IEADDR_REG);
3526 uint32_t const uIeuaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IEUADDR_REG);
3527 uint64_t const uIqercdReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQERCD_REG);
3528 uint64_t const uIrtaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IRTA_REG);
3529 uint64_t const uPqhReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PQH_REG);
3530 uint64_t const uPqtReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PQT_REG);
3531 uint64_t const uPqaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_PQA_REG);
3532 uint32_t const uPrsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PRS_REG);
3533 uint32_t const uPectlReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PECTL_REG);
3534 uint32_t const uPedataReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PEDATA_REG);
3535 uint32_t const uPeaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PEADDR_REG);
3536 uint32_t const uPeuaddrReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_PEUADDR_REG);
3537 uint64_t const uMtrrcapReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_MTRRCAP_REG);
3538 uint64_t const uMtrrdefReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_MTRRDEF_REG);
3539
3540 DMAR_UNLOCK(pDevIns, pThisR3);
3541
3542 const char *const pszDiag = enmDiag < RT_ELEMENTS(g_apszDmarDiagDesc) ? g_apszDmarDiagDesc[enmDiag] : "(Unknown)";
3543 pHlp->pfnPrintf(pHlp, "Intel-IOMMU:\n");
3544 pHlp->pfnPrintf(pHlp, " Diag = %s\n", pszDiag);
3545
3546 /*
3547 * Non-verbose output.
3548 */
3549 if (!fVerbose)
3550 {
3551 pHlp->pfnPrintf(pHlp, " VER_REG = %#RX32\n", uVerReg);
3552 pHlp->pfnPrintf(pHlp, " CAP_REG = %#RX64\n", uCapReg);
3553 pHlp->pfnPrintf(pHlp, " ECAP_REG = %#RX64\n", uEcapReg);
3554 pHlp->pfnPrintf(pHlp, " GCMD_REG = %#RX32\n", uGcmdReg);
3555 pHlp->pfnPrintf(pHlp, " GSTS_REG = %#RX32\n", uGstsReg);
3556 pHlp->pfnPrintf(pHlp, " RTADDR_REG = %#RX64\n", uRtaddrReg);
3557 pHlp->pfnPrintf(pHlp, " CCMD_REG = %#RX64\n", uCcmdReg);
3558 pHlp->pfnPrintf(pHlp, " FSTS_REG = %#RX32\n", uFstsReg);
3559 pHlp->pfnPrintf(pHlp, " FECTL_REG = %#RX32\n", uFectlReg);
3560 pHlp->pfnPrintf(pHlp, " FEDATA_REG = %#RX32\n", uFedataReg);
3561 pHlp->pfnPrintf(pHlp, " FEADDR_REG = %#RX32\n", uFeaddrReg);
3562 pHlp->pfnPrintf(pHlp, " FEUADDR_REG = %#RX32\n", uFeuaddrReg);
3563 pHlp->pfnPrintf(pHlp, " AFLOG_REG = %#RX64\n", uAflogReg);
3564 pHlp->pfnPrintf(pHlp, " PMEN_REG = %#RX32\n", uPmenReg);
3565 pHlp->pfnPrintf(pHlp, " PLMBASE_REG = %#RX32\n", uPlmbaseReg);
3566 pHlp->pfnPrintf(pHlp, " PLMLIMIT_REG = %#RX32\n", uPlmlimitReg);
3567 pHlp->pfnPrintf(pHlp, " PHMBASE_REG = %#RX64\n", uPhmbaseReg);
3568 pHlp->pfnPrintf(pHlp, " PHMLIMIT_REG = %#RX64\n", uPhmlimitReg);
3569 pHlp->pfnPrintf(pHlp, " IQH_REG = %#RX64\n", uIqhReg);
3570 pHlp->pfnPrintf(pHlp, " IQT_REG = %#RX64\n", uIqtReg);
3571 pHlp->pfnPrintf(pHlp, " IQA_REG = %#RX64\n", uIqaReg);
3572 pHlp->pfnPrintf(pHlp, " ICS_REG = %#RX32\n", uIcsReg);
3573 pHlp->pfnPrintf(pHlp, " IECTL_REG = %#RX32\n", uIectlReg);
3574 pHlp->pfnPrintf(pHlp, " IEDATA_REG = %#RX32\n", uIedataReg);
3575 pHlp->pfnPrintf(pHlp, " IEADDR_REG = %#RX32\n", uIeaddrReg);
3576 pHlp->pfnPrintf(pHlp, " IEUADDR_REG = %#RX32\n", uIeuaddrReg);
3577 pHlp->pfnPrintf(pHlp, " IQERCD_REG = %#RX64\n", uIqercdReg);
3578 pHlp->pfnPrintf(pHlp, " IRTA_REG = %#RX64\n", uIrtaReg);
3579 pHlp->pfnPrintf(pHlp, " PQH_REG = %#RX64\n", uPqhReg);
3580 pHlp->pfnPrintf(pHlp, " PQT_REG = %#RX64\n", uPqtReg);
3581 pHlp->pfnPrintf(pHlp, " PQA_REG = %#RX64\n", uPqaReg);
3582 pHlp->pfnPrintf(pHlp, " PRS_REG = %#RX32\n", uPrsReg);
3583 pHlp->pfnPrintf(pHlp, " PECTL_REG = %#RX32\n", uPectlReg);
3584 pHlp->pfnPrintf(pHlp, " PEDATA_REG = %#RX32\n", uPedataReg);
3585 pHlp->pfnPrintf(pHlp, " PEADDR_REG = %#RX32\n", uPeaddrReg);
3586 pHlp->pfnPrintf(pHlp, " PEUADDR_REG = %#RX32\n", uPeuaddrReg);
3587 pHlp->pfnPrintf(pHlp, " MTRRCAP_REG = %#RX64\n", uMtrrcapReg);
3588 pHlp->pfnPrintf(pHlp, " MTRRDEF_REG = %#RX64\n", uMtrrdefReg);
3589 pHlp->pfnPrintf(pHlp, "\n");
3590 return;
3591 }
3592
3593 /*
3594 * Verbose output.
3595 */
3596 pHlp->pfnPrintf(pHlp, " VER_REG = %#RX32\n", uVerReg);
3597 {
3598 pHlp->pfnPrintf(pHlp, " MAJ = %#x\n", RT_BF_GET(uVerReg, VTD_BF_VER_REG_MAX));
3599 pHlp->pfnPrintf(pHlp, " MIN = %#x\n", RT_BF_GET(uVerReg, VTD_BF_VER_REG_MIN));
3600 }
3601 pHlp->pfnPrintf(pHlp, " CAP_REG = %#RX64\n", uCapReg);
3602 {
3603 uint8_t const uMgaw = RT_BF_GET(uCapReg, VTD_BF_CAP_REG_MGAW);
3604 uint8_t const uNfr = RT_BF_GET(uCapReg, VTD_BF_CAP_REG_NFR);
3605 pHlp->pfnPrintf(pHlp, " ND = %u\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_ND));
3606 pHlp->pfnPrintf(pHlp, " AFL = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_AFL));
3607 pHlp->pfnPrintf(pHlp, " RWBF = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_RWBF));
3608 pHlp->pfnPrintf(pHlp, " PLMR = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_PLMR));
3609 pHlp->pfnPrintf(pHlp, " PHMR = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_PHMR));
3610 pHlp->pfnPrintf(pHlp, " CM = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_CM));
3611 pHlp->pfnPrintf(pHlp, " SAGAW = %#x\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_SAGAW));
3612 pHlp->pfnPrintf(pHlp, " MGAW = %#x (%u bits)\n", uMgaw, uMgaw + 1);
3613 pHlp->pfnPrintf(pHlp, " ZLR = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_ZLR));
3614 pHlp->pfnPrintf(pHlp, " FRO = %#x bytes\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_FRO));
3615 pHlp->pfnPrintf(pHlp, " SLLPS = %#x\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_SLLPS));
3616 pHlp->pfnPrintf(pHlp, " PSI = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_PSI));
3617 pHlp->pfnPrintf(pHlp, " NFR = %u (%u FRCD register%s)\n", uNfr, uNfr + 1, uNfr > 0 ? "s" : "");
3618 pHlp->pfnPrintf(pHlp, " MAMV = %#x\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_MAMV));
3619 pHlp->pfnPrintf(pHlp, " DWD = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_DWD));
3620 pHlp->pfnPrintf(pHlp, " DRD = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_DRD));
3621 pHlp->pfnPrintf(pHlp, " FL1GP = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_FL1GP));
3622 pHlp->pfnPrintf(pHlp, " PI = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_PI));
3623 pHlp->pfnPrintf(pHlp, " FL5LP = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_FL5LP));
3624 pHlp->pfnPrintf(pHlp, " ESIRTPS = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_ESIRTPS));
3625 pHlp->pfnPrintf(pHlp, " ESRTPS = %RTbool\n", RT_BF_GET(uCapReg, VTD_BF_CAP_REG_ESRTPS));
3626 }
3627 pHlp->pfnPrintf(pHlp, " ECAP_REG = %#RX64\n", uEcapReg);
3628 {
3629 uint8_t const uPss = RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PSS);
3630 pHlp->pfnPrintf(pHlp, " C = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_C));
3631 pHlp->pfnPrintf(pHlp, " QI = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_QI));
3632 pHlp->pfnPrintf(pHlp, " DT = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_DT));
3633 pHlp->pfnPrintf(pHlp, " IR = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_IR));
3634 pHlp->pfnPrintf(pHlp, " EIM = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_EIM));
3635 pHlp->pfnPrintf(pHlp, " PT = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PT));
3636 pHlp->pfnPrintf(pHlp, " SC = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SC));
3637 pHlp->pfnPrintf(pHlp, " IRO = %#x bytes\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_IRO));
3638 pHlp->pfnPrintf(pHlp, " MHMV = %#x\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_MHMV));
3639 pHlp->pfnPrintf(pHlp, " MTS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_MTS));
3640 pHlp->pfnPrintf(pHlp, " NEST = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_NEST));
3641 pHlp->pfnPrintf(pHlp, " PRS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PRS));
3642 pHlp->pfnPrintf(pHlp, " ERS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_ERS));
3643 pHlp->pfnPrintf(pHlp, " SRS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SRS));
3644 pHlp->pfnPrintf(pHlp, " NWFS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_NWFS));
3645 pHlp->pfnPrintf(pHlp, " EAFS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_EAFS));
3646 pHlp->pfnPrintf(pHlp, " PSS = %u (%u bits)\n", uPss, uPss > 0 ? uPss + 1 : 0);
3647 pHlp->pfnPrintf(pHlp, " PASID = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PASID));
3648 pHlp->pfnPrintf(pHlp, " DIT = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_DIT));
3649 pHlp->pfnPrintf(pHlp, " PDS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_PDS));
3650 pHlp->pfnPrintf(pHlp, " SMTS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SMTS));
3651 pHlp->pfnPrintf(pHlp, " VCS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_VCS));
3652 pHlp->pfnPrintf(pHlp, " SLADS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SLADS));
3653 pHlp->pfnPrintf(pHlp, " SLTS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SLTS));
3654 pHlp->pfnPrintf(pHlp, " FLTS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_FLTS));
3655 pHlp->pfnPrintf(pHlp, " SMPWCS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_SMPWCS));
3656 pHlp->pfnPrintf(pHlp, " RPS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_RPS));
3657 pHlp->pfnPrintf(pHlp, " ADMS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_ADMS));
3658 pHlp->pfnPrintf(pHlp, " RPRIVS = %RTbool\n", RT_BF_GET(uEcapReg, VTD_BF_ECAP_REG_RPRIVS));
3659 }
3660 pHlp->pfnPrintf(pHlp, " GCMD_REG = %#RX32\n", uGcmdReg);
3661 {
3662 uint8_t const fCfi = RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_CFI);
3663 pHlp->pfnPrintf(pHlp, " CFI = %u (%s)\n", fCfi, fCfi ? "Passthrough" : "Blocked");
3664 pHlp->pfnPrintf(pHlp, " SIRTP = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_SIRTP));
3665 pHlp->pfnPrintf(pHlp, " IRE = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_IRE));
3666 pHlp->pfnPrintf(pHlp, " QIE = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_QIE));
3667 pHlp->pfnPrintf(pHlp, " WBF = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_WBF));
3668 pHlp->pfnPrintf(pHlp, " EAFL = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_EAFL));
3669 pHlp->pfnPrintf(pHlp, " SFL = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_SFL));
3670 pHlp->pfnPrintf(pHlp, " SRTP = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_SRTP));
3671 pHlp->pfnPrintf(pHlp, " TE = %u\n", RT_BF_GET(uGcmdReg, VTD_BF_GCMD_REG_TE));
3672 }
3673 pHlp->pfnPrintf(pHlp, " GSTS_REG = %#RX32\n", uGstsReg);
3674 {
3675 uint8_t const fCfis = RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_CFIS);
3676 pHlp->pfnPrintf(pHlp, " CFIS = %u (%s)\n", fCfis, fCfis ? "Passthrough" : "Blocked");
3677 pHlp->pfnPrintf(pHlp, " IRTPS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_IRTPS));
3678 pHlp->pfnPrintf(pHlp, " IRES = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_IRES));
3679 pHlp->pfnPrintf(pHlp, " QIES = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_QIES));
3680 pHlp->pfnPrintf(pHlp, " WBFS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_WBFS));
3681 pHlp->pfnPrintf(pHlp, " AFLS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_AFLS));
3682 pHlp->pfnPrintf(pHlp, " FLS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_FLS));
3683 pHlp->pfnPrintf(pHlp, " RTPS = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_RTPS));
3684 pHlp->pfnPrintf(pHlp, " TES = %u\n", RT_BF_GET(uGstsReg, VTD_BF_GSTS_REG_TES));
3685 }
3686 pHlp->pfnPrintf(pHlp, " RTADDR_REG = %#RX64\n", uRtaddrReg);
3687 {
3688 uint8_t const uTtm = RT_BF_GET(uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
3689 pHlp->pfnPrintf(pHlp, " RTA = %#RX64\n", uRtaddrReg & VTD_BF_RTADDR_REG_RTA_MASK);
3690 pHlp->pfnPrintf(pHlp, " TTM = %u (%s)\n", uTtm, vtdRtaddrRegGetTtmDesc(uTtm));
3691 }
3692 pHlp->pfnPrintf(pHlp, " CCMD_REG = %#RX64\n", uCcmdReg);
3693 pHlp->pfnPrintf(pHlp, " FSTS_REG = %#RX32\n", uFstsReg);
3694 {
3695 pHlp->pfnPrintf(pHlp, " PFO = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_PFO));
3696 pHlp->pfnPrintf(pHlp, " PPF = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_PPF));
3697 pHlp->pfnPrintf(pHlp, " AFO = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_AFO));
3698 pHlp->pfnPrintf(pHlp, " APF = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_APF));
3699 pHlp->pfnPrintf(pHlp, " IQE = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_IQE));
3700 pHlp->pfnPrintf(pHlp, " ICE = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_ICE));
3701 pHlp->pfnPrintf(pHlp, " ITE = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_ITE));
3702 pHlp->pfnPrintf(pHlp, " FRI = %u\n", RT_BF_GET(uFstsReg, VTD_BF_FSTS_REG_FRI));
3703 }
3704 pHlp->pfnPrintf(pHlp, " FECTL_REG = %#RX32\n", uFectlReg);
3705 {
3706 pHlp->pfnPrintf(pHlp, " IM = %RTbool\n", RT_BF_GET(uFectlReg, VTD_BF_FECTL_REG_IM));
3707 pHlp->pfnPrintf(pHlp, " IP = %RTbool\n", RT_BF_GET(uFectlReg, VTD_BF_FECTL_REG_IP));
3708 }
3709 pHlp->pfnPrintf(pHlp, " FEDATA_REG = %#RX32\n", uFedataReg);
3710 pHlp->pfnPrintf(pHlp, " FEADDR_REG = %#RX32\n", uFeaddrReg);
3711 pHlp->pfnPrintf(pHlp, " FEUADDR_REG = %#RX32\n", uFeuaddrReg);
3712 pHlp->pfnPrintf(pHlp, " AFLOG_REG = %#RX64\n", uAflogReg);
3713 pHlp->pfnPrintf(pHlp, " PMEN_REG = %#RX32\n", uPmenReg);
3714 pHlp->pfnPrintf(pHlp, " PLMBASE_REG = %#RX32\n", uPlmbaseReg);
3715 pHlp->pfnPrintf(pHlp, " PLMLIMIT_REG = %#RX32\n", uPlmlimitReg);
3716 pHlp->pfnPrintf(pHlp, " PHMBASE_REG = %#RX64\n", uPhmbaseReg);
3717 pHlp->pfnPrintf(pHlp, " PHMLIMIT_REG = %#RX64\n", uPhmlimitReg);
3718 pHlp->pfnPrintf(pHlp, " IQH_REG = %#RX64\n", uIqhReg);
3719 pHlp->pfnPrintf(pHlp, " IQT_REG = %#RX64\n", uIqtReg);
3720 pHlp->pfnPrintf(pHlp, " IQA_REG = %#RX64\n", uIqaReg);
3721 {
3722 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
3723 uint8_t const fQs = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_QS);
3724 uint8_t const cQueuePages = 1 << fQs;
3725 pHlp->pfnPrintf(pHlp, " DW = %u (%s)\n", fDw, fDw == VTD_IQA_REG_DW_128_BIT ? "128-bit" : "256-bit");
3726 pHlp->pfnPrintf(pHlp, " QS = %u (%u page%s)\n", fQs, cQueuePages, cQueuePages > 1 ? "s" : "");
3727 }
3728 pHlp->pfnPrintf(pHlp, " ICS_REG = %#RX32\n", uIcsReg);
3729 {
3730 pHlp->pfnPrintf(pHlp, " IWC = %u\n", RT_BF_GET(uIcsReg, VTD_BF_ICS_REG_IWC));
3731 }
3732 pHlp->pfnPrintf(pHlp, " IECTL_REG = %#RX32\n", uIectlReg);
3733 {
3734 pHlp->pfnPrintf(pHlp, " IM = %RTbool\n", RT_BF_GET(uIectlReg, VTD_BF_IECTL_REG_IM));
3735 pHlp->pfnPrintf(pHlp, " IP = %RTbool\n", RT_BF_GET(uIectlReg, VTD_BF_IECTL_REG_IP));
3736 }
3737 pHlp->pfnPrintf(pHlp, " IEDATA_REG = %#RX32\n", uIedataReg);
3738 pHlp->pfnPrintf(pHlp, " IEADDR_REG = %#RX32\n", uIeaddrReg);
3739 pHlp->pfnPrintf(pHlp, " IEUADDR_REG = %#RX32\n", uIeuaddrReg);
3740 pHlp->pfnPrintf(pHlp, " IQERCD_REG = %#RX64\n", uIqercdReg);
3741 {
3742 pHlp->pfnPrintf(pHlp, " ICESID = %#RX32\n", RT_BF_GET(uIqercdReg, VTD_BF_IQERCD_REG_ICESID));
3743 pHlp->pfnPrintf(pHlp, " ITESID = %#RX32\n", RT_BF_GET(uIqercdReg, VTD_BF_IQERCD_REG_ITESID));
3744 pHlp->pfnPrintf(pHlp, " IQEI = %#RX32\n", RT_BF_GET(uIqercdReg, VTD_BF_IQERCD_REG_IQEI));
3745 }
3746 pHlp->pfnPrintf(pHlp, " IRTA_REG = %#RX64\n", uIrtaReg);
3747 {
3748 uint32_t const cIrtEntries = VTD_IRTA_REG_GET_ENTRY_COUNT(uIrtaReg);
3749 uint32_t const cbIrt = sizeof(VTD_IRTE_T) * cIrtEntries;
3750 pHlp->pfnPrintf(pHlp, " IRTA = %#RX64\n", uIrtaReg & VTD_BF_IRTA_REG_IRTA_MASK);
3751 pHlp->pfnPrintf(pHlp, " EIME = %RTbool\n", RT_BF_GET(uIrtaReg, VTD_BF_IRTA_REG_EIME));
3752 pHlp->pfnPrintf(pHlp, " S = %u entries (%u bytes)\n", cIrtEntries, cbIrt);
3753 }
3754 pHlp->pfnPrintf(pHlp, " PQH_REG = %#RX64\n", uPqhReg);
3755 pHlp->pfnPrintf(pHlp, " PQT_REG = %#RX64\n", uPqtReg);
3756 pHlp->pfnPrintf(pHlp, " PQA_REG = %#RX64\n", uPqaReg);
3757 pHlp->pfnPrintf(pHlp, " PRS_REG = %#RX32\n", uPrsReg);
3758 pHlp->pfnPrintf(pHlp, " PECTL_REG = %#RX32\n", uPectlReg);
3759 pHlp->pfnPrintf(pHlp, " PEDATA_REG = %#RX32\n", uPedataReg);
3760 pHlp->pfnPrintf(pHlp, " PEADDR_REG = %#RX32\n", uPeaddrReg);
3761 pHlp->pfnPrintf(pHlp, " PEUADDR_REG = %#RX32\n", uPeuaddrReg);
3762 pHlp->pfnPrintf(pHlp, " MTRRCAP_REG = %#RX64\n", uMtrrcapReg);
3763 pHlp->pfnPrintf(pHlp, " MTRRDEF_REG = %#RX64\n", uMtrrdefReg);
3764 pHlp->pfnPrintf(pHlp, "\n");
3765}
3766
3767
3768/**
3769 * Initializes all registers in the DMAR unit.
3770 *
3771 * @param pDevIns The IOMMU device instance.
3772 */
3773static void dmarR3RegsInit(PPDMDEVINS pDevIns)
3774{
3775 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
3776 LogFlowFunc(("\n"));
3777
3778 /*
3779 * Wipe all registers (required on reset).
3780 */
3781 RT_ZERO(pThis->abRegs0);
3782 RT_ZERO(pThis->abRegs1);
3783
3784 /*
3785 * Initialize registers not mutable by software prior to initializing other registers.
3786 */
3787 /* VER_REG */
3788 {
3789 pThis->uVerReg = RT_BF_MAKE(VTD_BF_VER_REG_MIN, DMAR_VER_MINOR)
3790 | RT_BF_MAKE(VTD_BF_VER_REG_MAX, DMAR_VER_MAJOR);
3791 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_VER_REG, pThis->uVerReg);
3792 }
3793
3794 uint8_t const fFlts = 0; /* First-level translation support. */
3795 uint8_t const fSlts = 1; /* Second-level translation support. */
3796 uint8_t const fPt = 1; /* Pass-Through support. */
3797 uint8_t const fSmts = fFlts & fSlts & fPt; /* Scalable mode translation support. */
3798 uint8_t const fNest = 0; /* Nested translation support. */
3799
3800 /* CAP_REG */
3801 {
3802 uint8_t cGstPhysAddrBits;
3803 uint8_t cGstLinearAddrBits;
3804 PDMDevHlpCpuGetGuestAddrWidths(pDevIns, &cGstPhysAddrBits, &cGstLinearAddrBits);
3805
3806 uint8_t const fFl1gp = 1; /* First-level 1GB pages support. */
3807 uint8_t const fFl5lp = 1; /* First-level 5-level paging support (PML5E). */
3808 uint8_t const fSl2mp = 1; /* Second-level 2MB pages support. */
3809 uint8_t const fSl2gp = fSl2mp & 1; /* Second-level 1GB pages support. */
3810 uint8_t const fSllps = fSl2mp | (fSl2gp << 1); /* Second-level large page support. */
3811 uint8_t const fMamv = (fSl2gp ? X86_PAGE_1G_SHIFT /* Maximum address mask value (for 2nd-level invalidations). */
3812 : X86_PAGE_2M_SHIFT)
3813 - X86_PAGE_4K_SHIFT;
3814 uint8_t const fNd = DMAR_ND; /* Number of domains supported. */
3815 uint8_t const fPsi = 1; /* Page selective invalidation. */
3816 uint8_t const uMgaw = cGstPhysAddrBits - 1; /* Maximum guest address width. */
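/* CAP_REG.MGAW is encoded as the supported guest address width minus one, hence the -1 above. */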
3817 uint8_t const fSagaw = vtdCapRegGetSagaw(uMgaw); /* Supported adjusted guest address widths. */
3818 uint16_t const offFro = DMAR_MMIO_OFF_FRCD_LO_REG >> 4; /* MMIO offset of FRCD registers. */
3819 uint8_t const fEsrtps = 1; /* Enhanced SRTPS (auto invalidate cache on SRTP). */
3820 uint8_t const fEsirtps = 1; /* Enhanced SIRTPS (auto invalidate cache on SIRTP). */
3821
3822 pThis->fCapReg = RT_BF_MAKE(VTD_BF_CAP_REG_ND, fNd)
3823 | RT_BF_MAKE(VTD_BF_CAP_REG_AFL, 0) /* Advanced fault logging not supported. */
3824 | RT_BF_MAKE(VTD_BF_CAP_REG_RWBF, 0) /* Software need not flush write-buffers. */
3825 | RT_BF_MAKE(VTD_BF_CAP_REG_PLMR, 0) /* Protected Low-Memory Region not supported. */
3826 | RT_BF_MAKE(VTD_BF_CAP_REG_PHMR, 0) /* Protected High-Memory Region not supported. */
3827 | RT_BF_MAKE(VTD_BF_CAP_REG_CM, 1) /* Software should invalidate on mapping structure changes. */
3828 | RT_BF_MAKE(VTD_BF_CAP_REG_SAGAW, fSlts ? fSagaw : 0)
3829 | RT_BF_MAKE(VTD_BF_CAP_REG_MGAW, uMgaw)
3830 | RT_BF_MAKE(VTD_BF_CAP_REG_ZLR, 1) /** @todo Figure out if/how to support zero-length reads. */
3831 | RT_BF_MAKE(VTD_BF_CAP_REG_FRO, offFro)
3832 | RT_BF_MAKE(VTD_BF_CAP_REG_SLLPS, fSlts & fSllps)
3833 | RT_BF_MAKE(VTD_BF_CAP_REG_PSI, fPsi)
3834 | RT_BF_MAKE(VTD_BF_CAP_REG_NFR, DMAR_FRCD_REG_COUNT - 1)
3835 | RT_BF_MAKE(VTD_BF_CAP_REG_MAMV, fPsi & fMamv)
3836 | RT_BF_MAKE(VTD_BF_CAP_REG_DWD, 1)
3837 | RT_BF_MAKE(VTD_BF_CAP_REG_DRD, 1)
3838 | RT_BF_MAKE(VTD_BF_CAP_REG_FL1GP, fFlts & fFl1gp)
3839 | RT_BF_MAKE(VTD_BF_CAP_REG_PI, 0) /* Posted Interrupts not supported. */
3840 | RT_BF_MAKE(VTD_BF_CAP_REG_FL5LP, fFlts & fFl5lp)
3841 | RT_BF_MAKE(VTD_BF_CAP_REG_ESIRTPS, fEsirtps)
3842 | RT_BF_MAKE(VTD_BF_CAP_REG_ESRTPS, fEsrtps);
3843 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_CAP_REG, pThis->fCapReg);
3844
3845 AssertCompile(fNd <= RT_ELEMENTS(g_auNdMask));
3846 pThis->fHawBaseMask = ~(UINT64_MAX << cGstPhysAddrBits) & X86_PAGE_4K_BASE_MASK;
3847 pThis->fMgawInvMask = UINT64_MAX << cGstPhysAddrBits;
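/* fHawBaseMask keeps only the 4K-aligned address bits that fall within the supported guest
   address width, while fMgawInvMask has all bits above that width set (presumably used to
   detect addresses exceeding MGAW). */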
3848 pThis->cMaxPagingLevel = vtdCapRegGetMaxPagingLevel(fSagaw);
3849 pThis->fCtxEntryQw1ValidMask = VTD_BF_1_CONTEXT_ENTRY_AW_MASK
3850 | VTD_BF_1_CONTEXT_ENTRY_IGN_6_3_MASK
3851 | RT_BF_MAKE(VTD_BF_1_CONTEXT_ENTRY_DID, g_auNdMask[fNd]);
3852 }
3853
3854 /* ECAP_REG */
3855 {
3856 uint8_t const fQi = 1; /* Queued-invalidations. */
3857 uint8_t const fIr = !!(DMAR_ACPI_DMAR_FLAGS & ACPI_DMAR_F_INTR_REMAP); /* Interrupt remapping support. */
3858 uint8_t const fMhmv = 0xf; /* Maximum handle mask value. */
3859 uint16_t const offIro = DMAR_MMIO_OFF_IVA_REG >> 4; /* MMIO offset of IOTLB registers. */
3860 uint8_t const fEim = 1; /* Extended interrupt mode. */
3861 uint8_t const fAdms = 1; /* Abort DMA mode support. */
3862 uint8_t const fErs = 0; /* Execute Request (not supported). */
3863
3864 pThis->fExtCapReg = RT_BF_MAKE(VTD_BF_ECAP_REG_C, 0) /* Accesses don't snoop CPU cache. */
3865 | RT_BF_MAKE(VTD_BF_ECAP_REG_QI, fQi)
3866 | RT_BF_MAKE(VTD_BF_ECAP_REG_DT, 0) /* Device-TLBs not supported. */
3867 | RT_BF_MAKE(VTD_BF_ECAP_REG_IR, fQi & fIr)
3868 | RT_BF_MAKE(VTD_BF_ECAP_REG_EIM, fIr & fEim)
3869 | RT_BF_MAKE(VTD_BF_ECAP_REG_PT, fPt)
3870 | RT_BF_MAKE(VTD_BF_ECAP_REG_SC, 0) /* Snoop control not supported. */
3871 | RT_BF_MAKE(VTD_BF_ECAP_REG_IRO, offIro)
3872 | RT_BF_MAKE(VTD_BF_ECAP_REG_MHMV, fIr & fMhmv)
3873 | RT_BF_MAKE(VTD_BF_ECAP_REG_MTS, 0) /* Memory type not supported. */
3874 | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST, fNest)
3875 | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS, 0) /* 0 as DT not supported. */
3876 | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS, fErs)
3877 | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS, 0) /* Supervisor request not supported. */
3878 | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS, 0) /* 0 as DT not supported. */
3879 | RT_BF_MAKE(VTD_BF_ECAP_REG_EAFS, 0) /* 0 as SMPWCS not supported. */
3880 | RT_BF_MAKE(VTD_BF_ECAP_REG_PSS, 0) /* 0 as PASID not supported. */
3881 | RT_BF_MAKE(VTD_BF_ECAP_REG_PASID, 0) /* PASID not supported. */
3882 | RT_BF_MAKE(VTD_BF_ECAP_REG_DIT, 0) /* 0 as DT not supported. */
3883 | RT_BF_MAKE(VTD_BF_ECAP_REG_PDS, 0) /* 0 as DT not supported. */
3884 | RT_BF_MAKE(VTD_BF_ECAP_REG_SMTS, fSmts)
3885 | RT_BF_MAKE(VTD_BF_ECAP_REG_VCS, 0) /* 0 as PASID not supported (commands seem PASID specific). */
3886 | RT_BF_MAKE(VTD_BF_ECAP_REG_SLADS, 0) /* Second-level accessed/dirty not supported. */
3887 | RT_BF_MAKE(VTD_BF_ECAP_REG_SLTS, fSlts)
3888 | RT_BF_MAKE(VTD_BF_ECAP_REG_FLTS, fFlts)
3889 | RT_BF_MAKE(VTD_BF_ECAP_REG_SMPWCS, 0) /* 0 as PASID not supported. */
3890 | RT_BF_MAKE(VTD_BF_ECAP_REG_RPS, 0) /* We don't support RID_PASID field in SM context entry. */
3891 | RT_BF_MAKE(VTD_BF_ECAP_REG_ADMS, fAdms)
3892 | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0); /* 0 as SRS not supported. */
3893 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCapReg);
3894
3895 pThis->fPermValidMask = DMAR_PERM_READ | DMAR_PERM_WRITE;
3896 if (fErs)
3897 pThis->fPermValidMask |= DMAR_PERM_EXE;
3898 }
3899
3900 /*
3901 * Initialize registers mutable by software.
3902 */
3903 /* FECTL_REG */
3904 {
3905 uint32_t const uCtl = RT_BF_MAKE(VTD_BF_FECTL_REG_IM, 1);
3906 dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_FECTL_REG, uCtl);
3907 }
3908
3909 /* IECTL_REG */
3910 {
3911 uint32_t const uCtl = RT_BF_MAKE(VTD_BF_IECTL_REG_IM, 1);
3912 dmarRegWriteRaw32(pThis, VTD_MMIO_OFF_IECTL_REG, uCtl);
3913 }
3914
3915#ifdef VBOX_STRICT
3916 Assert(!RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_PRS)); /* PECTL_REG - Reserved if we don't support PRS. */
3917 Assert(!RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_MTS)); /* MTRRCAP_REG - Reserved if we don't support MTS. */
3918#endif
3919}
3920
3921
3922/**
3923 * @callback_method_impl{FNSSMDEVSAVEEXEC}
3924 */
3925static DECLCALLBACK(int) dmarR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
3926{
3927 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR);
3928 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
3929 LogFlowFunc(("\n"));
3930
3931 /* First, save software-immutable registers that we validate on state load. */
3932 pHlp->pfnSSMPutU32(pSSM, pThis->uVerReg);
3933 pHlp->pfnSSMPutU64(pSSM, pThis->fCapReg);
3934 pHlp->pfnSSMPutU64(pSSM, pThis->fExtCapReg);
3935
3936 /* Save MMIO registers. */
3937 pHlp->pfnSSMPutU32(pSSM, DMAR_MMIO_GROUP_COUNT);
3938 pHlp->pfnSSMPutU32(pSSM, sizeof(pThis->abRegs0));
3939 pHlp->pfnSSMPutMem(pSSM, &pThis->abRegs0[0], sizeof(pThis->abRegs0));
3940 pHlp->pfnSSMPutU32(pSSM, sizeof(pThis->abRegs1));
3941 pHlp->pfnSSMPutMem(pSSM, &pThis->abRegs1[0], sizeof(pThis->abRegs1));
3942
3943 /*
3944 * Save our implementation-defined MMIO register offsets.
3945 * The registers themselves are currently all part of group 1 (saved above).
3946 * We save these to ensure they're located where the code expects them while loading state.
3947 */
3948 pHlp->pfnSSMPutU16(pSSM, DMAR_MMIO_OFF_IMPL_COUNT);
3949 AssertCompile(DMAR_MMIO_OFF_IMPL_COUNT == 2);
3950 pHlp->pfnSSMPutU16(pSSM, DMAR_MMIO_OFF_IVA_REG);
3951 pHlp->pfnSSMPutU16(pSSM, DMAR_MMIO_OFF_FRCD_LO_REG);
3952
3953 /* Save lazily activated registers. */
3954 pHlp->pfnSSMPutU64(pSSM, pThis->uIrtaReg);
3955 pHlp->pfnSSMPutU64(pSSM, pThis->uRtaddrReg);
3956
3957 /* Save terminator marker and return status. */
3958 return pHlp->pfnSSMPutU32(pSSM, UINT32_MAX);
3959}
3960
3961
3962/**
3963 * @callback_method_impl{FNSSMDEVLOADEXEC}
3964 */
3965static DECLCALLBACK(int) dmarR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3966{
3967 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
3968 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
3969 int const rcDataErr = VERR_SSM_UNEXPECTED_DATA;
3970 int const rcFmtErr = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3971 LogFlowFunc(("\n"));
3972
3973 /*
3974 * Validate saved-state version.
3975 */
3976 AssertReturn(uPass == SSM_PASS_FINAL, VERR_WRONG_ORDER);
3977 if (uVersion != DMAR_SAVED_STATE_VERSION)
3978 {
3979 LogRel(("%s: Invalid saved-state version %#x\n", DMAR_LOG_PFX, uVersion));
3980 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3981 }
3982
3983 /*
3984 * Load and validate software-immutable registers.
3985 * The features we had exposed to the guest (in the saved state) must be identical
3986 * to what is currently emulated.
3987 */
3988 {
3989 /* VER_REG */
3990 uint32_t uVerReg = 0;
3991 int rc = pHlp->pfnSSMGetU32(pSSM, &uVerReg);
3992 AssertRCReturn(rc, rc);
3993 AssertLogRelMsgReturn(uVerReg == pThis->uVerReg,
3994 ("%s: VER_REG mismatch (expected %#RX32 got %#RX32)\n", DMAR_LOG_PFX, pThis->uVerReg, uVerReg),
3995 rcDataErr);
3996 /* CAP_REG */
3997 uint64_t fCapReg = 0;
3998 pHlp->pfnSSMGetU64(pSSM, &fCapReg);
3999 AssertLogRelMsgReturn(fCapReg == pThis->fCapReg,
4000 ("%s: CAP_REG mismatch (expected %#RX64 got %#RX64)\n", DMAR_LOG_PFX, pThis->fCapReg, fCapReg),
4001 rcDataErr);
4002 /* ECAP_REG */
4003 uint64_t fExtCapReg = 0;
4004 pHlp->pfnSSMGetU64(pSSM, &fExtCapReg);
4005 AssertLogRelMsgReturn(fExtCapReg == pThis->fExtCapReg,
4006 ("%s: ECAP_REG mismatch (expected %#RX64 got %#RX64)\n", DMAR_LOG_PFX, pThis->fExtCapReg,
4007 fExtCapReg), rcDataErr);
4008 }
4009
4010 /*
4011 * Load MMIO registers.
4012 */
4013 {
4014 /* Group count. */
4015 uint32_t cRegGroups = 0;
4016 pHlp->pfnSSMGetU32(pSSM, &cRegGroups);
4017 AssertLogRelMsgReturn(cRegGroups == DMAR_MMIO_GROUP_COUNT,
4018 ("%s: MMIO group count mismatch (expected %u got %u)\n", DMAR_LOG_PFX, DMAR_MMIO_GROUP_COUNT,
4019 cRegGroups), rcFmtErr);
4020 /* Group 0. */
4021 uint32_t cbRegs0 = 0;
4022 pHlp->pfnSSMGetU32(pSSM, &cbRegs0);
4023 AssertLogRelMsgReturn(cbRegs0 == sizeof(pThis->abRegs0),
4024 ("%s: MMIO group 0 size mismatch (expected %u got %u)\n", DMAR_LOG_PFX, sizeof(pThis->abRegs0),
4025 cbRegs0), rcFmtErr);
4026 pHlp->pfnSSMGetMem(pSSM, &pThis->abRegs0[0], cbRegs0);
4027 /* Group 1. */
4028 uint32_t cbRegs1 = 0;
4029 pHlp->pfnSSMGetU32(pSSM, &cbRegs1);
4030 AssertLogRelMsgReturn(cbRegs1 == sizeof(pThis->abRegs1),
4031 ("%s: MMIO group 1 size mismatch (expected %u got %u)\n", DMAR_LOG_PFX, sizeof(pThis->abRegs1),
4032 cbRegs1), rcFmtErr);
4033 pHlp->pfnSSMGetMem(pSSM, &pThis->abRegs1[0], cbRegs1);
4034 }
4035
4036 /*
4037 * Validate implementation-defined MMIO register offsets.
4038 */
4039 {
4040 /* Offset count. */
4041 uint16_t cOffsets = 0;
4042 pHlp->pfnSSMGetU16(pSSM, &cOffsets);
4043 AssertLogRelMsgReturn(cOffsets == DMAR_MMIO_OFF_IMPL_COUNT,
4044 ("%s: MMIO offset count mismatch (expected %u got %u)\n", DMAR_LOG_PFX, DMAR_MMIO_OFF_IMPL_COUNT,
4045 cOffsets), rcFmtErr);
4046 /* IVA_REG. */
4047 uint16_t offReg = 0;
4048 pHlp->pfnSSMGetU16(pSSM, &offReg);
4049 AssertLogRelMsgReturn(offReg == DMAR_MMIO_OFF_IVA_REG,
4050 ("%s: IVA_REG offset mismatch (expected %u got %u)\n", DMAR_LOG_PFX, DMAR_MMIO_OFF_IVA_REG,
4051 offReg), rcFmtErr);
4052 /* IOTLB_REG. */
4053 AssertLogRelMsgReturn(offReg + 8 == DMAR_MMIO_OFF_IOTLB_REG,
4054 ("%s: IOTLB_REG offset mismatch (expected %u got %u)\n", DMAR_LOG_PFX, DMAR_MMIO_OFF_IOTLB_REG,
4055 offReg), rcFmtErr);
4056 /* FRCD_LO_REG. */
4057 pHlp->pfnSSMGetU16(pSSM, &offReg);
4058 AssertLogRelMsgReturn(offReg == DMAR_MMIO_OFF_FRCD_LO_REG,
4059 ("%s: FRCD_LO_REG offset mismatch (expected %u got %u)\n", DMAR_LOG_PFX, DMAR_MMIO_OFF_FRCD_LO_REG,
4060 offReg), rcFmtErr);
4061 /* FRCD_HI_REG. */
4062 AssertLogRelMsgReturn(offReg + 8 == DMAR_MMIO_OFF_FRCD_HI_REG,
4063 ("%s: FRCD_HI_REG offset mismatch (expected %u got %u)\n", DMAR_LOG_PFX, DMAR_MMIO_OFF_FRCD_HI_REG,
4064 offReg), rcFmtErr);
4065 }
4066
4067 /*
4068 * Load lazily activated registers.
4069 */
4070 {
4071 /* Active IRTA_REG. */
4072 pHlp->pfnSSMGetU64(pSSM, &pThis->uIrtaReg);
4073 AssertLogRelMsgReturn(!(pThis->uIrtaReg & ~VTD_IRTA_REG_RW_MASK),
4074 ("%s: IRTA_REG reserved bits set %#RX64\n", DMAR_LOG_PFX, pThis->uIrtaReg), rcDataErr);
4075 /* Active RTADDR_REG. */
4076 pHlp->pfnSSMGetU64(pSSM, &pThis->uRtaddrReg);
4077 AssertLogRelMsgReturn(!(pThis->uRtaddrReg & ~VTD_RTADDR_REG_RW_MASK),
4078 ("%s: RTADDR_REG reserved bits set %#RX64\n", DMAR_LOG_PFX, pThis->uRtaddrReg), rcDataErr);
4079 }
4080
4081 /*
4082 * Verify terminator marker.
4083 */
4084 {
4085 uint32_t uEndMarker = 0;
4086 int const rc = pHlp->pfnSSMGetU32(pSSM, &uEndMarker);
4087 AssertRCReturn(rc, rc);
4088 AssertLogRelMsgReturn(uEndMarker == UINT32_MAX,
4089 ("%s: End marker mismatch (expected %#RX32 got %#RX32)\n", DMAR_LOG_PFX, UINT32_MAX, uEndMarker),
4090 rcFmtErr);
4091 }
4092 return VINF_SUCCESS;
4093}
4094
4095
4096/**
4097 * @callback_method_impl{FNSSMDEVLOADDONE}
4098 */
4099static DECLCALLBACK(int) dmarR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
4100{
4101 PDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PDMARR3);
4102 LogFlowFunc(("\n"));
4103 RT_NOREF(pSSM);
4104 AssertPtrReturn(pThisR3, VERR_INVALID_POINTER);
4105
4106 DMAR_LOCK(pDevIns, pThisR3);
4107 dmarInvQueueThreadWakeUpIfNeeded(pDevIns);
4108 DMAR_UNLOCK(pDevIns, pThisR3);
4109 return VINF_SUCCESS;
4110}
4111
4112
4113/**
4114 * @interface_method_impl{PDMDEVREG,pfnReset}
4115 */
4116static DECLCALLBACK(void) iommuIntelR3Reset(PPDMDEVINS pDevIns)
4117{
4118 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
4119 LogFlowFunc(("\n"));
4120
4121 DMAR_LOCK(pDevIns, pThisR3);
4122 dmarR3RegsInit(pDevIns);
4123 DMAR_UNLOCK(pDevIns, pThisR3);
4124}
4125
4126
4127/**
4128 * @interface_method_impl{PDMDEVREG,pfnDestruct}
4129 */
4130static DECLCALLBACK(int) iommuIntelR3Destruct(PPDMDEVINS pDevIns)
4131{
4132 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
4133 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
4134 LogFlowFunc(("\n"));
4135
4136 DMAR_LOCK(pDevIns, pThisR3);
4137
4138 if (pThis->hEvtInvQueue != NIL_SUPSEMEVENT)
4139 {
4140 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEvtInvQueue);
4141 pThis->hEvtInvQueue = NIL_SUPSEMEVENT;
4142 }
4143
4144 DMAR_UNLOCK(pDevIns, pThisR3);
4145 return VINF_SUCCESS;
4146}
4147
4148
4149/**
4150 * @interface_method_impl{PDMDEVREG,pfnConstruct}
4151 */
4152static DECLCALLBACK(int) iommuIntelR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
4153{
4154 RT_NOREF(pCfg);
4155
4156 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
4157 PDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PDMARR3);
4158 pThisR3->pDevInsR3 = pDevIns;
4159
4160 LogFlowFunc(("iInstance=%d\n", iInstance));
4161 NOREF(iInstance);
4162
4163 /*
4164 * Register the IOMMU with PDM.
4165 */
4166 PDMIOMMUREGR3 IommuReg;
4167 RT_ZERO(IommuReg);
4168 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
4169 IommuReg.pfnMemAccess = iommuIntelMemAccess;
4170 IommuReg.pfnMemBulkAccess = iommuIntelMemBulkAccess;
4171 IommuReg.pfnMsiRemap = iommuIntelMsiRemap;
4172 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
4173 int rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisR3->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
4174 if (RT_FAILURE(rc))
4175 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to register ourselves as an IOMMU device"));
4176 if (pThisR3->CTX_SUFF(pIommuHlp)->u32Version != PDM_IOMMUHLPR3_VERSION)
4177 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
4178 N_("IOMMU helper version mismatch; got %#x expected %#x"),
4179 pThisR3->CTX_SUFF(pIommuHlp)->u32Version, PDM_IOMMUHLPR3_VERSION);
4180 if (pThisR3->CTX_SUFF(pIommuHlp)->u32TheEnd != PDM_IOMMUHLPR3_VERSION)
4181 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
4182 N_("IOMMU helper end-version mismatch; got %#x expected %#x"),
4183 pThisR3->CTX_SUFF(pIommuHlp)->u32TheEnd, PDM_IOMMUHLPR3_VERSION);
4184 AssertPtr(pThisR3->pIommuHlpR3->pfnLock);
4185 AssertPtr(pThisR3->pIommuHlpR3->pfnUnlock);
4186 AssertPtr(pThisR3->pIommuHlpR3->pfnLockIsOwner);
4187 AssertPtr(pThisR3->pIommuHlpR3->pfnSendMsi);
4188
4189 /*
4190 * Use PDM's critical section (via helpers) for the IOMMU device.
4191 */
4192 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
4193 AssertRCReturn(rc, rc);
4194
4195 /*
4196 * Initialize PCI configuration registers.
4197 */
4198 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
4199 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
4200
4201 /* Header. */
4202 PDMPciDevSetVendorId(pPciDev, DMAR_PCI_VENDOR_ID); /* Intel */
4203 PDMPciDevSetDeviceId(pPciDev, DMAR_PCI_DEVICE_ID); /* VirtualBox DMAR device */
4204 PDMPciDevSetRevisionId(pPciDev, DMAR_PCI_REVISION_ID); /* VirtualBox specific device implementation revision */
4205 PDMPciDevSetClassBase(pPciDev, VBOX_PCI_CLASS_SYSTEM); /* System Base Peripheral */
4206 PDMPciDevSetClassSub(pPciDev, VBOX_PCI_SUB_SYSTEM_OTHER); /* Other */
4207 PDMPciDevSetHeaderType(pPciDev, 0); /* Single function, type 0 */
4208 PDMPciDevSetSubSystemId(pPciDev, DMAR_PCI_DEVICE_ID); /* VirtualBox DMAR device */
4209 PDMPciDevSetSubSystemVendorId(pPciDev, DMAR_PCI_VENDOR_ID); /* Intel */
4210
4211 /** @todo Chipset spec says PCI Express Capability Id. Relevant for us? */
4212 PDMPciDevSetStatus(pPciDev, 0);
4213 PDMPciDevSetCapabilityList(pPciDev, 0);
4214 /** @todo VTBAR at 0x180? */
4215
4216 /*
4217 * Register the PCI function with PDM.
4218 */
4219 rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
4220 AssertLogRelRCReturn(rc, rc);
4221
4222 /*
4223 * Register MMIO region.
4224 */
4225 AssertCompile(!(DMAR_MMIO_BASE_PHYSADDR & X86_PAGE_4K_OFFSET_MASK));
4226 rc = PDMDevHlpMmioCreateAndMap(pDevIns, DMAR_MMIO_BASE_PHYSADDR, DMAR_MMIO_SIZE, dmarMmioWrite, dmarMmioRead,
4227 IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_DWORD_QWORD_ZEROED, "Intel-IOMMU",
4228 &pThis->hMmio);
4229 AssertLogRelRCReturn(rc, rc);
4230
4231 /*
4232 * Register saved state handlers.
4233 */
4234 rc = PDMDevHlpSSMRegisterEx(pDevIns, DMAR_SAVED_STATE_VERSION, sizeof(DMAR), NULL /* pszBefore */,
4235 NULL /* pfnLivePrep */, NULL /* pfnLiveExec */, NULL /* pfnLiveVote */,
4236 NULL /* pfnSavePrep */, dmarR3SaveExec, NULL /* pfnSaveDone */,
4237 NULL /* pfnLoadPrep */, dmarR3LoadExec, dmarR3LoadDone);
4238 AssertLogRelRCReturn(rc, rc);
4239
4240 /*
4241 * Register debugger info items.
4242 */
4243 rc = PDMDevHlpDBGFInfoRegister(pDevIns, "iommu", "Display IOMMU state.", dmarR3DbgInfo);
4244 AssertLogRelRCReturn(rc, rc);
4245
4246#ifdef VBOX_WITH_STATISTICS
4247 /*
4248 * Statistics.
4249 */
4250 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadR3, STAMTYPE_COUNTER, "R3/MmioRead", STAMUNIT_OCCURENCES, "Number of MMIO reads in R3.");
4251 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadRZ, STAMTYPE_COUNTER, "RZ/MmioRead", STAMUNIT_OCCURENCES, "Number of MMIO reads in RZ.");
4252
4253 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteR3, STAMTYPE_COUNTER, "R3/MmioWrite", STAMUNIT_OCCURENCES, "Number of MMIO writes in R3.");
4254 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteRZ, STAMTYPE_COUNTER, "RZ/MmioWrite", STAMUNIT_OCCURENCES, "Number of MMIO writes in RZ.");
4255
4256 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapCfiR3, STAMTYPE_COUNTER, "R3/MsiRemapCfi", STAMUNIT_OCCURENCES, "Number of compatibility-format interrupt remap requests in R3.");
4257 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapCfiRZ, STAMTYPE_COUNTER, "RZ/MsiRemapCfi", STAMUNIT_OCCURENCES, "Number of compatibility-format interrupt remap requests in RZ.");
4258 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapRfiR3, STAMTYPE_COUNTER, "R3/MsiRemapRfi", STAMUNIT_OCCURENCES, "Number of remappable-format interrupt remap requests in R3.");
4259 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapRfiRZ, STAMTYPE_COUNTER, "RZ/MsiRemapRfi", STAMUNIT_OCCURENCES, "Number of remappable-format interrupt remap requests in RZ.");
4260
4261 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemReadR3, STAMTYPE_COUNTER, "R3/MemRead", STAMUNIT_OCCURENCES, "Number of memory read translation requests in R3.");
4262 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemReadRZ, STAMTYPE_COUNTER, "RZ/MemRead", STAMUNIT_OCCURENCES, "Number of memory read translation requests in RZ.");
4263
4264 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemWriteR3, STAMTYPE_COUNTER, "R3/MemWrite", STAMUNIT_OCCURENCES, "Number of memory write translation requests in R3.");
4265 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemWriteRZ, STAMTYPE_COUNTER, "RZ/MemWrite", STAMUNIT_OCCURENCES, "Number of memory write translation requests in RZ.");
4266
4267 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkReadR3, STAMTYPE_COUNTER, "R3/MemBulkRead", STAMUNIT_OCCURENCES, "Number of memory bulk read translation requests in R3.");
4268 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkReadRZ, STAMTYPE_COUNTER, "RZ/MemBulkRead", STAMUNIT_OCCURENCES, "Number of memory bulk read translation requests in RZ.");
4269
4270 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkWriteR3, STAMTYPE_COUNTER, "R3/MemBulkWrite", STAMUNIT_OCCURENCES, "Number of memory bulk write translation requests in R3.");
4271 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMemBulkWriteRZ, STAMTYPE_COUNTER, "RZ/MemBulkWrite", STAMUNIT_OCCURENCES, "Number of memory bulk write translation requests in RZ.");
4272
4273 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCcInvDsc, STAMTYPE_COUNTER, "R3/QI/CcInv", STAMUNIT_OCCURENCES, "Number of cc_inv_dsc processed.");
4274 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbInvDsc, STAMTYPE_COUNTER, "R3/QI/IotlbInv", STAMUNIT_OCCURENCES, "Number of iotlb_inv_dsc processed.");
4275 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatDevtlbInvDsc, STAMTYPE_COUNTER, "R3/QI/DevtlbInv", STAMUNIT_OCCURENCES, "Number of dev_tlb_inv_dsc processed.");
4276 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIecInvDsc, STAMTYPE_COUNTER, "R3/QI/IecInv", STAMUNIT_OCCURENCES, "Number of iec_inv_dsc processed.");
4277 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatInvWaitDsc, STAMTYPE_COUNTER, "R3/QI/InvWait", STAMUNIT_OCCURENCES, "Number of inv_wait_dsc processed.");
4278 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPasidIotlbInvDsc, STAMTYPE_COUNTER, "R3/QI/PasidIotlbInv", STAMUNIT_OCCURENCES, "Number of p_iotlb_inv_dsc processed.");
4279 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPasidCacheInvDsc, STAMTYPE_COUNTER, "R3/QI/PasidCacheInv", STAMUNIT_OCCURENCES, "Number of pc_inv_dsc processed.");
4280 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPasidDevtlbInvDsc, STAMTYPE_COUNTER, "R3/QI/PasidDevtlbInv", STAMUNIT_OCCURENCES, "Number of p_dev_tlb_inv_dsc processed.");
4281#endif
4282
4283 /*
4284 * Initialize registers.
4285 */
4286 dmarR3RegsInit(pDevIns);
4287
4288 /*
4289 * Create invalidation-queue thread and semaphore.
4290 */
4291 char szInvQueueThread[32];
4292 RT_ZERO(szInvQueueThread);
4293 RTStrPrintf(szInvQueueThread, sizeof(szInvQueueThread), "IOMMU-QI-%u", iInstance);
4294 rc = PDMDevHlpThreadCreate(pDevIns, &pThisR3->pInvQueueThread, pThis, dmarR3InvQueueThread, dmarR3InvQueueThreadWakeUp,
4295 0 /* cbStack */, RTTHREADTYPE_IO, szInvQueueThread);
4296 AssertLogRelRCReturn(rc, rc);
4297
4298 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEvtInvQueue);
4299 AssertLogRelRCReturn(rc, rc);
4300
4301 /*
4302 * Log some of the features exposed to software.
4303 */
4304 uint8_t const uVerMax = RT_BF_GET(pThis->uVerReg, VTD_BF_VER_REG_MAX);
4305 uint8_t const uVerMin = RT_BF_GET(pThis->uVerReg, VTD_BF_VER_REG_MIN);
4306 uint8_t const cMgawBits = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_MGAW) + 1;
4307 uint8_t const fSagaw = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SAGAW);
4308 uint16_t const offFrcd = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_FRO);
4309 uint16_t const offIva = RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_IRO);
4310 LogRel(("%s: Mapped at %#RGp (%u-level page-table supported)\n",
4311 DMAR_LOG_PFX, DMAR_MMIO_BASE_PHYSADDR, pThis->cMaxPagingLevel));
4312 LogRel(("%s: Version=%u.%u Cap=%#RX64 ExtCap=%#RX64 Mgaw=%u bits Sagaw=%#x HawBaseMask=%#RX64 MgawInvMask=%#RX64 FRO=%#x IRO=%#x\n",
4313 DMAR_LOG_PFX, uVerMax, uVerMin, pThis->fCapReg, pThis->fExtCapReg, cMgawBits, fSagaw, pThis->fHawBaseMask,
4314 pThis->fMgawInvMask, offFrcd, offIva));
4315 return VINF_SUCCESS;
4316}
4317
4318#else
4319
4320/**
4321 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
4322 */
4323static DECLCALLBACK(int) iommuIntelRZConstruct(PPDMDEVINS pDevIns)
4324{
4325 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
4326 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
4327 PDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PDMARCC);
4328 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
4329
4330 /* We will use PDM's critical section (via helpers) for the IOMMU device. */
4331 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
4332 AssertRCReturn(rc, rc);
4333
4334 /* Set up the MMIO RZ handlers. */
4335 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, dmarMmioWrite, dmarMmioRead, NULL /* pvUser */);
4336 AssertRCReturn(rc, rc);
4337
4338 /* Set up the IOMMU RZ callbacks. */
4339 PDMIOMMUREGCC IommuReg;
4340 RT_ZERO(IommuReg);
4341 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
4342 IommuReg.idxIommu = pThis->idxIommu;
4343 IommuReg.pfnMemAccess = iommuIntelMemAccess;
4344 IommuReg.pfnMemBulkAccess = iommuIntelMemBulkAccess;
4345 IommuReg.pfnMsiRemap = iommuIntelMsiRemap;
4346 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
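    /* u32TheEnd mirrors the version value as an end-of-structure marker, letting PDM catch
       size/layout mismatches between the device and the VMM. */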
4347
4348 rc = PDMDevHlpIommuSetUpContext(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp));
4349 AssertRCReturn(rc, rc);
4350 AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp), VERR_IOMMU_IPE_1);
4351 AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32Version == CTX_MID(PDM_IOMMUHLP,_VERSION), VERR_VERSION_MISMATCH);
4352 AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd == CTX_MID(PDM_IOMMUHLP,_VERSION), VERR_VERSION_MISMATCH);
4353 AssertPtr(pThisCC->CTX_SUFF(pIommuHlp)->pfnLock);
4354 AssertPtr(pThisCC->CTX_SUFF(pIommuHlp)->pfnUnlock);
4355 AssertPtr(pThisCC->CTX_SUFF(pIommuHlp)->pfnLockIsOwner);
4356 AssertPtr(pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi);
4357
4358 return VINF_SUCCESS;
4359}
4360
4361#endif
4362
4363
4364/**
4365 * The device registration structure.
4366 */
4367PDMDEVREG const g_DeviceIommuIntel =
4368{
4369 /* .u32Version = */ PDM_DEVREG_VERSION,
4370 /* .uReserved0 = */ 0,
4371 /* .szName = */ "iommu-intel",
4372 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
4373 /* .fClass = */ PDM_DEVREG_CLASS_PCI_BUILTIN,
4374 /* .cMaxInstances = */ 1,
4375 /* .uSharedVersion = */ 42,
4376 /* .cbInstanceShared = */ sizeof(DMAR),
4377 /* .cbInstanceCC = */ sizeof(DMARCC),
4378 /* .cbInstanceRC = */ sizeof(DMARRC),
4379 /* .cMaxPciDevices = */ 1,
4380 /* .cMaxMsixVectors = */ 0,
4381 /* .pszDescription = */ "IOMMU (Intel)",
4382#if defined(IN_RING3)
4383 /* .pszRCMod = */ "VBoxDDRC.rc",
4384 /* .pszR0Mod = */ "VBoxDDR0.r0",
4385 /* .pfnConstruct = */ iommuIntelR3Construct,
4386 /* .pfnDestruct = */ iommuIntelR3Destruct,
4387 /* .pfnRelocate = */ NULL,
4388 /* .pfnMemSetup = */ NULL,
4389 /* .pfnPowerOn = */ NULL,
4390 /* .pfnReset = */ iommuIntelR3Reset,
4391 /* .pfnSuspend = */ NULL,
4392 /* .pfnResume = */ NULL,
4393 /* .pfnAttach = */ NULL,
4394 /* .pfnDetach = */ NULL,
4395 /* .pfnQueryInterface = */ NULL,
4396 /* .pfnInitComplete = */ NULL,
4397 /* .pfnPowerOff = */ NULL,
4398 /* .pfnSoftReset = */ NULL,
4399 /* .pfnReserved0 = */ NULL,
4400 /* .pfnReserved1 = */ NULL,
4401 /* .pfnReserved2 = */ NULL,
4402 /* .pfnReserved3 = */ NULL,
4403 /* .pfnReserved4 = */ NULL,
4404 /* .pfnReserved5 = */ NULL,
4405 /* .pfnReserved6 = */ NULL,
4406 /* .pfnReserved7 = */ NULL,
4407#elif defined(IN_RING0)
4408 /* .pfnEarlyConstruct = */ NULL,
4409 /* .pfnConstruct = */ iommuIntelRZConstruct,
4410 /* .pfnDestruct = */ NULL,
4411 /* .pfnFinalDestruct = */ NULL,
4412 /* .pfnRequest = */ NULL,
4413 /* .pfnReserved0 = */ NULL,
4414 /* .pfnReserved1 = */ NULL,
4415 /* .pfnReserved2 = */ NULL,
4416 /* .pfnReserved3 = */ NULL,
4417 /* .pfnReserved4 = */ NULL,
4418 /* .pfnReserved5 = */ NULL,
4419 /* .pfnReserved6 = */ NULL,
4420 /* .pfnReserved7 = */ NULL,
4421#elif defined(IN_RC)
4422 /* .pfnConstruct = */ iommuIntelRZConstruct,
4423 /* .pfnReserved0 = */ NULL,
4424 /* .pfnReserved1 = */ NULL,
4425 /* .pfnReserved2 = */ NULL,
4426 /* .pfnReserved3 = */ NULL,
4427 /* .pfnReserved4 = */ NULL,
4428 /* .pfnReserved5 = */ NULL,
4429 /* .pfnReserved6 = */ NULL,
4430 /* .pfnReserved7 = */ NULL,
4431#else
4432# error "Not in IN_RING3, IN_RING0 or IN_RC!"
4433#endif
4434 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
4435};
4436
4437#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
4438