VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp@ 84228

1/* $Id: DevIommuAmd.cpp 84228 2020-05-09 18:05:08Z vboxsync $ */
2/** @file
3 * IOMMU - Input/Output Memory Management Unit - AMD implementation.
4 */
5
6/*
7 * Copyright (C) 2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_IOMMU
23#include <VBox/msi.h>
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/AssertGuest.h>
26
27#include "VBoxDD.h"
28#include <iprt/x86.h>
29#include <iprt/string.h>
30
31
32/*********************************************************************************************************************************
33* Defined Constants And Macros *
34*********************************************************************************************************************************/
35/**
36 * @name PCI configuration register offsets.
37 * In accordance with the AMD spec.
38 * @{
39 */
40#define IOMMU_PCI_OFF_CAP_HDR 0x40
41#define IOMMU_PCI_OFF_BASE_ADDR_REG_LO 0x44
42#define IOMMU_PCI_OFF_BASE_ADDR_REG_HI 0x48
43#define IOMMU_PCI_OFF_RANGE_REG 0x4c
44#define IOMMU_PCI_OFF_MISCINFO_REG_0 0x50
45#define IOMMU_PCI_OFF_MISCINFO_REG_1 0x54
46#define IOMMU_PCI_OFF_MSI_CAP_HDR 0x64
47#define IOMMU_PCI_OFF_MSI_ADDR_LO 0x68
48#define IOMMU_PCI_OFF_MSI_ADDR_HI 0x6c
49#define IOMMU_PCI_OFF_MSI_DATA 0x70
50#define IOMMU_PCI_OFF_MSI_MAP_CAP_HDR 0x74
51/** @} */
52
53/**
54 * @name MMIO register offsets.
55 * In accordance with the AMD spec.
56 * @{
57 */
58#define IOMMU_MMIO_OFF_DEV_TAB_BAR 0x00
59#define IOMMU_MMIO_OFF_CMD_BUF_BAR 0x08
60#define IOMMU_MMIO_OFF_EVT_LOG_BAR 0x10
61#define IOMMU_MMIO_OFF_CTRL 0x18
62#define IOMMU_MMIO_OFF_EXCL_BAR 0x20
63#define IOMMU_MMIO_OFF_EXCL_RANGE_LIMIT 0x28
64#define IOMMU_MMIO_OFF_EXT_FEAT 0x30
65
66#define IOMMU_MMIO_OFF_PPR_LOG_BAR 0x38
67#define IOMMU_MMIO_OFF_HW_EVT_HI 0x40
68#define IOMMU_MMIO_OFF_HW_EVT_LO 0x48
69#define IOMMU_MMIO_OFF_HW_EVT_STATUS 0x50
70
71#define IOMMU_MMIO_OFF_SMI_FLT_FIRST 0x60
72#define IOMMU_MMIO_OFF_SMI_FLT_LAST 0xd8
73
74#define IOMMU_MMIO_OFF_GALOG_BAR 0xe0
75#define IOMMU_MMIO_OFF_GALOG_TAIL_ADDR 0xe8
76
77#define IOMMU_MMIO_OFF_PPR_LOG_B_BAR 0xf0
78#define IOMMU_MMIO_OFF_PPR_EVT_B_BAR 0xf8
79
80#define IOMMU_MMIO_OFF_DEV_TAB_SEG_FIRST 0x100
81#define IOMMU_MMIO_OFF_DEV_TAB_SEG_1 0x100
82#define IOMMU_MMIO_OFF_DEV_TAB_SEG_2 0x108
83#define IOMMU_MMIO_OFF_DEV_TAB_SEG_3 0x110
84#define IOMMU_MMIO_OFF_DEV_TAB_SEG_4 0x118
85#define IOMMU_MMIO_OFF_DEV_TAB_SEG_5 0x120
86#define IOMMU_MMIO_OFF_DEV_TAB_SEG_6 0x128
87#define IOMMU_MMIO_OFF_DEV_TAB_SEG_7 0x130
88#define IOMMU_MMIO_OFF_DEV_TAB_SEG_LAST 0x130
89
90#define IOMMU_MMIO_OFF_DEV_SPECIFIC_FEAT 0x138
91#define IOMMU_MMIO_OFF_DEV_SPECIFIC_CTRL 0x140
92#define IOMMU_MMIO_OFF_DEV_SPECIFIC_STATUS 0x148
93
94#define IOMMU_MMIO_OFF_MSI_VECTOR_0 0x150
95#define IOMMU_MMIO_OFF_MSI_VECTOR_1 0x154
96#define IOMMU_MMIO_OFF_MSI_CAP_HDR 0x158
97#define IOMMU_MMIO_OFF_MSI_ADDR_LO 0x15c
98#define IOMMU_MMIO_OFF_MSI_ADDR_HI 0x160
99#define IOMMU_MMIO_OFF_MSI_DATA 0x164
100#define IOMMU_MMIO_OFF_MSI_MAPPING_CAP_HDR 0x168
101
102#define IOMMU_MMIO_OFF_PERF_OPT_CTRL 0x16c
103
104#define IOMMU_MMIO_OFF_XT_GEN_INTR_CTRL 0x170
105#define IOMMU_MMIO_OFF_XT_PPR_INTR_CTRL 0x178
106#define IOMMU_MMIO_OFF_XT_GALOG_INT_CTRL 0x180
107
108#define IOMMU_MMIO_OFF_MARC_APER_BAR_0 0x200
109#define IOMMU_MMIO_OFF_MARC_APER_RELOC_0 0x208
110#define IOMMU_MMIO_OFF_MARC_APER_LEN_0 0x210
111#define IOMMU_MMIO_OFF_MARC_APER_BAR_1 0x218
112#define IOMMU_MMIO_OFF_MARC_APER_RELOC_1 0x220
113#define IOMMU_MMIO_OFF_MARC_APER_LEN_1 0x228
114#define IOMMU_MMIO_OFF_MARC_APER_BAR_2 0x230
115#define IOMMU_MMIO_OFF_MARC_APER_RELOC_2 0x238
116#define IOMMU_MMIO_OFF_MARC_APER_LEN_2 0x240
117#define IOMMU_MMIO_OFF_MARC_APER_BAR_3 0x248
118#define IOMMU_MMIO_OFF_MARC_APER_RELOC_3 0x250
119#define IOMMU_MMIO_OFF_MARC_APER_LEN_3 0x258
120
121#define IOMMU_MMIO_OFF_RSVD_REG 0x1ff8
122
123#define IOMMU_MMIO_CMD_BUF_HEAD_PTR 0x2000
124#define IOMMU_MMIO_CMD_BUF_TAIL_PTR 0x2008
125#define IOMMU_MMIO_EVT_LOG_HEAD_PTR 0x2010
126#define IOMMU_MMIO_EVT_LOG_TAIL_PTR 0x2018
127
128#define IOMMU_MMIO_OFF_STATUS 0x2020
129
130#define IOMMU_MMIO_OFF_PPR_LOG_HEAD_PTR 0x2030
131#define IOMMU_MMIO_OFF_PPR_LOG_TAIL_PTR 0x2038
132
133#define IOMMU_MMIO_OFF_GALOG_HEAD_PTR 0x2040
134#define IOMMU_MMIO_OFF_GALOG_TAIL_PTR 0x2048
135
136#define IOMMU_MMIO_OFF_PPR_LOG_B_HEAD_PTR 0x2050
137#define IOMMU_MMIO_OFF_PPR_LOG_B_TAIL_PTR 0x2058
138
139#define IOMMU_MMIO_OFF_EVT_LOG_B_HEAD_PTR 0x2070
140#define IOMMU_MMIO_OFF_EVT_LOG_B_TAIL_PTR 0x2078
141
142#define IOMMU_MMIO_OFF_PPR_LOG_AUTO_RESP 0x2080
143#define IOMMU_MMIO_OFF_PPR_LOG_OVERFLOW_EARLY 0x2088
144#define IOMMU_MMIO_OFF_PPR_LOG_B_OVERFLOW_EARLY 0x2090
145/** @} */
146
147/**
148 * @name MMIO register-access table offsets.
149 * Each table [first..last] (both inclusive) represents the range of registers
150 * covered by a distinct register-access table. This is done due to arbitrarily large
151 * gaps in the MMIO register offsets themselves.
152 * @{
153 */
154#define IOMMU_MMIO_OFF_TABLE_0_FIRST 0x00
155#define IOMMU_MMIO_OFF_TABLE_0_LAST 0x258
156
157#define IOMMU_MMIO_OFF_TABLE_1_FIRST 0x1ff8
158#define IOMMU_MMIO_OFF_TABLE_1_LAST 0x2090
159/** @} */
160
161/**
162 * @name Commands.
163 * In accordance with the AMD spec.
164 * @{
165 */
166#define IOMMU_CMD_COMPLETION_WAIT 0x01
167#define IOMMU_CMD_INV_DEV_TAB_ENTRY 0x02
168#define IOMMU_CMD_INV_IOMMU_PAGES 0x03
169#define IOMMU_CMD_INV_IOTLB_PAGES 0x04
170#define IOMMU_CMD_INV_INTR_TABLE 0x05
171#define IOMMU_CMD_PREFETCH_IOMMU_PAGES 0x06
172#define IOMMU_CMD_COMPLETE_PPR_REQ 0x07
173#define IOMMU_CMD_INV_IOMMU_ALL 0x08
174/** @} */
175
176/**
177 * @name Event codes.
178 * In accordance with the AMD spec.
179 * @{
180 */
181#define IOMMU_EVT_ILLEGAL_DEV_TAB_ENTRY 0x01
182#define IOMMU_EVT_IO_PAGE_FAULT 0x02
183#define IOMMU_EVT_DEV_TAB_HW_ERROR 0x03
184#define IOMMU_EVT_PAGE_TAB_HW_ERROR 0x04
185#define IOMMU_EVT_ILLEGAL_CMD_ERROR 0x05
186#define IOMMU_EVT_COMMAND_HW_ERROR 0x06
187#define IOMMU_EVT_IOTLB_INV_TIMEOUT 0x07
188#define IOMMU_EVT_INVALID_DEV_REQ 0x08
189#define IOMMU_EVT_INVALID_PPR_REQ 0x09
190#define IOMMU_EVT_EVENT_COUNTER_ZERO 0x10
191#define IOMMU_EVT_GUEST_EVENT_FAULT 0x11
192/** @} */
193
194/**
195 * @name IOMMU Capability Header.
196 * In accordance with the AMD spec.
197 * @{
198 */
199/** CapId: Capability ID. */
200#define IOMMU_BF_CAPHDR_CAP_ID_SHIFT 0
201#define IOMMU_BF_CAPHDR_CAP_ID_MASK UINT32_C(0x000000ff)
202/** CapPtr: Capability Pointer. */
203#define IOMMU_BF_CAPHDR_CAP_PTR_SHIFT 8
204#define IOMMU_BF_CAPHDR_CAP_PTR_MASK UINT32_C(0x0000ff00)
205/** CapType: Capability Type. */
206#define IOMMU_BF_CAPHDR_CAP_TYPE_SHIFT 16
207#define IOMMU_BF_CAPHDR_CAP_TYPE_MASK UINT32_C(0x00070000)
208/** CapRev: Capability Revision. */
209#define IOMMU_BF_CAPHDR_CAP_REV_SHIFT 19
210#define IOMMU_BF_CAPHDR_CAP_REV_MASK UINT32_C(0x00f80000)
211/** IoTlbSup: IO TLB Support. */
212#define IOMMU_BF_CAPHDR_IOTLB_SUP_SHIFT 24
213#define IOMMU_BF_CAPHDR_IOTLB_SUP_MASK UINT32_C(0x01000000)
214/** HtTunnel: HyperTransport Tunnel translation support. */
215#define IOMMU_BF_CAPHDR_HT_TUNNEL_SHIFT 25
216#define IOMMU_BF_CAPHDR_HT_TUNNEL_MASK UINT32_C(0x02000000)
217/** NpCache: Not Present table entries Cached. */
218#define IOMMU_BF_CAPHDR_NP_CACHE_SHIFT 26
219#define IOMMU_BF_CAPHDR_NP_CACHE_MASK UINT32_C(0x04000000)
220/** EFRSup: Extended Feature Register (EFR) Supported. */
221#define IOMMU_BF_CAPHDR_EFR_SUP_SHIFT 27
222#define IOMMU_BF_CAPHDR_EFR_SUP_MASK UINT32_C(0x08000000)
223/** CapExt: Miscellaneous Information Register Supported. */
224#define IOMMU_BF_CAPHDR_CAP_EXT_SHIFT 28
225#define IOMMU_BF_CAPHDR_CAP_EXT_MASK UINT32_C(0x10000000)
226/** Bits 31:29 reserved. */
227#define IOMMU_BF_CAPHDR_RSVD_29_31_SHIFT 29
228#define IOMMU_BF_CAPHDR_RSVD_29_31_MASK UINT32_C(0xe0000000)
229RT_BF_ASSERT_COMPILE_CHECKS(IOMMU_BF_CAPHDR_, UINT32_C(0), UINT32_MAX,
230 (CAP_ID, CAP_PTR, CAP_TYPE, CAP_REV, IOTLB_SUP, HT_TUNNEL, NP_CACHE, EFR_SUP, CAP_EXT, RSVD_29_31));
231/** @} */
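/*
 * Editor's example (not part of the original source): the shift/mask pairs above are
 * laid out for IPRT's RT_BF_MAKE/RT_BF_GET helpers. A minimal sketch of composing a
 * capability header dword and reading a field back; the function name and the field
 * values are illustrative assumptions only.
 */
DECLINLINE(uint32_t) iommuAmdExampleCapHdr(void)
{
    uint32_t const uCapHdr = RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_ID,   0xf)  /* Assumed: secure-device capability block ID. */
                           | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_PTR,  IOMMU_PCI_OFF_MSI_CAP_HDR)
                           | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_TYPE, 0x3)  /* Assumed: IOMMU capability type. */
                           | RT_BF_MAKE(IOMMU_BF_CAPHDR_EFR_SUP,  1);
    /* Pull a field back out using the same prefix+name convention. */
    return RT_BF_GET(uCapHdr, IOMMU_BF_CAPHDR_CAP_PTR) == IOMMU_PCI_OFF_MSI_CAP_HDR ? uCapHdr : 0;
}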
232
233/**
234 * @name IOMMU Base Address Low Register.
235 * In accordance with the AMD spec.
236 * @{
237 */
238/** Enable: Enables access to the address specified in the Base Address Register. */
239#define IOMMU_BF_BASEADDR_LO_ENABLE_SHIFT 0
240#define IOMMU_BF_BASEADDR_LO_ENABLE_MASK UINT32_C(0x00000001)
241/** Bits 13:1 reserved. */
242#define IOMMU_BF_BASEADDR_LO_RSVD_1_13_SHIFT 1
243#define IOMMU_BF_BASEADDR_LO_RSVD_1_13_MASK UINT32_C(0x00003ffe)
244/** Base Address[31:14]: Low Base address of IOMMU MMIO control registers. */
245#define IOMMU_BF_BASEADDR_LO_ADDR_SHIFT 14
246#define IOMMU_BF_BASEADDR_LO_ADDR_MASK UINT32_C(0xffffc000)
247RT_BF_ASSERT_COMPILE_CHECKS(IOMMU_BF_BASEADDR_LO_, UINT32_C(0), UINT32_MAX,
248 (ENABLE, RSVD_1_13, ADDR));
249/** @} */
250
251/**
252 * @name IOMMU Range Register.
253 * In accordance with the AMD spec.
254 * @{
255 */
256/** UnitID: HyperTransport Unit ID. */
257#define IOMMU_BF_RANGE_UNIT_ID_SHIFT 0
258#define IOMMU_BF_RANGE_UNIT_ID_MASK UINT32_C(0x0000001f)
259/** Bits 6:5 reserved. */
260#define IOMMU_BF_RANGE_RSVD_5_6_SHIFT 5
261#define IOMMU_BF_RANGE_RSVD_5_6_MASK UINT32_C(0x00000060)
262/** RngValid: Range valid. */
263#define IOMMU_BF_RANGE_VALID_SHIFT 7
264#define IOMMU_BF_RANGE_VALID_MASK UINT32_C(0x00000080)
265/** BusNumber: Device range bus number. */
266#define IOMMU_BF_RANGE_BUS_NUMBER_SHIFT 8
267#define IOMMU_BF_RANGE_BUS_NUMBER_MASK UINT32_C(0x0000ff00)
268/** First Device. */
269#define IOMMU_BF_RANGE_FIRST_DEVICE_SHIFT 16
270#define IOMMU_BF_RANGE_FIRST_DEVICE_MASK UINT32_C(0x00ff0000)
271/** Last Device. */
272#define IOMMU_BF_RANGE_LAST_DEVICE_SHIFT 24
273#define IOMMU_BF_RANGE_LAST_DEVICE_MASK UINT32_C(0xff000000)
274RT_BF_ASSERT_COMPILE_CHECKS(IOMMU_BF_RANGE_, UINT32_C(0), UINT32_MAX,
275 (UNIT_ID, RSVD_5_6, VALID, BUS_NUMBER, FIRST_DEVICE, LAST_DEVICE));
276/** @} */
277
278/**
279 * @name IOMMU Miscellaneous Information Register 0.
280 * In accordance with the AMD spec.
281 * @{
282 */
283/** MsiNum: MSI message number. */
284#define IOMMU_BF_MISCINFO_0_MSI_NUM_SHIFT 0
285#define IOMMU_BF_MISCINFO_0_MSI_NUM_MASK UINT32_C(0x0000001f)
286/** GvaSize: Guest Virtual Address Size. */
287#define IOMMU_BF_MISCINFO_0_GVA_SIZE_SHIFT 5
288#define IOMMU_BF_MISCINFO_0_GVA_SIZE_MASK UINT32_C(0x000000e0)
289/** PaSize: Physical Address Size. */
290#define IOMMU_BF_MISCINFO_0_PA_SIZE_SHIFT 8
291#define IOMMU_BF_MISCINFO_0_PA_SIZE_MASK UINT32_C(0x00007f00)
292/** VaSize: Virtual Address Size. */
293#define IOMMU_BF_MISCINFO_0_VA_SIZE_SHIFT 15
294#define IOMMU_BF_MISCINFO_0_VA_SIZE_MASK UINT32_C(0x003f8000)
295/** HtAtsResv: HyperTransport ATS Response Address range Reserved. */
296#define IOMMU_BF_MISCINFO_0_HT_ATS_RESV_SHIFT 22
297#define IOMMU_BF_MISCINFO_0_HT_ATS_RESV_MASK UINT32_C(0x00400000)
298/** Bits 26:23 reserved. */
299#define IOMMU_BF_MISCINFO_0_RSVD_23_26_SHIFT 23
300#define IOMMU_BF_MISCINFO_0_RSVD_23_26_MASK UINT32_C(0x07800000)
301/** MsiNumPPR: Peripheral Page Request MSI message number. */
302#define IOMMU_BF_MISCINFO_0_MSI_NUM_PPR_SHIFT 27
303#define IOMMU_BF_MISCINFO_0_MSI_NUM_PPR_MASK UINT32_C(0xf8000000)
304RT_BF_ASSERT_COMPILE_CHECKS(IOMMU_BF_MISCINFO_0_, UINT32_C(0), UINT32_MAX,
305 (MSI_NUM, GVA_SIZE, PA_SIZE, VA_SIZE, HT_ATS_RESV, RSVD_23_26, MSI_NUM_PPR));
306/** @} */
307
308/**
309 * @name IOMMU Miscellaneous Information Register 1.
310 * In accordance with the AMD spec.
311 * @{
312 */
313/** MsiNumGA: MSI message number for guest virtual-APIC log. */
314#define IOMMU_BF_MISCINFO_1_MSI_NUM_GA_SHIFT 0
315#define IOMMU_BF_MISCINFO_1_MSI_NUM_GA_MASK UINT32_C(0x0000001f)
316/** Bits 31:5 reserved. */
317#define IOMMU_BF_MISCINFO_1_RSVD_5_31_SHIFT 5
318#define IOMMU_BF_MISCINFO_1_RSVD_5_31_MASK UINT32_C(0xffffffe0)
319RT_BF_ASSERT_COMPILE_CHECKS(IOMMU_BF_MISCINFO_1_, UINT32_C(0), UINT32_MAX,
320 (MSI_NUM_GA, RSVD_5_31));
321/** @} */
322
323/**
324 * @name MSI Capability Header Register.
325 * In accordance with the AMD spec.
326 * @{
327 */
328/** MsiCapId: Capability ID. */
329#define IOMMU_BF_MSI_CAP_HDR_CAP_ID_SHIFT 0
330#define IOMMU_BF_MSI_CAP_HDR_CAP_ID_MASK UINT32_C(0x000000ff)
331/** MsiCapPtr: Pointer (PCI config offset) to the next capability. */
332#define IOMMU_BF_MSI_CAP_HDR_CAP_PTR_SHIFT 8
333#define IOMMU_BF_MSI_CAP_HDR_CAP_PTR_MASK UINT32_C(0x0000ff00)
334/** MsiEn: Message Signal Interrupt enable. */
335#define IOMMU_BF_MSI_CAP_HDR_EN_SHIFT 16
336#define IOMMU_BF_MSI_CAP_HDR_EN_MASK UINT32_C(0x00010000)
337/** MsiMultMessCap: MSI Multi-Message Capability. */
338#define IOMMU_BF_MSI_CAP_HDR_MULTMESS_CAP_SHIFT 17
339#define IOMMU_BF_MSI_CAP_HDR_MULTMESS_CAP_MASK UINT32_C(0x000e0000)
340/** MsiMultMessEn: MSI Multi-Message Enable. */
341#define IOMMU_BF_MSI_CAP_HDR_MULTMESS_EN_SHIFT 20
342#define IOMMU_BF_MSI_CAP_HDR_MULTMESS_EN_MASK UINT32_C(0x00700000)
343/** Msi64BitEn: MSI 64-bit Enabled. */
344#define IOMMU_BF_MSI_CAP_HDR_64BIT_EN_SHIFT 23
345#define IOMMU_BF_MSI_CAP_HDR_64BIT_EN_MASK UINT32_C(0x00800000)
346/** Bits 31:24 reserved. */
347#define IOMMU_BF_MSI_CAP_HDR_RSVD_24_31_SHIFT 24
348#define IOMMU_BF_MSI_CAP_HDR_RSVD_24_31_MASK UINT32_C(0xff000000)
349RT_BF_ASSERT_COMPILE_CHECKS(IOMMU_BF_MSI_CAP_HDR_, UINT32_C(0), UINT32_MAX,
350 (CAP_ID, CAP_PTR, EN, MULTMESS_CAP, MULTMESS_EN, 64BIT_EN, RSVD_24_31));
351/** @} */
352
353/**
354 * @name MSI Mapping Capability Header Register.
355 * In accordance with the AMD spec.
356 * @{
357 */
358/** MsiMapCapId: Capability ID. */
359#define IOMMU_BF_MSI_MAP_CAPHDR_CAP_ID_SHIFT 0
360#define IOMMU_BF_MSI_MAP_CAPHDR_CAP_ID_MASK UINT32_C(0x000000ff)
361/** MsiMapCapPtr: Pointer (PCI config offset) to the next capability. */
362#define IOMMU_BF_MSI_MAP_CAPHDR_CAP_PTR_SHIFT 8
363#define IOMMU_BF_MSI_MAP_CAPHDR_CAP_PTR_MASK UINT32_C(0x0000ff00)
364/** MsiMapEn: MSI mapping capability enable. */
365#define IOMMU_BF_MSI_MAP_CAPHDR_EN_SHIFT 16
366#define IOMMU_BF_MSI_MAP_CAPHDR_EN_MASK UINT32_C(0x00010000)
367/** MsiMapFixd: MSI interrupt mapping range is not programmable. */
368#define IOMMU_BF_MSI_MAP_CAPHDR_FIXED_SHIFT 17
369#define IOMMU_BF_MSI_MAP_CAPHDR_FIXED_MASK UINT32_C(0x00020000)
370/** Bits 26:18 reserved. */
371#define IOMMU_BF_MSI_MAP_CAPHDR_RSVD_18_28_SHIFT 18
372#define IOMMU_BF_MSI_MAP_CAPHDR_RSVD_18_28_MASK UINT32_C(0x07fc0000)
373/** MsiMapCapType: MSI mapping capability. */
374#define IOMMU_BF_MSI_MAP_CAPHDR_CAP_TYPE_SHIFT 27
375#define IOMMU_BF_MSI_MAP_CAPHDR_CAP_TYPE_MASK UINT32_C(0xf8000000)
376RT_BF_ASSERT_COMPILE_CHECKS(IOMMU_BF_MSI_MAP_CAPHDR_, UINT32_C(0), UINT32_MAX,
377 (CAP_ID, CAP_PTR, EN, FIXED, RSVD_18_28, CAP_TYPE));
378/** @} */
379
380/**
381 * @name IOMMU Status Register Bits.
382 * In accordance with the AMD spec.
383 * @{
384 */
385/** EventOverflow: Event log overflow. */
386#define IOMMU_STATUS_EVT_LOG_OVERFLOW RT_BIT_64(0)
387/** EventLogInt: Event log interrupt. */
388#define IOMMU_STATUS_EVT_LOG_INTR RT_BIT_64(1)
389/** ComWaitInt: Completion wait interrupt. */
390#define IOMMU_STATUS_COMPLETION_WAIT_INTR RT_BIT_64(2)
391/** EventLogRun: Event log is running. */
392#define IOMMU_STATUS_EVT_LOG_RUNNING RT_BIT_64(3)
393/** CmdBufRun: Command buffer is running. */
394#define IOMMU_STATUS_CMD_BUF_RUNNING RT_BIT_64(4)
395/** PprOverflow: Peripheral page request log overflow. */
396#define IOMMU_STATUS_PPR_LOG_OVERFLOW RT_BIT_64(5)
397/** PprInt: Peripheral page request log interrupt. */
398#define IOMMU_STATUS_PPR_LOG_INTR RT_BIT_64(6)
399/** PprLogRun: Peripheral page request log is running. */
400#define IOMMU_STATUS_PPR_LOG_RUN RT_BIT_64(7)
401/** GALogRun: Guest virtual-APIC log is running. */
402#define IOMMU_STATUS_GA_LOG_RUN RT_BIT_64(8)
403/** GALOverflow: Guest virtual-APIC log overflow. */
404#define IOMMU_STATUS_GA_LOG_OVERFLOW RT_BIT_64(9)
405/** GAInt: Guest virtual-APIC log interrupt. */
406#define IOMMU_STATUS_GA_LOG_INTR RT_BIT_64(10)
407/** PprOvrflwB: PPR Log B overflow. */
408#define IOMMU_STATUS_PPR_LOG_B_OVERFLOW RT_BIT_64(11)
409/** PprLogActive: PPR Log B is active. */
410#define IOMMU_STATUS_PPR_LOG_B_ACTIVE RT_BIT_64(12)
411/** EvtOvrflwB: Event log B overflow. */
412#define IOMMU_STATUS_EVT_LOG_B_OVERFLOW RT_BIT_64(15)
413/** EventLogActive: Event log B active. */
414#define IOMMU_STATUS_EVT_LOG_B_ACTIVE RT_BIT_64(16)
415/** PprOvrflwEarlyB: PPR log B overflow early warning. */
416#define IOMMU_STATUS_PPR_LOG_B_OVERFLOW_EARLY RT_BIT_64(17)
417/** PprOverflowEarly: PPR log overflow early warning. */
418#define IOMMU_STATUS_PPR_LOG_OVERFLOW_EARLY RT_BIT_64(18)
419/** @} */
420
421/** @name IOMMU_IO_PERM_XXX: IOMMU I/O access permissions bits.
422 * In accordance with the AMD spec.
423 *
424 * These values match the shifted values of the IR and IW field of the DTE and the
425 * PTE, PDE of the I/O page tables.
426 *
427 * @{ */
428#define IOMMU_IO_PERM_NONE (0)
429#define IOMMU_IO_PERM_READ RT_BIT_64(0)
430#define IOMMU_IO_PERM_WRITE RT_BIT_64(1)
431#define IOMMU_IO_PERM_READ_WRITE (IOMMU_IO_PERM_READ | IOMMU_IO_PERM_WRITE)
432#define IOMMU_IO_PERM_SHIFT 61
433#define IOMMU_IO_PERM_MASK 0x3
434/** @} */
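/*
 * Editor's example (not part of the original source): a minimal sketch showing how the
 * shift/mask above pull the IR/IW bits (bits 62:61) out of a DTE/PDE/PTE qword as an
 * IOMMU_IO_PERM_XXX value. The helper name is an illustrative assumption.
 */
DECLINLINE(uint8_t) iommuAmdExampleGetIoPerm(uint64_t uQword)
{
    return (uint8_t)((uQword >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK);
}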
435
436/** @name SYSMGT_TYPE_XXX: System Management Message Enable Types.
437 * In accordance with the AMD spec.
438 * @{ */
439#define SYSMGTTYPE_DMA_DENY (0)
440#define SYSMGTTYPE_MSG_ALL_ALLOW (1)
441#define SYSMGTTYPE_MSG_INT_ALLOW (2)
442#define SYSMGTTYPE_DMA_ALLOW (3)
443/** @} */
444
445/**
446 * @name IOMMU Control Register Bits.
447 * In accordance with the AMD spec.
448 * @{
449 */
450/** IommuEn: Enable the IOMMU. */
451#define IOMMU_CTRL_IOMMU_EN RT_BIT_64(0)
452/** HtTunEn: HyperTransport tunnel translation enable. */
453#define IOMMU_CTRL_HT_TUNNEL_EN RT_BIT_64(1)
454/** EventLogEn: Event log enable. */
455#define IOMMU_CTRL_EVT_LOG_EN RT_BIT_64(2)
456/** EventIntEn: Event interrupt enable. */
457#define IOMMU_CTRL_EVT_INTR_EN RT_BIT_64(3)
458/** ComWaitIntEn: Completion wait interrupt enable. */
459#define IOMMU_CTRL_COMPLETION_WAIT_INTR_EN RT_BIT_64(4)
460/** InvTimeout: Invalidation timeout. */
461#define IOMMU_CTRL_INV_TIMEOUT (RT_BIT_64(5) | RT_BIT_64(6) | RT_BIT_64(7))
462/** @todo IOMMU: the rest or remove it. */
463/** @} */
464
465/** @name Miscellaneous IOMMU defines.
466 * @{ */
467/** Log prefix string. */
468#define IOMMU_LOG_PFX "AMD_IOMMU"
469/** The current saved state version. */
470#define IOMMU_SAVED_STATE_VERSION 1
471/** AMD's vendor ID. */
472#define IOMMU_PCI_VENDOR_ID 0x1022
473/** VirtualBox IOMMU device ID. */
474#define IOMMU_PCI_DEVICE_ID 0xc0de
475/** VirtualBox IOMMU device revision ID. */
476#define IOMMU_PCI_REVISION_ID 0x01
477/** Size of the MMIO region in bytes. */
478#define IOMMU_MMIO_REGION_SIZE _16K
479/** Number of device table segments supported, expressed as a power of two (2^3 = 8 segments). */
480#define IOMMU_MAX_DEV_TAB_SEGMENTS 3
481/** Maximum host address translation level supported (inclusive). */
482#define IOMMU_MAX_HOST_PT_LEVEL 6
483/** The IOTLB entry magic. */
484#define IOMMU_IOTLBE_MAGIC 0x10acce55
485/** @} */
486
487/**
488 * Acquires the IOMMU PDM lock or returns @a a_rcBusy if it's busy.
489 */
490#define IOMMU_LOCK_RET(a_pDevIns, a_pThis, a_rcBusy) \
491 do { \
492 NOREF(a_pThis); \
493 int rcLock = PDMDevHlpCritSectEnter((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo), (a_rcBusy)); \
494 if (RT_LIKELY(rcLock == VINF_SUCCESS)) \
495 { /* likely */ } \
496 else \
497 return rcLock; \
498 } while (0)
499
500/**
501 * Releases the IOMMU PDM lock.
502 */
503#define IOMMU_UNLOCK(a_pDevIns, a_pThis) \
504 do { \
505 PDMDevHlpCritSectLeave((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo)); \
506 } while (0)
507
508/**
509 * Asserts that the critsect is owned by this thread.
510 */
511#define IOMMU_ASSERT_LOCKED(a_pDevIns) \
512 do { \
513 Assert(PDMDevHlpCritSectIsOwner((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo))); \
514 } while (0)
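/*
 * Editor's example (not part of the original source): a sketch of how the lock macros
 * above are meant to be used from a register/MMIO handler. "PIOMMU" stands for the
 * per-device state defined further down in this file; the handler name and the ring-3
 * fallback status code are illustrative assumptions, hence the #if 0.
 */
#if 0
static VBOXSTRICTRC iommuAmdExampleRegWrite(PPDMDEVINS pDevIns, PIOMMU pThis)
{
    IOMMU_LOCK_RET(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE);  /* Defer to ring-3 when the critsect is contended. */
    /* ... update shared register state here ... */
    IOMMU_UNLOCK(pDevIns, pThis);
    return VINF_SUCCESS;
}
#endif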
515
516/**
517 * Gets the device table size given the size field.
518 */
519#define IOMMU_GET_DEV_TAB_SIZE(a_uSize) (((a_uSize) + 1) << X86_PAGE_4K_SHIFT)
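/* Editor's note (illustrative): e.g. a Size field of 0x1ff yields (0x1ff + 1) << 12 = 2 MB,
   i.e. room for 65536 device table entries of 32 bytes -- one per possible 16-bit device ID. */
AssertCompile(IOMMU_GET_DEV_TAB_SIZE(0x1ff) == _2M);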
520
521
522/*********************************************************************************************************************************
523* Structures and Typedefs *
524*********************************************************************************************************************************/
525/**
526 * The Device ID.
527 * In accordance with VirtualBox's PCI configuration.
528 */
529typedef union
530{
531 struct
532 {
533 uint16_t u3Function : 3; /**< Bits 2:0 - Function. */
534 uint16_t u9Device : 9; /**< Bits 11:3 - Device. */
535 uint16_t u4Bus : 4; /**< Bits 15:12 - Bus. */
536 } n;
537 /** The unsigned integer view. */
538 uint16_t u;
539} DEVICE_ID_T;
540AssertCompileSize(DEVICE_ID_T, 2);
541
542/**
543 * Device Table Entry (DTE).
544 * In accordance with the AMD spec.
545 */
546typedef union
547{
548 struct
549 {
550 RT_GCC_EXTENSION uint64_t u1Valid : 1; /**< Bit 0 - V: Valid. */
551 RT_GCC_EXTENSION uint64_t u1TranslationValid : 1; /**< Bit 1 - TV: Translation information Valid. */
552 RT_GCC_EXTENSION uint64_t u5Rsvd0 : 5; /**< Bits 6:2 - Reserved. */
553 RT_GCC_EXTENSION uint64_t u2Had : 2; /**< Bits 8:7 - HAD: Host Access Dirty. */
554 RT_GCC_EXTENSION uint64_t u3Mode : 3; /**< Bits 11:9 - Mode: Paging mode. */
555 RT_GCC_EXTENSION uint64_t u40PageTableRootPtrLo : 40; /**< Bits 51:12 - Page Table Root Pointer. */
556 RT_GCC_EXTENSION uint64_t u1Ppr : 1; /**< Bit 52 - PPR: Peripheral Page Request. */
557 RT_GCC_EXTENSION uint64_t u1GstPprRespPasid : 1; /**< Bit 53 - GRPR: Guest PPR Response with PASID. */
558 RT_GCC_EXTENSION uint64_t u1GstIoValid : 1; /**< Bit 54 - GIoV: Guest I/O Protection Valid. */
559 RT_GCC_EXTENSION uint64_t u1GstTranslateValid : 1; /**< Bit 55 - GV: Guest translation Valid. */
560 RT_GCC_EXTENSION uint64_t u2GstMode : 2; /**< Bits 57:56 - GLX: Guest Paging mode levels. */
561 RT_GCC_EXTENSION uint64_t u3GstCr3TableRootPtrLo : 3; /**< Bits 60:58 - GCR3 TRP: Guest CR3 Table Root Ptr (Lo). */
562 RT_GCC_EXTENSION uint64_t u1IoRead : 1; /**< Bit 61 - IR: I/O Read permission. */
563 RT_GCC_EXTENSION uint64_t u1IoWrite : 1; /**< Bit 62 - IW: I/O Write permission. */
564 RT_GCC_EXTENSION uint64_t u1Rsvd0 : 1; /**< Bit 63 - Reserved. */
565 RT_GCC_EXTENSION uint64_t u16DomainId : 16; /**< Bits 79:64 - Domain ID. */
566 RT_GCC_EXTENSION uint64_t u16GstCr3TableRootPtrMed : 16; /**< Bits 95:80 - GCR3 TRP: Guest CR3 Table Root Ptr (Mid). */
567 RT_GCC_EXTENSION uint64_t u1IoTlbEnable : 1; /**< Bit 96 - I: IOTLB Enable. */
568 RT_GCC_EXTENSION uint64_t u1SuppressPfEvents : 1; /**< Bit 97 - SE: Suppress Page-fault events. */
569 RT_GCC_EXTENSION uint64_t u1SuppressAllPfEvents : 1; /**< Bit 98 - SA: Suppress All Page-fault events. */
570 RT_GCC_EXTENSION uint64_t u2IoCtl : 2; /**< Bits 100:99 - IoCtl: Port I/O Control. */
571 RT_GCC_EXTENSION uint64_t u1Cache : 1; /**< Bit 101 - Cache: IOTLB Cache Hint. */
572 RT_GCC_EXTENSION uint64_t u1SnoopDisable : 1; /**< Bit 102 - SD: Snoop Disable. */
573 RT_GCC_EXTENSION uint64_t u1AllowExclusion : 1; /**< Bit 103 - EX: Allow Exclusion. */
574 RT_GCC_EXTENSION uint64_t u2SysMgt : 2; /**< Bits 105:104 - SysMgt: System Management message enable. */
575 RT_GCC_EXTENSION uint64_t u1Rsvd1 : 1; /**< Bit 106 - Reserved. */
576 RT_GCC_EXTENSION uint64_t u21GstCr3TableRootPtrHi : 21; /**< Bits 127:107 - GCR3 TRP: Guest CR3 Table Root Ptr (Hi). */
577 RT_GCC_EXTENSION uint64_t u1IntrMapValid : 1; /**< Bit 128 - IV: Interrupt map Valid. */
578 RT_GCC_EXTENSION uint64_t u4IntrTableLength : 4; /**< Bits 132:129 - IntTabLen: Interrupt Table Length. */
579 RT_GCC_EXTENSION uint64_t u1IgnoreUnmappedIntrs : 1; /**< Bit 133 - IG: Ignore unmapped interrupts. */
580 RT_GCC_EXTENSION uint64_t u26IntrTableRootPtr : 26; /**< Bits 159:134 - Interrupt Root Table Pointer (Lo). */
581 RT_GCC_EXTENSION uint64_t u20IntrTableRootPtr : 20; /**< Bits 179:160 - Interrupt Root Table Pointer (Hi). */
582 RT_GCC_EXTENSION uint64_t u4Rsvd0 : 4; /**< Bits 183:180 - Reserved. */
583 RT_GCC_EXTENSION uint64_t u1InitPassthru : 1; /**< Bit 184 - INIT Pass-through. */
584 RT_GCC_EXTENSION uint64_t u1ExtIntPassthru : 1; /**< Bit 185 - External Interrupt Pass-through. */
585 RT_GCC_EXTENSION uint64_t u1NmiPassthru : 1; /**< Bit 186 - NMI Pass-through. */
586 RT_GCC_EXTENSION uint64_t u1Rsvd2 : 1; /**< Bit 187 - Reserved. */
587 RT_GCC_EXTENSION uint64_t u2IntrCtrl : 2; /**< Bits 189:188 - IntCtl: Interrupt Control. */
588 RT_GCC_EXTENSION uint64_t u1Lint0Passthru : 1; /**< Bit 190 - Lint0Pass: LINT0 Pass-through. */
589 RT_GCC_EXTENSION uint64_t u1Lint1Passthru : 1; /**< Bit 191 - Lint1Pass: LINT1 Pass-through. */
590 RT_GCC_EXTENSION uint64_t u32Rsvd0 : 32; /**< Bits 223:192 - Reserved. */
591 RT_GCC_EXTENSION uint64_t u22Rsvd0 : 22; /**< Bits 245:224 - Reserved. */
592 RT_GCC_EXTENSION uint64_t u1AttrOverride : 1; /**< Bit 246 - AttrV: Attribute Override. */
593 RT_GCC_EXTENSION uint64_t u1Mode0FC : 1; /**< Bit 247 - Mode0FC. */
594 RT_GCC_EXTENSION uint64_t u8SnoopAttr : 8; /**< Bits 255:248 - Snoop Attribute. */
595 } n;
596 /** The 32-bit unsigned integer view. */
597 uint32_t au32[8];
598 /** The 64-bit unsigned integer view. */
599 uint64_t au64[4];
600} DTE_T;
601AssertCompileSize(DTE_T, 32);
602/** Pointer to a device table entry. */
603typedef DTE_T *PDTE_T;
604/** Pointer to a const device table entry. */
605typedef DTE_T const *PCDTE_T;
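/*
 * Editor's example (not part of the original source): the device table is a flat array
 * of 32-byte DTEs indexed by the device ID, so the guest-physical address of a DTE
 * follows directly (segmentation and bounds checking omitted; the helper name is an
 * illustrative assumption).
 */
DECLINLINE(RTGCPHYS) iommuAmdExampleGCPhysDte(RTGCPHYS GCPhysDevTab, uint16_t uDevId)
{
    return GCPhysDevTab + (RTGCPHYS)uDevId * sizeof(DTE_T);
}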
606
607/** Mask of valid bits for EPHSUP (Enhanced Peripheral Page Request Handling
608 * Support) feature (bits 52:53). */
609#define IOMMU_DTE_QWORD_0_FEAT_EPHSUP_MASK UINT64_C(0x0030000000000000)
610
611/** Mask of valid bits for GTSup (Guest Translation Support) feature (bits 55:60,
612 * bits 80:95). */
613#define IOMMU_DTE_QWORD_0_FEAT_GTSUP_MASK UINT64_C(0x1f80000000000000)
614#define IOMMU_DTE_QWORD_1_FEAT_GTSUP_MASK UINT64_C(0x00000000ffff0000)
615
616/* Mask of valid bits for GIoSup (Guest I/O Protection Support) feature (bit 54). */
617#define IOMMU_DTE_QWORD_0_FEAT_GIOSUP_MASK UINT64_C(0x0040000000000000)
618
619/* Mask of valid DTE feature bits. */
620#define IOMMU_DTE_QWORD_0_FEAT_MASK ( IOMMU_DTE_QWORD_0_FEAT_EPHSUP_MASK \
621 | IOMMU_DTE_QWORD_0_FEAT_GTSUP_MASK \
622 | IOMMU_DTE_QWORD_0_FEAT_GIOSUP_MASK)
623#define IOMMU_DTE_QWORD_1_FEAT_MASK (IOMMU_DTE_QWORD_0_FEAT_GIOSUP_MASK)
624
625/* Mask of all valid DTE bits (including all feature bits). */
626#define IOMMU_DTE_QWORD_0_VALID_MASK UINT64_C(0x7fffffffffffff83)
627#define IOMMU_DTE_QWORD_1_VALID_MASK UINT64_C(0xfffffbffffffffff)
628#define IOMMU_DTE_QWORD_2_VALID_MASK UINT64_C(0xf70fffffffffffff)
629#define IOMMU_DTE_QWORD_3_VALID_MASK UINT64_C(0xffc0000000000000)
630
631/**
632 * I/O Page Translation Entry.
633 * In accordance with the AMD spec.
634 */
635typedef union
636{
637 struct
638 {
639 RT_GCC_EXTENSION uint64_t u1Present : 1; /**< Bit 0 - PR: Present. */
640 RT_GCC_EXTENSION uint64_t u4Ign0 : 4; /**< Bits 4:1 - Ignored. */
641 RT_GCC_EXTENSION uint64_t u1Accessed : 1; /**< Bit 5 - A: Accessed. */
642 RT_GCC_EXTENSION uint64_t u1Dirty : 1; /**< Bit 6 - D: Dirty. */
643 RT_GCC_EXTENSION uint64_t u2Ign0 : 2; /**< Bits 8:7 - Ignored. */
644 RT_GCC_EXTENSION uint64_t u3NextLevel : 3; /**< Bits 11:9 - Next Level: Next page translation level. */
645 RT_GCC_EXTENSION uint64_t u40PageAddr : 40; /**< Bits 51:12 - Page address. */
646 RT_GCC_EXTENSION uint64_t u7Rsvd0 : 7; /**< Bits 58:52 - Reserved. */
647 RT_GCC_EXTENSION uint64_t u1UntranslatedAccess : 1; /**< Bit 59 - U: Untranslated Access Only. */
648 RT_GCC_EXTENSION uint64_t u1ForceCoherent : 1; /**< Bit 60 - FC: Force Coherent. */
649 RT_GCC_EXTENSION uint64_t u1IoRead : 1; /**< Bit 61 - IR: I/O Read permission. */
650 RT_GCC_EXTENSION uint64_t u1IoWrite : 1; /**< Bit 62 - IW: I/O Write permission. */
651 RT_GCC_EXTENSION uint64_t u1Ign0 : 1; /**< Bit 63 - Ignored. */
652 } n;
653 /** The 64-bit unsigned integer view. */
654 uint64_t u64;
655} IOPTE_T;
656AssertCompileSize(IOPTE_T, 8);
657
658/**
659 * I/O Page Directory Entry.
660 * In accordance with the AMD spec.
661 */
662typedef union
663{
664 struct
665 {
666 RT_GCC_EXTENSION uint64_t u1Present : 1; /**< Bit 0 - PR: Present. */
667 RT_GCC_EXTENSION uint64_t u4Ign0 : 4; /**< Bits 4:1 - Ignored. */
668 RT_GCC_EXTENSION uint64_t u1Accessed : 1; /**< Bit 5 - A: Accessed. */
669 RT_GCC_EXTENSION uint64_t u3Ign0 : 3; /**< Bits 8:6 - Ignored. */
670 RT_GCC_EXTENSION uint64_t u3NextLevel : 3; /**< Bits 11:9 - Next Level: Next page translation level. */
671 RT_GCC_EXTENSION uint64_t u40PageAddr : 40; /**< Bits 51:12 - Page address (Next Table Address). */
672 RT_GCC_EXTENSION uint64_t u9Rsvd0 : 9; /**< Bits 60:52 - Reserved. */
673 RT_GCC_EXTENSION uint64_t u1IoRead : 1; /**< Bit 61 - IR: I/O Read permission. */
674 RT_GCC_EXTENSION uint64_t u1IoWrite : 1; /**< Bit 62 - IW: I/O Write permission. */
675 RT_GCC_EXTENSION uint64_t u1Ign0 : 1; /**< Bit 63 - Ignored. */
676 } n;
677 /** The 64-bit unsigned integer view. */
678 uint64_t u64;
679} IOPDE_T;
680AssertCompileSize(IOPDE_T, 8);
681
682/**
683 * I/O Page Table Entry/Entity.
684 * In accordance with the AMD spec.
685 *
686 * This is a common subset of a DTE.au64[0], PTE and PDE.
687 * Named as an "entity" to avoid confusing it with PTE.
688 */
689typedef union
690{
691 struct
692 {
693 RT_GCC_EXTENSION uint64_t u1Present : 1; /**< Bit 0 - PR: Present. */
694 RT_GCC_EXTENSION uint64_t u8Ign0 : 8; /**< Bits 8:1 - Ignored. */
695 RT_GCC_EXTENSION uint64_t u3NextLevel : 3; /**< Bits 11:9 - Mode / Next Level: Next page translation level. */
696 RT_GCC_EXTENSION uint64_t u40Addr : 40; /**< Bits 51:12 - Page address. */
697 RT_GCC_EXTENSION uint64_t u9Ign0 : 9; /**< Bits 60:52 - Ignored. */
698 RT_GCC_EXTENSION uint64_t u1IoRead : 1; /**< Bit 61 - IR: I/O Read permission. */
699 RT_GCC_EXTENSION uint64_t u1IoWrite : 1; /**< Bit 62 - IW: I/O Write permission. */
700 RT_GCC_EXTENSION uint64_t u1Ign0 : 1; /**< Bit 63 - Ignored. */
701 } n;
702 /** The 64-bit unsigned integer view. */
703 uint64_t u64;
704} IOPTENTITY_T;
705AssertCompileSize(IOPTENTITY_T, 8);
706AssertCompile(sizeof(IOPTENTITY_T) == sizeof(IOPTE_T));
707AssertCompile(sizeof(IOPTENTITY_T) == sizeof(IOPDE_T));
708/** Pointer to an IOPT_ENTITY_T struct. */
709typedef IOPTENTITY_T *PIOPTENTITY_T;
710/** Pointer to a const IOPT_ENTITY_T struct. */
711typedef IOPTENTITY_T const *PCIOPTENTITY_T;
712/** Mask of the address field. */
713#define IOMMU_PTENTITY_ADDR_MASK UINT64_C(0x000ffffffffff000)
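/*
 * Editor's example (not part of the original source): a sketch of the address arithmetic
 * of a table walk. Each level decodes 9 IOVA bits (level 1 covers bits 20:12) and the
 * parent entry's masked address field locates the table for that level; skipped levels
 * and large pages are ignored here, and the helper name is an illustrative assumption.
 */
DECLINLINE(RTGCPHYS) iommuAmdExamplePtEntryAddr(IOPTENTITY_T PtEntity, uint64_t uIova, uint8_t uLevel)
{
    unsigned const idxEntry = (uIova >> (X86_PAGE_4K_SHIFT + 9 * (uLevel - 1))) & 0x1ff;
    return (RTGCPHYS)(PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK) + idxEntry * sizeof(IOPTENTITY_T);
}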
714
715/**
716 * Interrupt Remapping Table Entry (IRTE).
717 * In accordance with the AMD spec.
718 */
719typedef union
720{
721 struct
722 {
723 uint32_t u1RemapEnable : 1; /**< Bit 0 - RemapEn: Remap Enable. */
724 uint32_t u1SuppressPf : 1; /**< Bit 1 - SupIOPF: Suppress I/O Page Fault. */
725 uint32_t u3IntrType : 3; /**< Bits 4:2 - IntType: Interrupt Type. */
726 uint32_t u1ReqEoi : 1; /**< Bit 5 - RqEoi: Request EOI. */
727 uint32_t u1DstMode : 1; /**< Bit 6 - DM: Destination Mode. */
728 uint32_t u1GuestMode : 1; /**< Bit 7 - GuestMode. */
729 uint32_t u8Dst : 8; /**< Bits 15:8 - Destination. */
730 uint32_t u8Vector : 8; /**< Bits 23:16 - Vector. */
731 uint32_t u8Rsvd0 : 8; /**< Bits 31:24 - Reserved. */
732 } n;
733 /** The 32-bit unsigned integer view. */
734 uint32_t u32;
735} IRTE_T;
736AssertCompileSize(IRTE_T, 4);
737/** Pointer to an IRTE_T struct. */
738typedef IRTE_T *PIRTE_T;
739/** Pointer to a const IRTE_T struct. */
740typedef IRTE_T const *PCIRTE_T;
741
742/**
743 * Command: Generic Command Buffer Entry.
744 * In accordance with the AMD spec.
745 */
746typedef union
747{
748 struct
749 {
750 uint32_t u32Operand1Lo; /**< Bits 31:0 - Operand 1 (Lo). */
751 uint32_t u32Operand1Hi : 28; /**< Bits 59:32 - Operand 1 (Hi). */
752 uint32_t u4Opcode : 4; /**< Bits 63:60 - Op Code. */
753 uint64_t u64Operand2; /**< Bits 127:64 - Operand 2. */
754 } n;
755 /** The 64-bit unsigned integer view. */
756 uint64_t au64[2];
757} CMD_GENERIC_T;
758AssertCompileSize(CMD_GENERIC_T, 16);
759/** Number of bits to shift the byte offset of a command in the command buffer to
760 * get its index. */
761#define IOMMU_CMD_GENERIC_SHIFT 4
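/*
 * Editor's example (not part of the original source): the command buffer head/tail
 * registers hold byte offsets, so a command slot is fetched as sketched below; the
 * helper name is an illustrative assumption.
 */
DECLINLINE(int) iommuAmdExampleReadCmd(PPDMDEVINS pDevIns, RTGCPHYS GCPhysCmdBuf, uint32_t offCmd, CMD_GENERIC_T *pCmd)
{
    uint32_t const idxCmd = offCmd >> IOMMU_CMD_GENERIC_SHIFT;    /* Byte offset -> 16-byte command slot index. */
    return PDMDevHlpPhysRead(pDevIns, GCPhysCmdBuf + (RTGCPHYS)idxCmd * sizeof(CMD_GENERIC_T), pCmd, sizeof(*pCmd));
}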
762
763/**
764 * Command: COMPLETION_WAIT.
765 * In accordance with the AMD spec.
766 */
767typedef union
768{
769 struct
770 {
771 uint32_t u1Store : 1; /**< Bit 0 - S: Completion Store. */
772 uint32_t u1Interrupt : 1; /**< Bit 1 - I: Completion Interrupt. */
773 uint32_t u1Flush : 1; /**< Bit 2 - F: Flush Queue. */
774 uint32_t u29StoreAddrLo : 29; /**< Bits 31:3 - Store Address (Lo). */
775 uint32_t u20StoreAddrHi : 20; /**< Bits 51:32 - Store Address (Hi). */
776 uint32_t u8Rsvd0 : 8; /**< Bits 59:52 - Reserved. */
777 uint32_t u4OpCode : 4; /**< Bits 63:60 - OpCode (Command). */
778 uint64_t u64StoreData; /**< Bits 127:64 - Store Data. */
779 } n;
780 /** The 64-bit unsigned integer view. */
781 uint64_t au64[2];
782} CMD_COMPLETION_WAIT_T;
783AssertCompileSize(CMD_COMPLETION_WAIT_T, 16);
784
785/**
786 * Command: INVALIDATE_DEVTAB_ENTRY.
787 * In accordance with the AMD spec.
788 */
789typedef union
790{
791 struct
792 {
793 uint16_t u16DevId; /**< Bits 15:0 - Device ID. */
794 uint16_t u16Rsvd0; /**< Bits 31:16 - Reserved. */
795 uint32_t u28Rsvd0 : 28; /**< Bits 59:32 - Reserved. */
796 uint32_t u4OpCode : 4; /**< Bits 63:60 - Op Code (Command). */
797 uint64_t u64Rsvd0; /**< Bits 127:64 - Reserved. */
798 } n;
799 /** The 64-bit unsigned integer view. */
800 uint64_t au64[2];
801} CMD_INV_DTE_T;
802AssertCompileSize(CMD_INV_DTE_T, 16);
803
804/**
805 * Command: INVALIDATE_IOMMU_PAGES.
806 * In accordance with the AMD spec.
807 */
808typedef union
809{
810 struct
811 {
812 uint32_t u20Pasid : 20; /**< Bits 19:0 - PASID: Process Address-Space ID. */
813 uint32_t u12Rsvd0 : 12; /**< Bits 31:20 - Reserved. */
814 uint32_t u16DomainId : 16; /**< Bits 47:32 - Domain ID. */
815 uint32_t u12Rsvd1 : 12; /**< Bits 59:48 - Reserved. */
816 uint32_t u4OpCode : 4; /**< Bits 63:60 - Op Code (Command). */
817 uint32_t u1Size : 1; /**< Bit 64 - S: Size. */
818 uint32_t u1PageDirEntries : 1; /**< Bit 65 - PDE: Page Directory Entries. */
819 uint32_t u1GuestOrNested : 1; /**< Bit 66 - GN: Guest (GPA) or Nested (GVA). */
820 uint32_t u9Rsvd0 : 9; /**< Bits 75:67 - Reserved. */
821 uint32_t u20AddrLo : 20; /**< Bits 95:76 - Address (Lo). */
822 uint32_t u32AddrHi; /**< Bits 127:96 - Address (Hi). */
823 } n;
824 /** The 64-bit unsigned integer view. */
825 uint64_t au64[2];
826} CMD_INV_IOMMU_PAGES_T;
827AssertCompileSize(CMD_INV_IOMMU_PAGES_T, 16);
828
829/**
830 * Command: INVALIDATE_IOTLB_PAGES.
831 * In accordance with the AMD spec.
832 */
833typedef union
834{
835 struct
836 {
837 uint16_t u16DevId; /**< Bits 15:0 - Device ID. */
838 uint8_t u8PasidLo; /**< Bits 23:16 - PASID: Process Address-Space ID (Lo). */
839 uint8_t u8MaxPend; /**< Bits 31:24 - Maxpend: Maximum simultaneous in-flight transactions. */
840 uint32_t u16QueueId : 16; /**< Bits 47:32 - Queue ID. */
841 uint32_t u12PasidHi : 12; /**< Bits 59:48 - PASID: Process Address-Space ID (Hi). */
842 uint32_t u4OpCode : 4; /**< Bits 63:60 - Op Code (Command). */
843 uint32_t u1Size : 1; /**< Bit 64 - S: Size. */
844 uint32_t u1Rsvd0: 1; /**< Bit 65 - Reserved. */
845 uint32_t u1GuestOrNested : 1; /**< Bit 66 - GN: Guest (GPA) or Nested (GVA). */
846 uint32_t u1Rsvd1 : 1; /**< Bit 67 - Reserved. */
847 uint32_t u2Type : 2; /**< Bit 69:68 - Type. */
848 uint32_t u6Rsvd0 : 6; /**< Bits 75:70 - Reserved. */
849 uint32_t u20AddrLo : 20; /**< Bits 95:76 - Address (Lo). */
850 uint32_t u32AddrHi; /**< Bits 127:96 - Address (Hi). */
851 } n;
852 /** The 64-bit unsigned integer view. */
853 uint64_t au64[2];
854} CMD_INV_IOTLB_PAGES_T;
855AssertCompileSize(CMD_INV_IOTLB_PAGES_T, 16);
856
857/**
858 * Command: INVALIDATE_INTR_TABLE.
859 * In accordance with the AMD spec.
860 */
861typedef union
862{
863 struct
864 {
865 uint16_t u16DevId; /**< Bits 15:0 - Device ID. */
866 uint16_t u16Rsvd0; /**< Bits 31:16 - Reserved. */
867 uint32_t u32Rsvd0 : 28; /**< Bits 59:32 - Reserved. */
868 uint32_t u4OpCode : 4; /**< Bits 63:60 - Op Code (Command). */
869 uint64_t u64Rsvd0; /**< Bits 127:64 - Reserved. */
870 } u;
871 /** The 64-bit unsigned integer view. */
872 uint64_t au64[2];
873} CMD_INV_INTR_TABLE_T;
874AssertCompileSize(CMD_INV_INTR_TABLE_T, 16);
875
876/**
877 * Command: COMPLETE_PPR_REQ.
878 * In accordance with the AMD spec.
879 */
880typedef union
881{
882 struct
883 {
884 uint16_t u16DevId; /**< Bits 15:0 - Device ID. */
885 uint16_t u16Rsvd0; /**< Bits 31:16 - Reserved. */
886 uint32_t u20Pasid : 20; /**< Bits 51:32 - PASID: Process Address-Space ID. */
887 uint32_t u8Rsvd0 : 8; /**< Bits 59:52 - Reserved. */
888 uint32_t u4OpCode : 4; /**< Bits 63:60 - Op Code (Command). */
889 uint32_t u2Rsvd0 : 2; /**< Bits 65:64 - Reserved. */
890 uint32_t u1GuestOrNested : 1; /**< Bit 66 - GN: Guest (GPA) or Nested (GVA). */
891 uint32_t u29Rsvd0 : 29; /**< Bits 95:67 - Reserved. */
892 uint32_t u16CompletionTag : 16; /**< Bits 111:96 - Completion Tag. */
893 uint32_t u16Rsvd1 : 16; /**< Bits 127:112 - Reserved. */
894 } n;
895 /** The 64-bit unsigned integer view. */
896 uint64_t au64[2];
897} CMD_COMPLETE_PPR_REQ_T;
898AssertCompileSize(CMD_COMPLETE_PPR_REQ_T, 16);
899
900/**
901 * Command: INV_IOMMU_ALL.
902 * In accordance with the AMD spec.
903 */
904typedef union
905{
906 struct
907 {
908 uint32_t u32Rsvd0; /**< Bits 31:0 - Reserved. */
909 uint32_t u28Rsvd0 : 28; /**< Bits 59:32 - Reserved. */
910 uint32_t u4OpCode : 4; /**< Bits 63:60 - Op Code (Command). */
911 uint64_t u64Rsvd0; /**< Bits 127:64 - Reserved. */
912 } n;
913 /** The 64-bit unsigned integer view. */
914 uint64_t au64[2];
915} CMD_IOMMU_ALL_T;
916AssertCompileSize(CMD_IOMMU_ALL_T, 16);
917
918/**
919 * Event Log Entry: Generic.
920 * In accordance with the AMD spec.
921 */
922typedef union
923{
924 struct
925 {
926 uint32_t u32Operand1Lo; /**< Bits 31:0 - Operand 1 (Lo). */
927 uint32_t u32Operand1Hi : 28; /**< Bits 59:32 - Operand 1 (Hi). */
928 uint32_t u4EvtCode : 4; /**< Bits 63:60 - Event code. */
929 uint32_t u32Operand2Lo; /**< Bits 95:64 - Operand 2 (Lo). */
930 uint32_t u32Operand2Hi; /**< Bits 127:96 - Operand 2 (Hi). */
931 } n;
932 /** The 32-bit unsigned integer view. */
933 uint32_t au32[4];
934} EVT_GENERIC_T;
935AssertCompileSize(EVT_GENERIC_T, 16);
936/** Number of bits to shift the byte offset of an event entry in the event log
937 * buffer to get its index. */
938#define IOMMU_EVT_GENERIC_SHIFT 4
939/** Pointer to a generic event log entry. */
940typedef EVT_GENERIC_T *PEVT_GENERIC_T;
941/** Pointer to a const generic event log entry. */
942typedef const EVT_GENERIC_T *PCEVT_GENERIC_T;
943
944/**
945 * Hardware event types.
946 * In accordance with the AMD spec.
947 */
948typedef enum HWEVTTYPE
949{
950 HWEVTTYPE_RSVD = 0,
951 HWEVTTYPE_MASTER_ABORT,
952 HWEVTTYPE_TARGET_ABORT,
953 HWEVTTYPE_DATA_ERROR
954} HWEVTTYPE;
955AssertCompileSize(HWEVTTYPE, 4);
956
957/**
958 * Event Log Entry: ILLEGAL_DEV_TABLE_ENTRY.
959 * In accordance with the AMD spec.
960 */
961typedef union
962{
963 struct
964 {
965 uint16_t u16DevId; /**< Bits 15:0 - Device ID. */
966 uint16_t u4PasidHi : 4; /**< Bits 19:16 - PASID: Process Address-Space ID (Hi). */
967 uint16_t u12Rsvd0 : 12; /**< Bits 31:20 - Reserved. */
968 uint16_t u16PasidLo; /**< Bits 47:32 - PASID: Process Address-Space ID (Lo). */
969 uint16_t u1GuestOrNested : 1; /**< Bit 48 - GN: Guest (GPA) or Nested (GVA). */
970 uint16_t u2Rsvd0 : 2; /**< Bits 50:49 - Reserved. */
971 uint16_t u1Interrupt : 1; /**< Bit 51 - I: Interrupt. */
972 uint16_t u1Rsvd0 : 1; /**< Bit 52 - Reserved. */
973 uint16_t u1ReadWrite : 1; /**< Bit 53 - RW: Read/Write. */
974 uint16_t u1Rsvd1 : 1; /**< Bit 54 - Reserved. */
975 uint16_t u1RsvdNotZero : 1; /**< Bit 55 - RZ: Reserved bit not Zero (0=invalid level encoding). */
976 uint16_t u1Translation : 1; /**< Bit 56 - TN: Translation. */
977 uint16_t u3Rsvd0 : 3; /**< Bits 59:57 - Reserved. */
978 uint16_t u4EvtCode : 4; /**< Bits 63:60 - Event code. */
979 uint64_t u64Addr; /**< Bits 127:64 - Address: I/O Virtual Address (IOVA). */
980 } n;
981 /** The 32-bit unsigned integer view. */
982 uint32_t au32[4];
983} EVT_ILLEGAL_DTE_T;
984AssertCompileSize(EVT_ILLEGAL_DTE_T, 16);
985/** Pointer to an illegal device table entry event. */
986typedef EVT_ILLEGAL_DTE_T *PEVT_ILLEGAL_DTE_T;
987/** Pointer to a const illegal device table entry event. */
988typedef EVT_ILLEGAL_DTE_T const *PCEVT_ILLEGAL_DTE_T;
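/*
 * Editor's note (not part of the original source): specific 16-byte events such as
 * EVT_ILLEGAL_DTE_T are laid out to overlay EVT_GENERIC_T, so a specific event can be
 * handed to a generic event-log writer by casting to PCEVT_GENERIC_T.
 */
AssertCompile(sizeof(EVT_ILLEGAL_DTE_T) == sizeof(EVT_GENERIC_T));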
989
990/**
991 * Event Log Entry: IO_PAGE_FAULT_EVENT.
992 * In accordance with the AMD spec.
993 */
994typedef union
995{
996 struct
997 {
998 uint16_t u16DevId; /**< Bits 15:0 - Device ID. */
999 uint16_t u4PasidHi : 4; /**< Bits 19:16 - PASID: Process Address-Space ID (Hi). */
1000 uint16_t u16DomainOrPasidLo; /**< Bits 47:32 - D/P: Domain ID or Process Address-Space ID (Lo). */
1001 uint16_t u1GuestOrNested : 1; /**< Bit 48 - GN: Guest (GPA) or Nested (GVA). */
1002 uint16_t u1NoExecute : 1; /**< Bit 49 - NX: No Execute. */
1003 uint16_t u1User : 1; /**< Bit 50 - US: User/Supervisor. */
1004 uint16_t u1Interrupt : 1; /**< Bit 51 - I: Interrupt. */
1005 uint16_t u1Present : 1; /**< Bit 52 - PR: Present. */
1006 uint16_t u1ReadWrite : 1; /**< Bit 53 - RW: Read/Write. */
1007 uint16_t u1PermDenied : 1; /**< Bit 54 - PE: Permission Indicator. */
1008 uint16_t u1RsvdNotZero : 1; /**< Bit 55 - RZ: Reserved bit not Zero (0=invalid level encoding). */
1009 uint16_t u1Translation : 1; /**< Bit 56 - TN: Translation. */
1010 uint16_t u3Rsvd0 : 3; /**< Bit 59:57 - Reserved. */
1011 uint16_t u4EvtCode : 4; /**< Bits 63:60 - Event code. */
1012 uint64_t u64Addr; /**< Bits 127:64 - Address: I/O Virtual Address (IOVA). */
1013 } n;
1014 /** The 32-bit unsigned integer view. */
1015 uint32_t au32[4];
1016} EVT_IO_PAGE_FAULT_T;
1017AssertCompileSize(EVT_IO_PAGE_FAULT_T, 16);
1018/** Pointer to an I/O page fault event. */
1019typedef EVT_IO_PAGE_FAULT_T *PEVT_IO_PAGE_FAULT_T;
1020/** Pointer to a const I/O page fault event. */
1021typedef EVT_IO_PAGE_FAULT_T const *PCEVT_IO_PAGE_FAULT_T;
1022
1023
1024/**
1025 * Event Log Entry: DEV_TAB_HARDWARE_ERROR.
1026 * In accordance with the AMD spec.
1027 */
1028typedef union
1029{
1030 struct
1031 {
1032 uint16_t u16DevId; /**< Bits 15:0 - Device ID. */
1033 uint16_t u16Rsvd0; /**< Bits 31:16 - Reserved. */
1034 uint32_t u19Rsvd0 : 19; /**< Bits 50:32 - Reserved. */
1035 uint32_t u1Intr : 1; /**< Bit 51 - I: Interrupt (1=interrupt request, 0=memory request). */
1036 uint32_t u1Rsvd0 : 1; /**< Bit 52 - Reserved. */
1037 uint32_t u1ReadWrite : 1; /**< Bit 53 - RW: Read/Write transaction (only meaningful when I=0 and TR=0). */
1038 uint32_t u2Rsvd0 : 2; /**< Bits 55:54 - Reserved. */
1039 uint32_t u1Translation : 1; /**< Bit 56 - TR: Translation (1=translation, 0=transaction). */
1040 uint32_t u2Type : 2; /**< Bits 58:57 - Type: The type of hardware error. */
1041 uint32_t u1Rsvd1 : 1; /**< Bit 59 - Reserved. */
1042 uint32_t u4EvtCode : 4; /**< Bits 63:60 - Event code. */
1043 uint64_t u64Addr; /**< Bits 127:64 - Address. */
1044 } n;
1045 /** The 32-bit unsigned integer view. */
1046 uint32_t au32[4];
1047} EVT_DEV_TAB_HW_ERROR_T;
1048AssertCompileSize(EVT_DEV_TAB_HW_ERROR_T, 16);
1049/** Pointer to a device table hardware error event. */
1050typedef EVT_DEV_TAB_HW_ERROR_T *PEVT_DEV_TAB_HW_ERROR_T;
1051/** Pointer to a const device table hardware error event. */
1052typedef EVT_DEV_TAB_HW_ERROR_T const *PCEVT_DEV_TAB_HW_ERROR_T;
1053
1054/**
1055 * Event Log Entry: EVT_PAGE_TAB_HARDWARE_ERROR.
1056 * In accordance with the AMD spec.
1057 */
1058typedef union
1059{
1060 struct
1061 {
1062 uint16_t u16DevId; /**< Bits 15:0 - Device ID. */
1063 uint16_t u16Rsvd0; /**< Bits 31:16 - Reserved. */
1064 uint32_t u16DomainOrPasidLo : 16; /**< Bits 47:32 - D/P: Domain ID or Process Address-Space ID (Lo). */
1065 uint32_t u1GuestOrNested : 1; /**< Bit 48 - GN: Guest (GPA) or Nested (GVA). */
1066 uint32_t u2Rsvd0 : 2; /**< Bits 50:49 - Reserved. */
1067 uint32_t u1Interrupt : 1; /**< Bit 51 - I: Interrupt. */
1068 uint32_t u1Rsvd0 : 1; /**< Bit 52 - Reserved. */
1069 uint32_t u1ReadWrite : 1; /**< Bit 53 - RW: Read/Write. */
1070 uint32_t u2Rsvd1 : 2; /**< Bits 55:54 - Reserved. */
1071 uint32_t u1Translation : 1; /**< Bit 56 - TR: Translation. */
1072 uint32_t u2Type : 2; /**< Bits 58:57 - Type: The type of hardware error. */
1073 uint32_t u1Rsvd1 : 1; /**< Bit 59 - Reserved. */
1074 uint32_t u4EvtCode : 4; /**< Bits 63:60 - Event code. */
1075 /** @todo r=ramshankar: Figure 55: PAGE_TAB_HARDWARE_ERROR says Addr[31:3] but
1076 * table 58 mentions Addr[31:4], we just use the full 64-bits. Looks like a
1077 * typo in the figure. See AMD IOMMU spec (3.05-PUB, Jan 2020). */
1078 uint64_t u64Addr; /**< Bits 127:64 - Address: SPA of the page table entry. */
1079 } n;
1080 /** The 32-bit unsigned integer view. */
1081 uint32_t au32[4];
1082} EVT_PAGE_TAB_HW_ERR_T;
1083AssertCompileSize(EVT_PAGE_TAB_HW_ERR_T, 16);
1084/** Pointer to a page table hardware error event. */
1085typedef EVT_PAGE_TAB_HW_ERR_T *PEVT_PAGE_TAB_HW_ERR_T;
1086/** Pointer to a const page table hardware error event. */
1087typedef EVT_PAGE_TAB_HW_ERR_T const *PCEVT_PAGE_TAB_HW_ERR_T;
1088
1089
1090/**
1091 * Event Log Entry: ILLEGAL_COMMAND_ERROR.
1092 * In accordance with the AMD spec.
1093 */
1094typedef union
1095{
1096 struct
1097 {
1098 uint32_t u32Rsvd0; /**< Bits 31:0 - Reserved. */
1099 uint32_t u28Rsvd0 : 28; /**< Bits 59:32 - Reserved. */
1100 uint32_t u4EvtCode : 4; /**< Bits 63:60 - Event code. */
1101 uint32_t u4Rsvd0 : 4; /**< Bits 67:64 - Reserved. */
1102 uint32_t u28AddrLo : 28; /**< Bits 95:68 - Address: SPA of the invalid command (Lo). */
1103 uint32_t u32AddrHi; /**< Bits 127:96 - Address: SPA of the invalid command (Hi). */
1104 } n;
1105 /** The 32-bit unsigned integer view. */
1106 uint32_t au32[4];
1107} EVT_ILLEGAL_CMD_ERR_T;
1108AssertCompileSize(EVT_ILLEGAL_CMD_ERR_T, 16);
1109
1110/**
1111 * Event Log Entry: COMMAND_HARDWARE_ERROR.
1112 * In accordance with the AMD spec.
1113 */
1114typedef union
1115{
1116 struct
1117 {
1118 uint32_t u32Rsvd0; /**< Bits 31:0 - Reserved. */
1119 uint32_t u4Rsvd0 : 4; /**< Bits 35:32 - Reserved. */
1120 uint32_t u28AddrLo : 28; /**< Bits 63:36 - Address: SPA of the attempted access (Lo). */
1121 uint32_t u32AddrHi; /**< Bits 95:64 - Address: SPA of the attempted access (Hi). */
1122 } n;
1123 /** The 32-bit unsigned integer view. */
1124 uint32_t au32[3];
1125} EVT_CMD_HW_ERROR_T;
1126AssertCompileSize(EVT_CMD_HW_ERROR_T, 12);
1127
1128/**
1129 * Event Log Entry: IOTLB_INV_TIMEOUT.
1130 * In accordance with the AMD spec.
1131 */
1132typedef union
1133{
1134 struct
1135 {
1136 uint16_t u16DevId; /**< Bits 15:0 - Device ID. */
1137 uint16_t u16Rsvd0; /**< Bits 31:16 - Reserved.*/
1138 uint32_t u28Rsvd0 : 28; /**< Bits 59:32 - Reserved. */
1139 uint32_t u4EvtCode : 4; /**< Bits 63:60 - Event code. */
1140 uint32_t u4Rsvd0 : 4; /**< Bits 67:64 - Reserved. */
1141 uint32_t u28AddrLo : 28; /**< Bits 95:68 - Address: SPA of the invalidation command that timed out (Lo). */
1142 uint32_t u32AddrHi; /**< Bits 127:96 - Address: SPA of the invalidation command that timed out (Hi). */
1143 } n;
1144 /** The 32-bit unsigned integer view. */
1145 uint32_t au32[4];
1146} EVT_IOTLB_INV_TIMEOUT_T;
1147AssertCompileSize(EVT_IOTLB_INV_TIMEOUT_T, 16);
1148
1149/**
1150 * Event Log Entry: INVALID_DEVICE_REQUEST.
1151 * In accordance with the AMD spec.
1152 */
1153typedef union
1154{
1155 struct
1156 {
1157 uint32_t u16DevId : 16; /**< Bits 15:0 - Device ID. */
1158 uint32_t u4PasidHi : 4; /**< Bits 19:16 - PASID: Process Address-Space ID (Hi). */
1159 uint32_t u12Rsvd0 : 12; /**< Bits 31:20 - Reserved. */
1160 uint32_t u16PasidLo : 16; /**< Bits 47:32 - PASID: Process Address-Space ID (Lo). */
1161 uint32_t u1GuestOrNested : 1; /**< Bit 48 - GN: Guest (GPA) or Nested (GVA). */
1162 uint32_t u1User : 1; /**< Bit 49 - US: User/Supervisor. */
1163 uint32_t u6Rsvd0 : 6; /**< Bits 55:50 - Reserved. */
1164 uint32_t u1Translation : 1; /**< Bit 56 - TR: Translation. */
1165 uint32_t u3Type : 3; /**< Bits 59:57 - Type: The type of the invalid request. */
1166 uint32_t u4EvtCode : 4; /**< Bits 63:60 - Event code. */
1167 uint64_t u64Addr; /**< Bits 127:64 - Address: Translation or access address. */
1168 } n;
1169 /** The 32-bit unsigned integer view. */
1170 uint32_t au32[4];
1171} EVT_INVALID_DEV_REQ_T;
1172AssertCompileSize(EVT_INVALID_DEV_REQ_T, 16);
1173
1174/**
1175 * Event Log Entry: EVENT_COUNTER_ZERO.
1176 * In accordance with the AMD spec.
1177 */
1178typedef union
1179{
1180 struct
1181 {
1182 uint32_t u32Rsvd0; /**< Bits 31:0 - Reserved. */
1183 uint32_t u28Rsvd0 : 28; /**< Bits 59:32 - Reserved. */
1184 uint32_t u4EvtCode : 4; /**< Bits 63:60 - Event code. */
1185 uint32_t u20CounterNoteHi : 20; /**< Bits 83:64 - CounterNote: Counter value for the event counter register (Hi). */
1186 uint32_t u12Rsvd0 : 12; /**< Bits 95:84 - Reserved. */
1187 uint32_t u32CounterNoteLo; /**< Bits 127:96 - CounterNote: Counter value for the event counter register (Lo). */
1188 } n;
1189 /** The 32-bit unsigned integer view. */
1190 uint32_t au32[4];
1191} EVT_EVENT_COUNTER_ZERO_T;
1192AssertCompileSize(EVT_EVENT_COUNTER_ZERO_T, 16);
1193
1194/**
1195 * IOMMU Capability Header (PCI).
1196 * In accordance with the AMD spec.
1197 */
1198typedef union
1199{
1200 struct
1201 {
1202 uint32_t u8CapId : 8; /**< Bits 7:0 - CapId: Capability ID. */
1203 uint32_t u8CapPtr : 8; /**< Bits 15:8 - CapPtr: Pointer (PCI config offset) to the next capability. */
1204 uint32_t u3CapType : 3; /**< Bits 18:16 - CapType: Capability Type. */
1205 uint32_t u5CapRev : 5; /**< Bits 23:19 - CapRev: Capability revision. */
1206 uint32_t u1IoTlbSup : 1; /**< Bit 24 - IotlbSup: IOTLB Support. */
1207 uint32_t u1HtTunnel : 1; /**< Bit 25 - HtTunnel: HyperTransport Tunnel translation support. */
1208 uint32_t u1NpCache : 1; /**< Bit 26 - NpCache: Not Present table entries are cached. */
1209 uint32_t u1EfrSup : 1; /**< Bit 27 - EFRSup: Extended Feature Register Support. */
1210 uint32_t u1CapExt : 1; /**< Bit 28 - CapExt: Misc. Information Register 1 Support. */
1211 uint32_t u3Rsvd0 : 3; /**< Bits 31:29 - Reserved. */
1212 } n;
1213 /** The 32-bit unsigned integer view. */
1214 uint32_t u32;
1215} IOMMU_CAP_HDR_T;
1216AssertCompileSize(IOMMU_CAP_HDR_T, 4);
1217
1218/**
1219 * IOMMU Base Address (Lo and Hi) Register (PCI).
1220 * In accordance with the AMD spec.
1221 */
1222typedef union
1223{
1224 struct
1225 {
1226 uint32_t u1Enable : 1; /**< Bit 0 - Enable: RW1S - Enable IOMMU MMIO region. */
1227 uint32_t u13Rsvd0 : 13; /**< Bits 13:1 - Reserved. */
1228 uint32_t u18BaseAddrLo : 18; /**< Bits 31:14 - Base address (Lo) of the MMIO region. */
1229 uint32_t u32BaseAddrHi; /**< Bits 63:32 - Base address (Hi) of the MMIO region. */
1230 } n;
1231 /** The 32-bit unsigned integer view. */
1232 uint32_t au32[2];
1233 /** The 64-bit unsigned integer view. */
1234 uint64_t u64;
1235} IOMMU_BAR_T;
1236AssertCompileSize(IOMMU_BAR_T, 8);
1237#define IOMMU_BAR_VALID_MASK UINT64_C(0xffffffffffffc001)
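/*
 * Editor's example (not part of the original source): with the Enable bit set, the MMIO
 * region base is the 16K-aligned address reassembled from the Lo/Hi base fields as
 * sketched below; the helper name is an illustrative assumption.
 */
DECLINLINE(RTGCPHYS) iommuAmdExampleMmioBase(IOMMU_BAR_T IommuBar)
{
    return ((RTGCPHYS)IommuBar.n.u32BaseAddrHi << 32) | ((RTGCPHYS)IommuBar.n.u18BaseAddrLo << 14);
}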
1238
1239/**
1240 * IOMMU Range Register (PCI).
1241 * In accordance with the AMD spec.
1242 */
1243typedef union
1244{
1245 struct
1246 {
1247 uint32_t u5HtUnitId : 5; /**< Bits 4:0 - UnitID: IOMMU HyperTransport Unit ID (not used). */
1248 uint32_t u2Rsvd0 : 2; /**< Bits 6:5 - Reserved. */
1249 uint32_t u1RangeValid : 1; /**< Bit 7 - RngValid: Range Valid. */
1250 uint32_t u8Bus : 8; /**< Bits 15:8 - BusNumber: Bus number of the first and last device. */
1251 uint32_t u8FirstDevice : 8; /**< Bits 23:16 - FirstDevice: Device and function number of the first device. */
1252 uint32_t u8LastDevice: 8; /**< Bits 31:24 - LastDevice: Device and function number of the last device. */
1253 } n;
1254 /** The 32-bit unsigned integer view. */
1255 uint32_t u32;
1256} IOMMU_RANGE_T;
1257AssertCompileSize(IOMMU_RANGE_T, 4);
1258
1259/**
1260 * Device Table Base Address Register (MMIO).
1261 * In accordance with the AMD spec.
1262 */
1263typedef union
1264{
1265 struct
1266 {
1267 RT_GCC_EXTENSION uint64_t u9Size : 9; /**< Bits 8:0 - Size: Size of the device table. */
1268 RT_GCC_EXTENSION uint64_t u3Rsvd0 : 3; /**< Bits 11:9 - Reserved. */
1269 RT_GCC_EXTENSION uint64_t u40Base : 40; /**< Bits 51:12 - DevTabBase: Device table base address. */
1270 RT_GCC_EXTENSION uint64_t u12Rsvd0 : 12; /**< Bits 63:52 - Reserved. */
1271 } n;
1272 /** The 64-bit unsigned integer view. */
1273 uint64_t u64;
1274} DEV_TAB_BAR_T;
1275AssertCompileSize(DEV_TAB_BAR_T, 8);
1276#define IOMMU_DEV_TAB_BAR_VALID_MASK UINT64_C(0x000ffffffffff1ff)
1277#define IOMMU_DEV_TAB_SEG_BAR_VALID_MASK UINT64_C(0x000ffffffffff0ff)
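/*
 * Editor's example (not part of the original source): the Size field bounds how many
 * DTEs the table can hold, which is how a device ID would be range-checked before the
 * table is read; the helper name is an illustrative assumption.
 */
DECLINLINE(bool) iommuAmdExampleIsDevIdInDevTab(DEV_TAB_BAR_T DevTabBar, uint16_t uDevId)
{
    uint32_t const cbDevTab = IOMMU_GET_DEV_TAB_SIZE(DevTabBar.n.u9Size);
    return ((uint32_t)uDevId + 1) * sizeof(DTE_T) <= cbDevTab;
}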
1278
1279/**
1280 * Command Buffer Base Address Register (MMIO).
1281 * In accordance with the AMD spec.
1282 */
1283typedef union
1284{
1285 struct
1286 {
1287 RT_GCC_EXTENSION uint64_t u12Rsvd0 : 12; /**< Bits 11:0 - Reserved. */
1288 RT_GCC_EXTENSION uint64_t u40Base : 40; /**< Bits 51:12 - ComBase: Command buffer base address. */
1289 RT_GCC_EXTENSION uint64_t u4Rsvd0 : 4; /**< Bits 55:52 - Reserved. */
1290 RT_GCC_EXTENSION uint64_t u4Len : 4; /**< Bits 59:56 - ComLen: Command buffer length. */
1291 RT_GCC_EXTENSION uint64_t u4Rsvd1 : 4; /**< Bits 63:60 - Reserved. */
1292 } n;
1293 /** The 64-bit unsigned integer view. */
1294 uint64_t u64;
1295} CMD_BUF_BAR_T;
1296AssertCompileSize(CMD_BUF_BAR_T, 8);
1297#define IOMMU_CMD_BUF_BAR_VALID_MASK UINT64_C(0x0f0ffffffffff000)
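/*
 * Editor's note (not part of the original source): as I read the AMD spec, ComLen
 * encodes the buffer size as a power of two in 16-byte entries (encodings smaller than
 * 256 entries being reserved), so the byte size can be derived as sketched below; the
 * helper name is an illustrative assumption.
 */
DECLINLINE(uint32_t) iommuAmdExampleCmdBufSize(CMD_BUF_BAR_T CmdBufBar)
{
    return (UINT32_C(1) << CmdBufBar.n.u4Len) * (uint32_t)sizeof(CMD_GENERIC_T);
}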
1298
1299/**
1300 * Event Log Base Address Register (MMIO).
1301 * In accordance with the AMD spec.
1302 */
1303typedef union
1304{
1305 struct
1306 {
1307 RT_GCC_EXTENSION uint64_t u12Rsvd0 : 12; /**< Bits 11:0 - Reserved. */
1308 RT_GCC_EXTENSION uint64_t u40Base : 40; /**< Bits 51:12 - EventBase: Event log base address. */
1309 RT_GCC_EXTENSION uint64_t u4Rsvd0 : 4; /**< Bits 55:52 - Reserved. */
1310 RT_GCC_EXTENSION uint64_t u4Len : 4; /**< Bits 59:56 - EventLen: Event log length. */
1311 RT_GCC_EXTENSION uint64_t u4Rsvd1 : 4; /**< Bits 63:60 - Reserved. */
1312 } n;
1313 /** The 64-bit unsigned integer view. */
1314 uint64_t u64;
1315} EVT_LOG_BAR_T;
1316AssertCompileSize(EVT_LOG_BAR_T, 8);
1317#define IOMMU_EVT_LOG_BAR_VALID_MASK UINT64_C(0x0f0ffffffffff000)
1318
1319/**
1320 * IOMMU Control Register (MMIO).
1321 * In accordance with the AMD spec.
1322 */
1323typedef union
1324{
1325 struct
1326 {
1327 uint32_t u1IommuEn : 1; /**< Bit 0 - IommuEn: IOMMU Enable. */
1328 uint32_t u1HtTunEn : 1; /**< Bit 1 - HtTunEn: HyperTransport Tunnel Enable. */
1329 uint32_t u1EvtLogEn : 1; /**< Bit 2 - EventLogEn: Event Log Enable. */
1330 uint32_t u1EvtIntrEn : 1; /**< Bit 3 - EventIntEn: Event Log Interrupt Enable. */
1331 uint32_t u1CompWaitIntrEn : 1; /**< Bit 4 - ComWaitIntEn: Completion Wait Interrupt Enable. */
1332 uint32_t u3InvTimeOut : 3; /**< Bits 7:5 - InvTimeOut: Invalidation Timeout. */
1333 uint32_t u1PassPW : 1; /**< Bit 8 - PassPW: Pass Posted Write. */
1334 uint32_t u1ResPassPW : 1; /**< Bit 9 - ResPassPW: Response Pass Posted Write. */
1335 uint32_t u1Coherent : 1; /**< Bit 10 - Coherent: HT read request packet Coherent bit. */
1336 uint32_t u1Isoc : 1; /**< Bit 11 - Isoc: HT read request packet Isochronous bit. */
1337 uint32_t u1CmdBufEn : 1; /**< Bit 12 - CmdBufEn: Command Buffer Enable. */
1338 uint32_t u1PprLogEn : 1; /**< Bit 13 - PprLogEn: Peripheral Page Request (PPR) Log Enable. */
1339 uint32_t u1PprIntrEn : 1; /**< Bit 14 - PprIntrEn: Peripheral Page Request Interrupt Enable. */
1340 uint32_t u1PprEn : 1; /**< Bit 15 - PprEn: Peripheral Page Request processing Enable. */
1341 uint32_t u1GstTranslateEn : 1; /**< Bit 16 - GTEn: Guest Translate Enable. */
1342 uint32_t u1GstVirtApicEn : 1; /**< Bit 17 - GAEn: Guest Virtual-APIC Enable. */
1343 uint32_t u4Crw : 4; /**< Bits 21:18 - CRW: Intended for future use (not documented). */
1344 uint32_t u1SmiFilterEn : 1; /**< Bit 22 - SmiFEn: SMI Filter Enable. */
1345 uint32_t u1SelfWriteBackDis : 1; /**< Bit 23 - SlfWBDis: Self Write-Back Disable. */
1346 uint32_t u1SmiFilterLogEn : 1; /**< Bit 24 - SmiFLogEn: SMI Filter Log Enable. */
1347 uint32_t u3GstVirtApicModeEn : 3; /**< Bits 27:25 - GAMEn: Guest Virtual-APIC Mode Enable. */
1348 uint32_t u1GstLogEn : 1; /**< Bit 28 - GALogEn: Guest Virtual-APIC GA Log Enable. */
1349 uint32_t u1GstIntrEn : 1; /**< Bit 29 - GAIntEn: Guest Virtual-APIC Interrupt Enable. */
1350 uint32_t u2DualPprLogEn : 2; /**< Bits 31:30 - DualPprLogEn: Dual Peripheral Page Request Log Enable. */
1351 uint32_t u2DualEvtLogEn : 2; /**< Bits 33:32 - DualEventLogEn: Dual Event Log Enable. */
1352 uint32_t u3DevTabSegEn : 3; /**< Bits 36:34 - DevTblSegEn: Device Table Segment Enable. */
1353 uint32_t u2PrivAbortEn : 2; /**< Bits 38:37 - PrivAbrtEn: Privilege Abort Enable. */
1354 uint32_t u1PprAutoRespEn : 1; /**< Bit 39 - PprAutoRspEn: Peripheral Page Request Auto Response Enable. */
1355 uint32_t u1MarcEn : 1; /**< Bit 40 - MarcEn: Memory Address Routing and Control Enable. */
1356 uint32_t u1BlockStopMarkEn : 1; /**< Bit 41 - BlkStopMarkEn: Block StopMark messages Enable. */
1357 uint32_t u1PprAutoRespAlwaysOnEn : 1; /**< Bit 42 - PprAutoRspAon:: PPR Auto Response - Always On Enable. */
1358 uint32_t u1DomainIDPNE : 1; /**< Bit 43 - DomainIDPE: Reserved (not documented). */
1359 uint32_t u1Rsvd0 : 1; /**< Bit 44 - Reserved. */
1360 uint32_t u1EnhancedPpr : 1; /**< Bit 45 - EPHEn: Enhanced Peripheral Page Request Handling Enable. */
1361 uint32_t u2HstAccDirtyBitUpdate : 2; /**< Bits 47:46 - HADUpdate: Access and Dirty Bit updated in host page table. */
1362        uint32_t    u1GstDirtyUpdateDis : 1;          /**< Bit 48 - GDUpdateDis: Disable hardware update of Dirty bit in GPT. */
1363 uint32_t u1Rsvd1 : 1; /**< Bit 49 - Reserved. */
1364 uint32_t u1X2ApicEn : 1; /**< Bit 50 - XTEn: Enable X2APIC. */
1365 uint32_t u1X2ApicIntrGenEn : 1; /**< Bit 51 - IntCapXTEn: Enable IOMMU X2APIC Interrupt generation. */
1366 uint32_t u2Rsvd0 : 2; /**< Bits 53:52 - Reserved. */
1367        uint32_t    u1GstAccessUpdateDis : 1;         /**< Bit 54 - GAUpdateDis: Disable hardware update of Access bit in GPT. */
1368        uint32_t    u9Rsvd0 : 9;                      /**< Bits 63:55 - Reserved. */
1369 } n;
1370 /** The 64-bit unsigned integer view. */
1371 uint64_t u64;
1372} IOMMU_CTRL_T;
1373AssertCompileSize(IOMMU_CTRL_T, 8);
1374#define IOMMU_CTRL_VALID_MASK UINT64_C(0x004defffffffffff)
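/* Decoded against the layout above, the control valid mask clears only the reserved bits (bit 44, bit 49,
   bits 53:52 and bits 63:55); every other bit, including the undocumented CRW and DomainIDPE bits, remains
   guest-writable through iommuAmdCtrl_w(). */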
1375
1376/**
1377 * IOMMU Exclusion Base Register (MMIO).
1378 * In accordance with the AMD spec.
1379 */
1380typedef union
1381{
1382 struct
1383 {
1384 RT_GCC_EXTENSION uint64_t u1ExclEnable : 1; /**< Bit 0 - ExEn: Exclusion Range Enable. */
1385 RT_GCC_EXTENSION uint64_t u1AllowAll : 1; /**< Bit 1 - Allow: Allow All Devices. */
1386 RT_GCC_EXTENSION uint64_t u10Rsvd0 : 10; /**< Bits 11:2 - Reserved. */
1387 RT_GCC_EXTENSION uint64_t u40ExclRangeBase : 40; /**< Bits 51:12 - Exclusion Range Base Address. */
1388 RT_GCC_EXTENSION uint64_t u12Rsvd0 : 12; /**< Bits 63:52 - Reserved. */
1389 } n;
1390 /** The 64-bit unsigned integer view. */
1391 uint64_t u64;
1392} IOMMU_EXCL_RANGE_BAR_T;
1393AssertCompileSize(IOMMU_EXCL_RANGE_BAR_T, 8);
1394#define IOMMU_EXCL_RANGE_BAR_VALID_MASK UINT64_C(0x000ffffffffff003)
1395
1396/**
1397 * IOMMU Exclusion Range Limit Register (MMIO).
1398 * In accordance with the AMD spec.
1399 */
1400typedef union
1401{
1402 struct
1403 {
1404 RT_GCC_EXTENSION uint64_t u52ExclLimit : 52; /**< Bits 51:0 - Exclusion Range Limit (last 12 bits are treated as 1s). */
1405 RT_GCC_EXTENSION uint64_t u12Rsvd1 : 12; /**< Bits 63:52 - Reserved. */
1406 } n;
1407 /** The 64-bit unsigned integer view. */
1408 uint64_t u64;
1409} IOMMU_EXCL_RANGE_LIMIT_T;
1410AssertCompileSize(IOMMU_EXCL_RANGE_LIMIT_T, 8);
1411#define IOMMU_EXCL_RANGE_LIMIT_VALID_MASK UINT64_C(0x000fffffffffffff)
1412
1413/**
1414 * IOMMU Extended Feature Register (MMIO).
1415 * In accordance with the AMD spec.
1416 */
1417typedef union
1418{
1419 struct
1420 {
1421 uint32_t u1PrefetchSup : 1; /**< Bit 0 - PreFSup: Prefetch Support. */
1422 uint32_t u1PprSup : 1; /**< Bit 1 - PPRSup: Peripheral Page Request Support. */
1423 uint32_t u1X2ApicSup : 1; /**< Bit 2 - XTSup: x2Apic Support. */
1424 uint32_t u1NoExecuteSup : 1; /**< Bit 3 - NXSup: No-Execute and Privilege Level Support. */
1425 uint32_t u1GstTranslateSup : 1; /**< Bit 4 - GTSup: Guest Translations (for GVAs) Support. */
1426 uint32_t u1Rsvd0 : 1; /**< Bit 5 - Reserved. */
1427 uint32_t u1InvAllSup : 1; /**< Bit 6 - IASup: Invalidate-All Support. */
1428 uint32_t u1GstVirtApicSup : 1; /**< Bit 7 - GASup: Guest Virtual-APIC Support. */
1429 uint32_t u1HwErrorSup : 1; /**< Bit 8 - HESup: Hardware Error registers Support. */
1430        uint32_t    u1PerfCounterSup : 1;             /**< Bit 9 - PCSup: Performance Counter Support. */
1431 uint32_t u2HostAddrTranslateSize : 2; /**< Bits 11:10 - HATS: Host Address Translation Size. */
1432 uint32_t u2GstAddrTranslateSize : 2; /**< Bits 13:12 - GATS: Guest Address Translation Size. */
1433 uint32_t u2GstCr3RootTblLevel : 2; /**< Bits 15:14 - GLXSup: Guest CR3 Root Table Level (Max) Size Support. */
1434 uint32_t u2SmiFilterSup : 2; /**< Bits 17:16 - SmiFSup: SMI Filter Register Support. */
1435 uint32_t u3SmiFilterCount : 3; /**< Bits 20:18 - SmiFRC: SMI Filter Register Count. */
1436 uint32_t u3GstVirtApicModeSup : 3; /**< Bits 23:21 - GAMSup: Guest Virtual-APIC Modes Supported. */
1437 uint32_t u2DualPprLogSup : 2; /**< Bits 25:24 - DualPprLogSup: Dual Peripheral Page Request Log Support. */
1438 uint32_t u2Rsvd0 : 2; /**< Bits 27:26 - Reserved. */
1439 uint32_t u2DualEvtLogSup : 2; /**< Bits 29:28 - DualEventLogSup: Dual Event Log Support. */
1440 uint32_t u2Rsvd1 : 2; /**< Bits 31:30 - Reserved. */
1441 uint32_t u5MaxPasidSup : 5; /**< Bits 36:32 - PASMax: Maximum PASID Supported. */
1442 uint32_t u1UserSupervisorSup : 1; /**< Bit 37 - USSup: User/Supervisor Page Protection Support. */
1443        uint32_t    u2DevTabSegSup : 2;               /**< Bits 39:38 - DevTblSegSup: Segmented Device Table Support. */
1444 uint32_t u1PprLogOverflowWarn : 1; /**< Bit 40 - PprOvrflwEarlySup: PPR Log Overflow Early Warning Support. */
1445 uint32_t u1PprAutoRespSup : 1; /**< Bit 41 - PprAutoRspSup: PPR Automatic Response Support. */
1446        uint32_t    u2MarcSup : 2;                    /**< Bits 43:42 - MarcSup: Memory Access Routing and Control Support. */
1447 uint32_t u1BlockStopMarkSup : 1; /**< Bit 44 - BlkStopMarkSup: Block StopMark messages Support. */
1448 uint32_t u1PerfOptSup : 1; /**< Bit 45 - PerfOptSup: IOMMU Performance Optimization Support. */
1449 uint32_t u1MsiCapMmioSup : 1; /**< Bit 46 - MsiCapMmioSup: MSI Capability Register MMIO Access Support. */
1450 uint32_t u1Rsvd1 : 1; /**< Bit 47 - Reserved. */
1451 uint32_t u1GstIoSup : 1; /**< Bit 48 - GIoSup: Guest I/O Protection Support. */
1452 uint32_t u1HostAccessSup : 1; /**< Bit 49 - HASup: Host Access Support. */
1453 uint32_t u1EnhancedPprSup : 1; /**< Bit 50 - EPHSup: Enhanced Peripheral Page Request Handling Support. */
1454 uint32_t u1AttrForwardSup : 1; /**< Bit 51 - AttrFWSup: Attribute Forward Support. */
1455 uint32_t u1HostDirtySup : 1; /**< Bit 52 - HDSup: Host Dirty Support. */
1456 uint32_t u1Rsvd2 : 1; /**< Bit 53 - Reserved. */
1457 uint32_t u1InvIoTlbTypeSup : 1; /**< Bit 54 - InvIotlbTypeSup: Invalidate IOTLB Type Support. */
1458        uint32_t    u6Rsvd0 : 6;                      /**< Bits 60:55 - Reserved. */
1459 uint32_t u1GstUpdateDisSup : 1; /**< Bit 61 - GAUpdateDisSup: Disable hardware update on GPT Support. */
1460 uint32_t u1ForcePhysDstSup : 1; /**< Bit 62 - ForcePhyDestSup: Force Phys. Dst. Mode for Remapped Intr. */
1461 uint32_t u1Rsvd3 : 1; /**< Bit 63 - Reserved. */
1462 } n;
1463 /** The 64-bit unsigned integer view. */
1464 uint64_t u64;
1465} IOMMU_EXT_FEAT_T;
1466AssertCompileSize(IOMMU_EXT_FEAT_T, 8);
1467
1468/**
1469 * Peripheral Page Request Log Base Address Register (MMIO).
1470 * In accordance with the AMD spec.
1471 */
1472typedef union
1473{
1474 struct
1475 {
1476        RT_GCC_EXTENSION uint64_t   u12Rsvd0 : 12;    /**< Bits 11:0 - Reserved. */
1477 RT_GCC_EXTENSION uint64_t u40Base : 40; /**< Bits 51:12 - PPRLogBase: Peripheral Page Request Log Base Address. */
1478 RT_GCC_EXTENSION uint64_t u4Rsvd0 : 4; /**< Bits 55:52 - Reserved. */
1479 RT_GCC_EXTENSION uint64_t u4Len : 4; /**< Bits 59:56 - PPRLogLen: Peripheral Page Request Log Length. */
1480 RT_GCC_EXTENSION uint64_t u4Rsvd1 : 4; /**< Bits 63:60 - Reserved. */
1481 } n;
1482 /** The 64-bit unsigned integer view. */
1483 uint64_t u64;
1484} PPR_LOG_BAR_T;
1485AssertCompileSize(PPR_LOG_BAR_T, 8);
1486#define IOMMU_PPR_LOG_BAR_VALID_MASK UINT64_C(0x0f0ffffffffff000)
1487
1488/**
1489 * IOMMU Hardware Event Upper Register (MMIO).
1490 * In accordance with the AMD spec.
1491 */
1492typedef union
1493{
1494 struct
1495 {
1496 RT_GCC_EXTENSION uint64_t u60FirstOperand : 60; /**< Bits 59:0 - First event code dependent operand. */
1497 RT_GCC_EXTENSION uint64_t u4EvtCode : 4; /**< Bits 63:60 - Event Code. */
1498 } n;
1499 /** The 64-bit unsigned integer view. */
1500 uint64_t u64;
1501} IOMMU_HW_EVT_HI_T;
1502AssertCompileSize(IOMMU_HW_EVT_HI_T, 8);
1503
1504/**
1505 * IOMMU Hardware Event Lower Register (MMIO).
1506 * In accordance with the AMD spec.
1507 */
1508typedef uint64_t IOMMU_HW_EVT_LO_T;
1509
1510/**
1511 * IOMMU Hardware Event Status (MMIO).
1512 * In accordance with the AMD spec.
1513 */
1514typedef union
1515{
1516 struct
1517 {
1518 uint32_t u1Valid : 1; /**< Bit 0 - HEV: Hardware Event Valid. */
1519 uint32_t u1Overflow : 1; /**< Bit 1 - HEO: Hardware Event Overflow. */
1520 uint32_t u30Rsvd0 : 30; /**< Bits 31:2 - Reserved. */
1521 uint32_t u32Rsvd0; /**< Bits 63:32 - Reserved. */
1522 } n;
1523 /** The 64-bit unsigned integer view. */
1524 uint64_t u64;
1525} IOMMU_HW_EVT_STATUS_T;
1526AssertCompileSize(IOMMU_HW_EVT_STATUS_T, 8);
1527#define IOMMU_HW_EVT_STATUS_VALID_MASK UINT64_C(0x0000000000000003)
1528
1529/**
1530 * Guest Virtual-APIC Log Base Address Register (MMIO).
1531 * In accordance with the AMD spec.
1532 */
1533typedef union
1534{
1535 struct
1536 {
1537        RT_GCC_EXTENSION uint64_t   u12Rsvd0 : 12;    /**< Bits 11:0 - Reserved. */
1538 RT_GCC_EXTENSION uint64_t u40Base : 40; /**< Bits 51:12 - GALogBase: Guest Virtual-APIC Log Base Address. */
1539 RT_GCC_EXTENSION uint64_t u4Rsvd0 : 4; /**< Bits 55:52 - Reserved. */
1540 RT_GCC_EXTENSION uint64_t u4Len : 4; /**< Bits 59:56 - GALogLen: Guest Virtual-APIC Log Length. */
1541 RT_GCC_EXTENSION uint64_t u4Rsvd1 : 4; /**< Bits 63:60 - Reserved. */
1542 } n;
1543 /** The 64-bit unsigned integer view. */
1544 uint64_t u64;
1545} GALOG_BAR_T;
1546AssertCompileSize(GALOG_BAR_T, 8);
1547
1548/**
1549 * Guest Virtual-APIC Log Tail Address Register (MMIO).
1550 * In accordance with the AMD spec.
1551 */
1552typedef union
1553{
1554 struct
1555 {
1556 RT_GCC_EXTENSION uint64_t u3Rsvd0 : 3; /**< Bits 2:0 - Reserved. */
1557        RT_GCC_EXTENSION uint64_t   u49GALogTailAddr : 49;  /**< Bits 51:3 - GATAddr: Guest Virtual-APIC Tail Log Address. */
1558        RT_GCC_EXTENSION uint64_t   u12Rsvd1 : 12;          /**< Bits 63:52 - Reserved. */
1559 } n;
1560 /** The 64-bit unsigned integer view. */
1561 uint64_t u64;
1562} GALOG_TAIL_ADDR_T;
1563AssertCompileSize(GALOG_TAIL_ADDR_T, 8);
1564
1565/**
1566 * PPR Log B Base Address Register (MMIO).
1567 * In accordance with the AMD spec.
1568 * Currently identical to PPR_LOG_BAR_T.
1569 */
1570typedef PPR_LOG_BAR_T PPR_LOG_B_BAR_T;
1571
1572/**
1573 * Event Log B Base Address Register (MMIO).
1574 * In accordance with the AMD spec.
1575 * Currently identical to EVT_LOG_BAR_T.
1576 */
1577typedef EVT_LOG_BAR_T EVT_LOG_B_BAR_T;
1578
1579/**
1580 * Device-specific Feature Extension (DSFX) Register (MMIO).
1581 * In accordance with the AMD spec.
1582 */
1583typedef union
1584{
1585 struct
1586 {
1587 uint32_t u24DevSpecFeat : 24; /**< Bits 23:0 - DevSpecificFeatSupp: Implementation specific features. */
1588 uint32_t u4RevMinor : 4; /**< Bits 27:24 - RevMinor: Minor revision identifier. */
1589 uint32_t u4RevMajor : 4; /**< Bits 31:28 - RevMajor: Major revision identifier. */
1590 uint32_t u32Rsvd0; /**< Bits 63:32 - Reserved.*/
1591 } n;
1592 /** The 64-bit unsigned integer view. */
1593 uint64_t u64;
1594} DEV_SPECIFIC_FEAT_T;
1595AssertCompileSize(DEV_SPECIFIC_FEAT_T, 8);
1596
1597/**
1598 * Device-specific Control Extension (DSCX) Register (MMIO).
1599 * In accordance with the AMD spec.
1600 */
1601typedef union
1602{
1603 struct
1604 {
1605 uint32_t u24DevSpecCtrl : 24; /**< Bits 23:0 - DevSpecificFeatCntrl: Implementation specific control. */
1606 uint32_t u4RevMinor : 4; /**< Bits 27:24 - RevMinor: Minor revision identifier. */
1607 uint32_t u4RevMajor : 4; /**< Bits 31:28 - RevMajor: Major revision identifier. */
1608 uint32_t u32Rsvd0; /**< Bits 63:32 - Reserved.*/
1609 } n;
1610 /** The 64-bit unsigned integer view. */
1611 uint64_t u64;
1612} DEV_SPECIFIC_CTRL_T;
1613AssertCompileSize(DEV_SPECIFIC_CTRL_T, 8);
1614
1615/**
1616 * Device-specific Status Extension (DSSX) Register (MMIO).
1617 * In accordance with the AMD spec.
1618 */
1619typedef union
1620{
1621 struct
1622 {
1623 uint32_t u24DevSpecStatus : 24; /**< Bits 23:0 - DevSpecificFeatStatus: Implementation specific status. */
1624 uint32_t u4RevMinor : 4; /**< Bits 27:24 - RevMinor: Minor revision identifier. */
1625 uint32_t u4RevMajor : 4; /**< Bits 31:28 - RevMajor: Major revision identifier. */
1626 uint32_t u32Rsvd0; /**< Bits 63:32 - Reserved.*/
1627 } n;
1628 /** The 64-bit unsigned integer view. */
1629 uint64_t u64;
1630} DEV_SPECIFIC_STATUS_T;
1631AssertCompileSize(DEV_SPECIFIC_STATUS_T, 8);
1632
1633/**
1634 * MSI Information Register 0 and 1 (PCI) / MSI Vector Register 0 and 1 (MMIO).
1635 * In accordance with the AMD spec.
1636 */
1637typedef union
1638{
1639 struct
1640 {
1641 uint32_t u5MsiNumEvtLog : 5; /**< Bits 4:0 - MsiNum: Event Log MSI message number. */
1642 uint32_t u3GstVirtAddrSize: 3; /**< Bits 7:5 - GVAsize: Guest Virtual Address Size. */
1643 uint32_t u7PhysAddrSize : 7; /**< Bits 14:8 - PAsize: Physical Address Size. */
1644 uint32_t u7VirtAddrSize : 7; /**< Bits 21:15 - VAsize: Virtual Address Size. */
1645 uint32_t u1HtAtsResv: 1; /**< Bit 22 - HtAtsResv: HyperTransport ATS Response Address range Reserved. */
1646 uint32_t u4Rsvd0 : 4; /**< Bits 26:23 - Reserved. */
1647 uint32_t u5MsiNumPpr : 5; /**< Bits 31:27 - MsiNumPPR: Peripheral Page Request MSI message number. */
1648 uint32_t u5MsiNumGa : 5; /**< Bits 36:32 - MsiNumGa: MSI message number for guest virtual-APIC log. */
1649 uint32_t u27Rsvd0: 27; /**< Bits 63:37 - Reserved. */
1650 } n;
1651 /** The 32-bit unsigned integer view. */
1652 uint32_t au32[2];
1653 /** The 64-bit unsigned integer view. */
1654 uint64_t u64;
1655} MSI_MISC_INFO_T;
1656AssertCompileSize(MSI_MISC_INFO_T, 8);
1657/** MSI Vector Register 0 and 1 (MMIO). */
1658typedef MSI_MISC_INFO_T MSI_VECTOR_T;
1659
1660/**
1661 * MSI Capability Header Register (PCI + MMIO).
1662 * In accordance with the AMD spec.
1663 */
1664typedef union
1665{
1666 struct
1667 {
1668 uint32_t u8MsiCapId : 8; /**< Bits 7:0 - MsiCapId: Capability ID. */
1669 uint32_t u8MsiCapPtr : 8; /**< Bits 15:8 - MsiCapPtr: Pointer (PCI config offset) to the next capability. */
1670 uint32_t u1MsiEnable : 1; /**< Bit 16 - MsiEn: Message Signal Interrupt Enable. */
1671 uint32_t u3MsiMultiMessCap : 3; /**< Bits 19:17 - MsiMultMessCap: MSI Multi-Message Capability. */
1672 uint32_t u3MsiMultiMessEn : 3; /**< Bits 22:20 - MsiMultMessEn: MSI Multi-Message Enable. */
1673 uint32_t u1Msi64BitEn : 1; /**< Bit 23 - Msi64BitEn: MSI 64-bit Enable. */
1674 uint32_t u8Rsvd0 : 8; /**< Bits 31:24 - Reserved. */
1675 } n;
1676 /** The 32-bit unsigned integer view. */
1677 uint32_t u32;
1678} MSI_CAP_HDR_T;
1679AssertCompileSize(MSI_CAP_HDR_T, 4);
1680#define IOMMU_MSI_CAP_HDR_MSI_EN_MASK RT_BIT(16)
1681
1682/**
1683 * MSI Address Register (PCI + MMIO).
1684 * In accordance with the AMD spec.
1685 */
1686typedef union
1687{
1688 struct
1689 {
1690 RT_GCC_EXTENSION uint64_t u2Rsvd : 2; /**< Bits 1:0 - Reserved. */
1691        RT_GCC_EXTENSION uint64_t   u62MsiAddr : 62;  /**< Bits 63:2 - MsiAddr: MSI Address. */
1692 } n;
1693 /** The 32-bit unsigned integer view. */
1694 uint32_t au32[2];
1695 /** The 64-bit unsigned integer view. */
1696 uint64_t u64;
1697} MSI_ADDR_T;
1698AssertCompileSize(MSI_ADDR_T, 8);
1699#define IOMMU_MSI_ADDR_VALID_MASK UINT64_C(0xfffffffffffffffc)
1700
1701/**
1702 * MSI Data Register (PCI + MMIO).
1703 * In accordance with the AMD spec.
1704 */
1705typedef union
1706{
1707 struct
1708 {
1709 uint16_t u16MsiData; /**< Bits 15:0 - MsiData: MSI Data. */
1710 uint16_t u16Rsvd0; /**< Bits 31:16 - Reserved. */
1711 } n;
1712 /** The 32-bit unsigned integer view. */
1713 uint32_t u32;
1714} MSI_DATA_T;
1715AssertCompileSize(MSI_DATA_T, 4);
1716#define IOMMU_MSI_DATA_VALID_MASK UINT64_C(0x000000000000ffff)
1717
1718/**
1719 * MSI Mapping Capability Header Register (PCI + MMIO).
1720 * In accordance with the AMD spec.
1721 */
1722typedef union
1723{
1724 struct
1725 {
1726 uint32_t u8MsiMapCapId : 8; /**< Bits 7:0 - MsiMapCapId: MSI Map capability ID. */
1727 uint32_t u8Rsvd0 : 8; /**< Bits 15:8 - Reserved. */
1728 uint32_t u1MsiMapEn : 1; /**< Bit 16 - MsiMapEn: MSI Map enable. */
1729 uint32_t u1MsiMapFixed : 1; /**< Bit 17 - MsiMapFixd: MSI Map fixed. */
1730 uint32_t u9Rsvd0 : 9; /**< Bits 26:18 - Reserved. */
1731 uint32_t u5MapCapType : 5; /**< Bits 31:27 - MsiMapCapType: MSI Mapping capability type. */
1732 } n;
1733 /** The 32-bit unsigned integer view. */
1734 uint32_t u32;
1735} MSI_MAP_CAP_HDR_T;
1736AssertCompileSize(MSI_MAP_CAP_HDR_T, 4);
1737
1738/**
1739 * Performance Optimization Control Register (MMIO).
1740 * In accordance with the AMD spec.
1741 */
1742typedef union
1743{
1744 struct
1745 {
1746 uint32_t u13Rsvd0 : 13; /**< Bits 12:0 - Reserved. */
1747 uint32_t u1PerfOptEn : 1; /**< Bit 13 - PerfOptEn: Performance Optimization Enable. */
1748        uint32_t    u18Rsvd0 : 18;                    /**< Bits 31:14 - Reserved. */
1749 } n;
1750 /** The 32-bit unsigned integer view. */
1751 uint32_t u32;
1752} IOMMU_PERF_OPT_CTRL_T;
1753AssertCompileSize(IOMMU_PERF_OPT_CTRL_T, 4);
1754
1755/**
1756 * XT (x2APIC) IOMMU General Interrupt Control Register (MMIO).
1757 * In accordance with the AMD spec.
1758 */
1759typedef union
1760{
1761 struct
1762 {
1763 uint32_t u2Rsvd0 : 2; /**< Bits 1:0 - Reserved.*/
1764 uint32_t u1X2ApicIntrDstMode : 1; /**< Bit 2 - Destination Mode for general interrupt.*/
1765        uint32_t    u5Rsvd0 : 5;                      /**< Bits 7:3 - Reserved.*/
1766 uint32_t u24X2ApicIntrDstLo : 24; /**< Bits 31:8 - Destination for general interrupt (Lo).*/
1767 uint32_t u8X2ApicIntrVector : 8; /**< Bits 39:32 - Vector for general interrupt.*/
1768 uint32_t u1X2ApicIntrDeliveryMode : 1; /**< Bit 40 - Delivery Mode for general interrupt.*/
1769 uint32_t u15Rsvd0 : 15; /**< Bits 55:41 - Reserved.*/
1770        uint32_t    u8X2ApicIntrDstHi : 8;            /**< Bits 63:56 - Destination for general interrupt (Hi). */
1771 } n;
1772 /** The 64-bit unsigned integer view. */
1773 uint64_t u64;
1774} IOMMU_XT_GEN_INTR_CTRL_T;
1775AssertCompileSize(IOMMU_XT_GEN_INTR_CTRL_T, 8);
1776
1777/**
1778 * XT (x2APIC) IOMMU Interrupt Control Register layout (MMIO).
 * Shared by the PPR and GA log interrupt control registers (see typedefs below).
1779 * In accordance with the AMD spec.
1780 */
1781typedef union
1782{
1783 struct
1784 {
1785 uint32_t u2Rsvd0 : 2; /**< Bits 1:0 - Reserved.*/
1786 uint32_t u1X2ApicIntrDstMode : 1; /**< Bit 2 - Destination Mode for the interrupt.*/
1787        uint32_t    u5Rsvd0 : 5;                      /**< Bits 7:3 - Reserved.*/
1788 uint32_t u24X2ApicIntrDstLo : 24; /**< Bits 31:8 - Destination for the interrupt (Lo).*/
1789 uint32_t u8X2ApicIntrVector : 8; /**< Bits 39:32 - Vector for the interrupt.*/
1790 uint32_t u1X2ApicIntrDeliveryMode : 1; /**< Bit 40 - Delivery Mode for the interrupt.*/
1791 uint32_t u15Rsvd0 : 15; /**< Bits 55:41 - Reserved.*/
1792        uint32_t    u8X2ApicIntrDstHi : 8;            /**< Bits 63:56 - Destination for the interrupt (Hi). */
1793 } n;
1794 /** The 64-bit unsigned integer view. */
1795 uint64_t u64;
1796} IOMMU_XT_INTR_CTRL_T;
1797AssertCompileSize(IOMMU_XT_INTR_CTRL_T, 8);
1798
1799/**
1800 * XT (x2APIC) IOMMU PPR Interrupt Control Register (MMIO).
1801 * In accordance with the AMD spec.
1802 * Currently identical to IOMMU_XT_INTR_CTRL_T.
1803 */
1804typedef IOMMU_XT_INTR_CTRL_T IOMMU_XT_PPR_INTR_CTRL_T;
1805
1806/**
1807 * XT (x2APIC) IOMMU GA (Guest Address) Log Control Register (MMIO).
1808 * In accordance with the AMD spec.
1809 * Currently identical to IOMMU_XT_INTR_CTRL_T.
1810 */
1811typedef IOMMU_XT_INTR_CTRL_T IOMMU_XT_GALOG_INTR_CTRL_T;
1812
1813/**
1814 * Memory Access and Routing Control (MARC) Aperture Base Register (MMIO).
1815 * In accordance with the AMD spec.
1816 */
1817typedef union
1818{
1819 struct
1820 {
1821 RT_GCC_EXTENSION uint64_t u12Rsvd0 : 12; /**< Bits 11:0 - Reserved. */
1822 RT_GCC_EXTENSION uint64_t u40MarcBaseAddr : 40; /**< Bits 51:12 - MarcBaseAddr: MARC Aperture Base Address. */
1823 RT_GCC_EXTENSION uint64_t u12Rsvd1 : 12; /**< Bits 63:52 - Reserved. */
1824 } n;
1825 /** The 64-bit unsigned integer view. */
1826 uint64_t u64;
1827} MARC_APER_BAR_T;
1828AssertCompileSize(MARC_APER_BAR_T, 8);
1829
1830/**
1831 * Memory Access and Routing Control (MARC) Relocation Register (MMIO).
1832 * In accordance with the AMD spec.
1833 */
1834typedef union
1835{
1836 struct
1837 {
1838 RT_GCC_EXTENSION uint64_t u1RelocEn : 1; /**< Bit 0 - RelocEn: Relocation Enabled. */
1839        RT_GCC_EXTENSION uint64_t   u1ReadOnly : 1;           /**< Bit 1 - ReadOnly: Whether only read-only accesses are allowed. */
1840 RT_GCC_EXTENSION uint64_t u10Rsvd0 : 10; /**< Bits 11:2 - Reserved. */
1841 RT_GCC_EXTENSION uint64_t u40MarcRelocAddr : 40; /**< Bits 51:12 - MarcRelocAddr: MARC Aperture Relocation Address. */
1842 RT_GCC_EXTENSION uint64_t u12Rsvd1 : 12; /**< Bits 63:52 - Reserved. */
1843 } n;
1844 /** The 64-bit unsigned integer view. */
1845 uint64_t u64;
1846} MARC_APER_RELOC_T;
1847AssertCompileSize(MARC_APER_RELOC_T, 8);
1848
1849/**
1850 * Memory Access and Routing Control (MARC) Length Register (MMIO).
1851 * In accordance with the AMD spec.
1852 */
1853typedef union
1854{
1855 struct
1856 {
1857 RT_GCC_EXTENSION uint64_t u12Rsvd0 : 12; /**< Bits 11:0 - Reserved. */
1858 RT_GCC_EXTENSION uint64_t u40MarcLength : 40; /**< Bits 51:12 - MarcLength: MARC Aperture Length. */
1859 RT_GCC_EXTENSION uint64_t u12Rsvd1 : 12; /**< Bits 63:52 - Reserved. */
1860 } n;
1861 /** The 64-bit unsigned integer view. */
1862 uint64_t u64;
1863} MARC_APER_LEN_T;
AssertCompileSize(MARC_APER_LEN_T, 8);
1864
1865/**
1866 * Memory Access and Routing Control (MARC) Aperture Register.
1867 * This combines other registers to match the MMIO layout for convenient access.
1868 */
1869typedef struct
1870{
1871 MARC_APER_BAR_T Base;
1872 MARC_APER_RELOC_T Reloc;
1873 MARC_APER_LEN_T Length;
1874} MARC_APER_T;
1875AssertCompileSize(MARC_APER_T, 24);
1876
1877/**
1878 * IOMMU Reserved Register (MMIO).
1879 * In accordance with the AMD spec.
1880 * This register is reserved for hardware use (although RW?).
1881 */
1882typedef uint64_t IOMMU_RSVD_REG_T;
1883
1884/**
1885 * Command Buffer Head Pointer Register (MMIO).
1886 * In accordance with the AMD spec.
1887 */
1888typedef union
1889{
1890 struct
1891 {
1892 uint32_t off; /**< Bits 31:0 - Buffer pointer (offset; 16 byte aligned, 512 KB max). */
1893 uint32_t u32Rsvd0; /**< Bits 63:32 - Reserved. */
1894 } n;
1895 /** The 32-bit unsigned integer view. */
1896 uint32_t au32[2];
1897 /** The 64-bit unsigned integer view. */
1898 uint64_t u64;
1899} CMD_BUF_HEAD_PTR_T;
1900AssertCompileSize(CMD_BUF_HEAD_PTR_T, 8);
1901#define IOMMU_CMD_BUF_HEAD_PTR_VALID_MASK UINT64_C(0x000000000007fff0)
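/* The head/tail pointer valid mask covers bits 18:4, i.e. 16-byte aligned offsets from 0 up to 512 KB - 16.
   E.g. a guest write of 0x7fff8 is masked down to 0x7fff0 by iommuAmdCmdBufHeadPtr_w() before being stored. */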
1902
1903/**
1904 * Command Buffer Tail Pointer Register (MMIO).
1905 * In accordance with the AMD spec.
1906 * Currently identical to CMD_BUF_HEAD_PTR_T.
1907 */
1908typedef CMD_BUF_HEAD_PTR_T CMD_BUF_TAIL_PTR_T;
1909#define IOMMU_CMD_BUF_TAIL_PTR_VALID_MASK IOMMU_CMD_BUF_HEAD_PTR_VALID_MASK
1910
1911/**
1912 * Event Log Head Pointer Register (MMIO).
1913 * In accordance with the AMD spec.
1914 * Currently identical to CMD_BUF_HEAD_PTR_T.
1915 */
1916typedef CMD_BUF_HEAD_PTR_T EVT_LOG_HEAD_PTR_T;
1917#define IOMMU_EVT_LOG_HEAD_PTR_VALID_MASK IOMMU_CMD_BUF_HEAD_PTR_VALID_MASK
1918
1919/**
1920 * Event Log Tail Pointer Register (MMIO).
1921 * In accordance with the AMD spec.
1922 * Currently identical to CMD_BUF_HEAD_PTR_T.
1923 */
1924typedef CMD_BUF_HEAD_PTR_T EVT_LOG_TAIL_PTR_T;
1925#define IOMMU_EVT_LOG_TAIL_PTR_VALID_MASK IOMMU_CMD_BUF_HEAD_PTR_VALID_MASK
1926
1927
1928/**
1929 * IOMMU Status Register (MMIO).
1930 * In accordance with the AMD spec.
1931 */
1932typedef union
1933{
1934 struct
1935 {
1936 uint32_t u1EvtOverflow : 1; /**< Bit 0 - EventOverflow: Event log overflow. */
1937 uint32_t u1EvtLogIntr : 1; /**< Bit 1 - EventLogInt: Event log interrupt. */
1938        uint32_t    u1CompWaitIntr : 1;               /**< Bit 2 - ComWaitInt: Completion wait interrupt. */
1939 uint32_t u1EvtLogRunning : 1; /**< Bit 3 - EventLogRun: Event logging is running. */
1940 uint32_t u1CmdBufRunning : 1; /**< Bit 4 - CmdBufRun: Command buffer is running. */
1941 uint32_t u1PprOverflow : 1; /**< Bit 5 - PprOverflow: Peripheral Page Request Log (PPR) overflow. */
1942 uint32_t u1PprIntr : 1; /**< Bit 6 - PprInt: PPR interrupt. */
1943 uint32_t u1PprLogRunning : 1; /**< Bit 7 - PprLogRun: PPR logging is running. */
1944 uint32_t u1GstLogRunning : 1; /**< Bit 8 - GALogRun: Guest virtual-APIC logging is running. */
1945 uint32_t u1GstLogOverflow : 1; /**< Bit 9 - GALOverflow: Guest virtual-APIC log overflow. */
1946 uint32_t u1GstLogIntr : 1; /**< Bit 10 - GAInt: Guest virtual-APIC log interrupt. */
1947 uint32_t u1PprOverflowB : 1; /**< Bit 11 - PprOverflowB: PPR log B overflow. */
1948 uint32_t u1PprLogActive : 1; /**< Bit 12 - PprLogActive: PPR log A is active. */
1949 uint32_t u2Rsvd0 : 2; /**< Bits 14:13 - Reserved. */
1950 uint32_t u1EvtOverflowB : 1; /**< Bit 15 - EvtOverflowB: Event log B overflow. */
1951 uint32_t u1EvtLogActive : 1; /**< Bit 16 - EvtLogActive: Event log A active. */
1952 uint32_t u1PprOverflowEarlyB : 1; /**< Bit 17 - PprOverflowEarlyB: PPR log B overflow early warning. */
1953 uint32_t u1PprOverflowEarly : 1; /**< Bit 18 - PprOverflowEarly: PPR log overflow early warning. */
1954 uint32_t u13Rsvd0 : 13; /**< Bits 31:19 - Reserved. */
1955        uint32_t    u32Rsvd0;                         /**< Bits 63:32 - Reserved. */
1956 } n;
1957 /** The 32-bit unsigned integer view. */
1958 uint32_t au32[2];
1959 /** The 64-bit unsigned integer view. */
1960 uint64_t u64;
1961} IOMMU_STATUS_T;
1962AssertCompileSize(IOMMU_STATUS_T, 8);
1963#define IOMMU_STATUS_VALID_MASK UINT64_C(0x0000000000079fff)
1964#define IOMMU_STATUS_RW1C_MASK UINT64_C(0x0000000000068e67)
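/* Decoded against the layout above: the RW1C mask covers the overflow and interrupt status bits
   (0, 1, 2, 5, 6, 9, 10, 11, 15, 17 and 18), which the guest clears by writing 1 to them; the remaining
   valid bits (3, 4, 7, 8, 12 and 16) are the running/active indicators and are read-only. */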
1965
1966/**
1967 * PPR Log Head Pointer Register (MMIO).
1968 * In accordance with the AMD spec.
1969 * Currently identical to CMD_BUF_HEAD_PTR_T.
1970 */
1971typedef CMD_BUF_HEAD_PTR_T PPR_LOG_HEAD_PTR_T;
1972
1973/**
1974 * PPR Log Tail Pointer Register (MMIO).
1975 * In accordance with the AMD spec.
1976 * Currently identical to CMD_BUF_HEAD_PTR_T.
1977 */
1978typedef CMD_BUF_HEAD_PTR_T PPR_LOG_TAIL_PTR_T;
1979
1980/**
1981 * Guest Virtual-APIC Log Head Pointer Register (MMIO).
1982 * In accordance with the AMD spec.
1983 */
1984typedef union
1985{
1986 struct
1987 {
1988        uint32_t    u3Rsvd0 : 3;                      /**< Bits 2:0 - Reserved. */
1989        uint32_t    u13GALogPtr : 13;                 /**< Bits 15:3 - Guest Virtual-APIC Log Head or Tail Pointer. */
1990 uint32_t u16Rsvd0 : 16; /**< Bits 31:16 - Reserved. */
1991 uint32_t u32Rsvd0; /**< Bits 63:32 - Reserved. */
1992 } n;
1993 /** The 32-bit unsigned integer view. */
1994 uint32_t au32[2];
1995 /** The 64-bit unsigned integer view. */
1996 uint64_t u64;
1997} GALOG_HEAD_PTR_T;
1998AssertCompileSize(GALOG_HEAD_PTR_T, 8);
1999
2000/**
2001 * Guest Virtual-APIC Log Tail Pointer Register (MMIO).
2002 * In accordance with the AMD spec.
2003 * Currently identical to GALOG_HEAD_PTR_T.
2004 */
2005typedef GALOG_HEAD_PTR_T GALOG_TAIL_PTR_T;
2006
2007/**
2008 * PPR Log B Head Pointer Register (MMIO).
2009 * In accordance with the AMD spec.
2010 * Currently identical to CMD_BUF_HEAD_PTR_T.
2011 */
2012typedef CMD_BUF_HEAD_PTR_T PPR_LOG_B_HEAD_PTR_T;
2013
2014/**
2015 * PPR Log B Tail Pointer Register (MMIO).
2016 * In accordance with the AMD spec.
2017 * Currently identical to CMD_BUF_HEAD_PTR_T.
2018 */
2019typedef CMD_BUF_HEAD_PTR_T PPR_LOG_B_TAIL_PTR_T;
2020
2021/**
2022 * Event Log B Head Pointer Register (MMIO).
2023 * In accordance with the AMD spec.
2024 * Currently identical to CMD_BUF_HEAD_PTR_T.
2025 */
2026typedef CMD_BUF_HEAD_PTR_T EVT_LOG_B_HEAD_PTR_T;
2027
2028/**
2029 * Event Log B Tail Pointer Register (MMIO).
2030 * In accordance with the AMD spec.
2031 * Currently identical to CMD_BUF_HEAD_PTR_T.
2032 */
2033typedef CMD_BUF_HEAD_PTR_T EVT_LOG_B_TAIL_PTR_T;
2034
2035/**
2036 * PPR Log Auto Response Register (MMIO).
2037 * In accordance with the AMD spec.
2038 */
2039typedef union
2040{
2041 struct
2042 {
2043 uint32_t u4AutoRespCode : 4; /**< Bits 3:0 - PprAutoRespCode: PPR log Auto Response Code. */
2044 uint32_t u1AutoRespMaskGen : 1; /**< Bit 4 - PprAutoRespMaskGn: PPR log Auto Response Mask Gen. */
2045 uint32_t u27Rsvd0 : 27; /**< Bits 31:5 - Reserved. */
2046 uint32_t u32Rsvd0; /**< Bits 63:32 - Reserved.*/
2047 } n;
2048 /** The 32-bit unsigned integer view. */
2049 uint32_t au32[2];
2050 /** The 64-bit unsigned integer view. */
2051 uint64_t u64;
2052} PPR_LOG_AUTO_RESP_T;
2053AssertCompileSize(PPR_LOG_AUTO_RESP_T, 8);
2054
2055/**
2056 * PPR Log Overflow Early Indicator Register (MMIO).
2057 * In accordance with the AMD spec.
2058 */
2059typedef union
2060{
2061 struct
2062 {
2063 uint32_t u15Threshold : 15; /**< Bits 14:0 - PprOvrflwEarlyThreshold: Overflow early indicator threshold. */
2064 uint32_t u15Rsvd0 : 15; /**< Bits 29:15 - Reserved. */
2065 uint32_t u1IntrEn : 1; /**< Bit 30 - PprOvrflwEarlyIntEn: Overflow early indicator interrupt enable. */
2066 uint32_t u1Enable : 1; /**< Bit 31 - PprOvrflwEarlyEn: Overflow early indicator enable. */
2067 uint32_t u32Rsvd0; /**< Bits 63:32 - Reserved. */
2068 } n;
2069 /** The 32-bit unsigned integer view. */
2070 uint32_t au32[2];
2071 /** The 64-bit unsigned integer view. */
2072 uint64_t u64;
2073} PPR_LOG_OVERFLOW_EARLY_T;
2074AssertCompileSize(PPR_LOG_OVERFLOW_EARLY_T, 8);
2075
2076/**
2077 * PPR Log B Overflow Early Indicator Register (MMIO).
2078 * In accordance with the AMD spec.
2079 * Currently identical to PPR_LOG_OVERFLOW_EARLY_T.
2080 */
2081typedef PPR_LOG_OVERFLOW_EARLY_T PPR_LOG_B_OVERFLOW_EARLY_T;
2082
2083/**
2084 * ILLEGAL_DEV_TABLE_ENTRY Event Types.
2085 * In accordance with the AMD spec.
2086 */
2087typedef enum EVT_ILLEGAL_DTE_TYPE_T
2088{
2089 kIllegalDteType_RsvdNotZero = 0,
2090 kIllegalDteType_RsvdIntTab,
2091 kIllegalDteType_RsvdIoCtl,
2092 kIllegalDteType_RsvdIntCtl
2093} EVT_ILLEGAL_DTE_TYPE_T;
2094
2095/**
2096 * IO_PAGE_FAULT Event Types.
2097 * In accordance with the AMD spec.
2098 */
2099typedef enum EVT_IO_PAGE_FAULT_TYPE_T
2100{
2101 /* Memory transaction. */
2102 kIoPageFaultType_DteRsvdPagingMode = 0,
2103 kIoPageFaultType_PteInvalidPageSize,
2104 kIoPageFaultType_PteInvalidLvlEncoding,
2105 kIoPageFaultType_SkippedLevelIovaNotZero,
2106 kIoPageFaultType_PteRsvdNotZero,
2107 kIoPageFaultType_PteValidNotSet,
2108 kIoPageFaultType_DteTranslationDisabled,
2109 kIoPageFaultType_PasidInvalidRange,
2110 kIoPageFaultType_PermDenied,
2111 kIoPageFaultType_UserSupervisor,
2112 /* Interrupt remapping */
2113 kIoPageFaultType_IrteAddrInvalid,
2114 kIoPageFaultType_IrteRsvdNotZero,
2115 kIoPageFaultType_IrteRemapEn,
2116 kIoPageFaultType_IrteRsvdIntType,
2117 kIoPageFaultType_IntrReqAborted,
2118 kIoPageFaultType_IntrWithPasid,
2119 kIoPageFaultType_SmiFilterMismatch,
2120 /* Memory transaction or interrupt remapping. */
2121 kIoPageFaultType_DevId_Invalid
2122} EVT_IO_PAGE_FAULT_TYPE_T;
2123
2124/**
2125 * DEV_TAB_HARDWARE_ERROR, PAGE_TAB_HARDWARE_ERROR and COMMAND_HARDWARE_ERROR Event
2126 * Types.
2127 * In accordance with the AMD spec.
2128 */
2129typedef enum EVT_HW_ERR_TYPE_T
2130{
2131 kHwErrType_MasterAbort = 0,
2132 kHwErrType_TargetAbort,
2133 kHwErrType_PoisonedData
2134} EVT_HW_ERR_TYPE_T;
2135
2136/**
2137 * ILLEGAL_COMMAND_ERROR Event Types.
2138 * In accordance with the AMD spec.
2139 */
2140typedef enum EVT_ILLEGAL_CMD_ERR_TYPE_T
2141{
2142 kIllegalCmdErrType_RsvdNotZero = 0,
2143 kIllegalCmdErrType_CmdNotSupported,
2144 kIllegalCmdErrType_IotlbNotSupported
2145} EVT_ILLEGAL_CMD_ERR_TYPE_T;
2146
2147/**
2148 * IOTLB_INV_TIMEOUT Event Types.
2149 * In accordance with the AMD spec.
2150 */
2151typedef enum EVT_IOTLB_INV_TIMEOUT_TYPE_T
2152{
2153 InvTimeoutType_NoResponse = 0
2154} EVT_IOTLB_INV_TIMEOUT_TYPE_T;
2155
2156/**
2157 * INVALID_DEVICE_REQUEST Event Types.
2158 * In accordance with the AMD spec.
2159 */
2160typedef enum EVT_INVALID_DEV_REQ_TYPE_T
2161{
2162 /* Access. */
2163 kInvalidDevReqType_ReadOrNonPostedWrite = 0,
2164 kInvalidDevReqType_PretranslatedTransaction,
2165 kInvalidDevReqType_PortIo,
2166 kInvalidDevReqType_SysMgt,
2167 kInvalidDevReqType_IntrRange,
2168 kInvalidDevReqType_RsvdIntrRange,
2169 kInvalidDevReqType_SysMgtAddr,
2170 /* Translation Request. */
2171 kInvalidDevReqType_TrAccessInvalid,
2172 kInvalidDevReqType_TrDisabled,
2173 kInvalidDevReqType_DevIdInvalid,
2174} EVT_INVALID_DEV_REQ_TYPE_T;
2175
2176/**
2177 * INVALID_PPR_REQUEST Event Types.
2178 * In accordance with the AMD spec.
2179 */
2180typedef enum EVT_INVALID_PPR_REQ_TYPE_T
2181{
2182 kInvalidPprReqType_PriNotSupported,
2183 kInvalidPprReqType_GstTranslateDisabled
2184} EVT_INVALID_PPR_REQ_TYPE_T;
2185
2186/**
2187 * IOMMU operations (transaction) types.
2188 */
2189typedef enum IOMMUOP
2190{
2191 /** Address translation request. */
2192 IOMMUOP_TRANSLATE_REQ = 0,
2193 /** Memory read request. */
2194 IOMMUOP_MEM_READ,
2195 /** Memory write request. */
2196 IOMMUOP_MEM_WRITE,
2197 /** Interrupt request. */
2198 IOMMUOP_INTR_REQ,
2199 /** Command. */
2200 IOMMUOP_CMD
2201} IOMMUOP;
2202AssertCompileSize(IOMMUOP, 4);
2203
2204/**
2205 * IOMMU I/O TLB Entry.
2206 * @note Update iommuAmdInitIotlbe() when changes are made.
2207 */
2208typedef struct
2209{
2210 /** Magic (IOMMU_IOTLBE_MAGIC). */
2211 uint32_t uMagic;
2212 /** Reserved for future (eviction hints?). */
2213 uint16_t uRsvd0;
2214 /** The I/O access permissions (IOMMU_IO_PERM_XXX). */
2215 uint8_t fIoPerm;
2216 /** The number of offset bits in the system physical address. */
2217 uint8_t cShift;
2218 /** The translated system physical address (SPA) of the page. */
2219 RTGCPHYS GCPhysSpa;
2220} IOTLBE_T;
2221AssertCompileSizeAlignment(IOTLBE_T, 8);
2222AssertCompileMemberAlignment(IOTLBE_T, GCPhysSpa, 8);
2223/** Pointer to an IOMMU I/O TLB entry struct. */
2224typedef IOTLBE_T *PIOTLBE_T;
2225/** Pointer to a const IOMMU I/O TLB entry struct. */
2226typedef IOTLBE_T const *PCIOTLBE_T;
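/* Illustrative sizing note: a 4 KB translation is cached with cShift = 12 and GCPhysSpa holding the
   4 KB-aligned system physical address; a 2 MB translation would use cShift = 21, and so on. */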
2227
2228/**
2229 * The shared IOMMU device state.
2230 */
2231typedef struct IOMMU
2232{
2233 /** IOMMU device index (0 is at the top of the PCI tree hierarchy). */
2234 uint32_t idxIommu;
2235 /** Alignment padding. */
2236 uint32_t uPadding0;
2237 /** The event semaphore the command thread waits on. */
2238 SUPSEMEVENT hEvtCmdThread;
2239 /** The MMIO handle. */
2240 IOMMMIOHANDLE hMmio;
2241
2242 /** @name PCI: Base capability block registers.
2243 * @{ */
2244 IOMMU_BAR_T IommuBar; /**< IOMMU base address register. */
2245 /** @} */
2246
2247 /** @name MMIO: Control and status registers.
2248 * @{ */
2249 DEV_TAB_BAR_T aDevTabBaseAddrs[8]; /**< Device table base address registers. */
2250 CMD_BUF_BAR_T CmdBufBaseAddr; /**< Command buffer base address register. */
2251 EVT_LOG_BAR_T EvtLogBaseAddr; /**< Event log base address register. */
2252 IOMMU_CTRL_T Ctrl; /**< IOMMU control register. */
2253 IOMMU_EXCL_RANGE_BAR_T ExclRangeBaseAddr; /**< IOMMU exclusion range base register. */
2254 IOMMU_EXCL_RANGE_LIMIT_T ExclRangeLimit; /**< IOMMU exclusion range limit. */
2255 IOMMU_EXT_FEAT_T ExtFeat; /**< IOMMU extended feature register. */
2256 /** @} */
2257
2258    /** @name MMIO: PPR Log and Hardware Event registers.
2259 * @{ */
2260 PPR_LOG_BAR_T PprLogBaseAddr; /**< PPR Log base address register. */
2261 IOMMU_HW_EVT_HI_T HwEvtHi; /**< IOMMU hardware event register (Hi). */
2262 IOMMU_HW_EVT_LO_T HwEvtLo; /**< IOMMU hardware event register (Lo). */
2263 IOMMU_HW_EVT_STATUS_T HwEvtStatus; /**< IOMMU hardware event status. */
2264 /** @} */
2265
2266 /** @todo IOMMU: SMI filter. */
2267
2268 /** @name MMIO: Guest Virtual-APIC Log registers.
2269 * @{ */
2270 GALOG_BAR_T GALogBaseAddr; /**< Guest Virtual-APIC Log base address register. */
2271 GALOG_TAIL_ADDR_T GALogTailAddr; /**< Guest Virtual-APIC Log Tail address register. */
2272 /** @} */
2273
2274 /** @name MMIO: Alternate PPR and Event Log registers.
2275 * @{ */
2276 PPR_LOG_B_BAR_T PprLogBBaseAddr; /**< PPR Log B base address register. */
2277 EVT_LOG_B_BAR_T EvtLogBBaseAddr; /**< Event Log B base address register. */
2278 /** @} */
2279
2280 /** @name MMIO: Device-specific feature registers.
2281 * @{ */
2282 DEV_SPECIFIC_FEAT_T DevSpecificFeat; /**< Device-specific feature extension register (DSFX). */
2283 DEV_SPECIFIC_CTRL_T DevSpecificCtrl; /**< Device-specific control extension register (DSCX). */
2284 DEV_SPECIFIC_STATUS_T DevSpecificStatus; /**< Device-specific status extension register (DSSX). */
2285 /** @} */
2286
2287 /** @name MMIO: MSI Capability Block registers.
2288 * @{ */
2289 MSI_MISC_INFO_T MsiMiscInfo; /**< MSI Misc. info registers / MSI Vector registers. */
2290 /** @} */
2291
2292 /** @name MMIO: Performance Optimization Control registers.
2293 * @{ */
2294 IOMMU_PERF_OPT_CTRL_T PerfOptCtrl; /**< IOMMU Performance optimization control register. */
2295 /** @} */
2296
2297 /** @name MMIO: x2APIC Control registers.
2298 * @{ */
2299 IOMMU_XT_GEN_INTR_CTRL_T XtGenIntrCtrl; /**< IOMMU X2APIC General interrupt control register. */
2300 IOMMU_XT_PPR_INTR_CTRL_T XtPprIntrCtrl; /**< IOMMU X2APIC PPR interrupt control register. */
2301 IOMMU_XT_GALOG_INTR_CTRL_T XtGALogIntrCtrl; /**< IOMMU X2APIC Guest Log interrupt control register. */
2302 /** @} */
2303
2304 /** @name MMIO: MARC registers.
2305 * @{ */
2306 MARC_APER_T aMarcApers[4]; /**< MARC Aperture Registers. */
2307 /** @} */
2308
2309 /** @name MMIO: Reserved register.
2310 * @{ */
2311 IOMMU_RSVD_REG_T RsvdReg; /**< IOMMU Reserved Register. */
2312 /** @} */
2313
2314 /** @name MMIO: Command and Event Log pointer registers.
2315 * @{ */
2316 CMD_BUF_HEAD_PTR_T CmdBufHeadPtr; /**< Command buffer head pointer register. */
2317 CMD_BUF_TAIL_PTR_T CmdBufTailPtr; /**< Command buffer tail pointer register. */
2318 EVT_LOG_HEAD_PTR_T EvtLogHeadPtr; /**< Event log head pointer register. */
2319 EVT_LOG_TAIL_PTR_T EvtLogTailPtr; /**< Event log tail pointer register. */
2320 /** @} */
2321
2322 /** @name MMIO: Command and Event Status register.
2323 * @{ */
2324 IOMMU_STATUS_T Status; /**< IOMMU status register. */
2325 /** @} */
2326
2327 /** @name MMIO: PPR Log Head and Tail pointer registers.
2328 * @{ */
2329 PPR_LOG_HEAD_PTR_T PprLogHeadPtr; /**< IOMMU PPR log head pointer register. */
2330 PPR_LOG_TAIL_PTR_T PprLogTailPtr; /**< IOMMU PPR log tail pointer register. */
2331 /** @} */
2332
2333 /** @name MMIO: Guest Virtual-APIC Log Head and Tail pointer registers.
2334 * @{ */
2335 GALOG_HEAD_PTR_T GALogHeadPtr; /**< Guest Virtual-APIC log head pointer register. */
2336 GALOG_TAIL_PTR_T GALogTailPtr; /**< Guest Virtual-APIC log tail pointer register. */
2337 /** @} */
2338
2339 /** @name MMIO: PPR Log B Head and Tail pointer registers.
2340 * @{ */
2341 PPR_LOG_B_HEAD_PTR_T PprLogBHeadPtr; /**< PPR log B head pointer register. */
2342 PPR_LOG_B_TAIL_PTR_T PprLogBTailPtr; /**< PPR log B tail pointer register. */
2343 /** @} */
2344
2345 /** @name MMIO: Event Log B Head and Tail pointer registers.
2346 * @{ */
2347 EVT_LOG_B_HEAD_PTR_T EvtLogBHeadPtr; /**< Event log B head pointer register. */
2348 EVT_LOG_B_TAIL_PTR_T EvtLogBTailPtr; /**< Event log B tail pointer register. */
2349 /** @} */
2350
2351 /** @name MMIO: PPR Log Overflow protection registers.
2352 * @{ */
2353 PPR_LOG_AUTO_RESP_T PprLogAutoResp; /**< PPR Log Auto Response register. */
2354 PPR_LOG_OVERFLOW_EARLY_T PprLogOverflowEarly; /**< PPR Log Overflow Early Indicator register. */
2355 PPR_LOG_B_OVERFLOW_EARLY_T PprLogBOverflowEarly; /**< PPR Log B Overflow Early Indicator register. */
2356 /** @} */
2357
2358 /** @todo IOMMU: IOMMU Event counter registers. */
2359
2360 /** @todo IOMMU: Stat counters. */
2361} IOMMU;
2362/** Pointer to the IOMMU device state. */
2363typedef struct IOMMU *PIOMMU;
2364/** Pointer to the const IOMMU device state. */
2365typedef const struct IOMMU *PCIOMMU;
2366AssertCompileMemberAlignment(IOMMU, hEvtCmdThread, 8);
2367AssertCompileMemberAlignment(IOMMU, hMmio, 8);
2368AssertCompileMemberAlignment(IOMMU, IommuBar, 8);
2369
2370/**
2371 * The ring-3 IOMMU device state.
2372 */
2373typedef struct IOMMUR3
2374{
2375 /** Device instance. */
2376 PPDMDEVINSR3 pDevInsR3;
2377 /** The IOMMU helpers. */
2378 PCPDMIOMMUHLPR3 pIommuHlpR3;
2379 /** The command thread handle. */
2380 R3PTRTYPE(PPDMTHREAD) pCmdThread;
2381} IOMMUR3;
2382/** Pointer to the ring-3 IOMMU device state. */
2383typedef IOMMUR3 *PIOMMUR3;
2384
2385/**
2386 * The ring-0 IOMMU device state.
2387 */
2388typedef struct IOMMUR0
2389{
2390 /** Device instance. */
2391 PPDMDEVINSR0 pDevInsR0;
2392 /** The IOMMU helpers. */
2393 PCPDMIOMMUHLPR0 pIommuHlpR0;
2394} IOMMUR0;
2395/** Pointer to the ring-0 IOMMU device state. */
2396typedef IOMMUR0 *PIOMMUR0;
2397
2398/**
2399 * The raw-mode IOMMU device state.
2400 */
2401typedef struct IOMMURC
2402{
2403 /** Device instance. */
2404    PPDMDEVINSRC                pDevInsRC;
2405 /** The IOMMU helpers. */
2406 PCPDMIOMMUHLPRC pIommuHlpRC;
2407} IOMMURC;
2408/** Pointer to the raw-mode IOMMU device state. */
2409typedef IOMMURC *PIOMMURC;
2410
2411/** The IOMMU device state for the current context. */
2412typedef CTX_SUFF(IOMMU) IOMMUCC;
2413/** Pointer to the IOMMU device state for the current context. */
2414typedef CTX_SUFF(PIOMMU) PIOMMUCC;
2415
2416/**
2417 * IOMMU register access routines.
2418 */
2419typedef struct
2420{
2421 const char *pszName;
2422 VBOXSTRICTRC (*pfnRead )(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t *pu64Value);
2423 VBOXSTRICTRC (*pfnWrite)(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value);
2424 bool f64BitReg;
2425} IOMMUREGACC;
2426
2427
2428/*********************************************************************************************************************************
2429* Global Variables *
2430*********************************************************************************************************************************/
2431/**
2432 * An array of the number of device table segments supported.
2433 * Indexed by u2DevTabSegSup.
2434 */
2435static uint8_t const g_acDevTabSegs[] = { 0, 2, 4, 8 };
2436
2437/**
2438 * An array of the masks to select the device table segment index from a device ID.
2439 */
2440static uint16_t const g_auDevTabSegMasks[] = { 0x0, 0x8000, 0xc000, 0xe000 };
2441
2442/**
2443 * The maximum size (inclusive) of each device table segment (0 to 7).
2444 * Indexed by the device table segment index.
2445 */
2446static uint16_t const g_auDevTabSegMaxSizes[] = { 0x1ff, 0xff, 0x7f, 0x7f, 0x3f, 0x3f, 0x3f, 0x3f };
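/*
 * Sketch of how these tables fit together (the actual device ID lookup lives elsewhere in this file):
 * with 8 segments enabled, g_acDevTabSegs[3] = 8 and g_auDevTabSegMasks[3] = 0xe000, so the top 3 bits of
 * a 16-bit device ID select the segment, e.g. device ID 0x4321 presumably yields segment
 * (0x4321 & 0xe000) >> 13 = 2. g_auDevTabSegMaxSizes[2] = 0x7f then bounds the Size field that
 * iommuAmdDevTabSegBar_w() accepts for that segment's base address register.
 */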
2447
2448
2449#ifndef VBOX_DEVICE_STRUCT_TESTCASE
2450/**
2451 * Gets the maximum number of buffer entries for the given buffer length.
2452 *
2453 * @returns Number of buffer entries.
2454 * @param uEncodedLen The length (power-of-2 encoded).
2455 */
2456DECLINLINE(uint32_t) iommuAmdGetBufMaxEntries(uint8_t uEncodedLen)
2457{
2458 Assert(uEncodedLen > 7);
2459 return 2 << (uEncodedLen - 1);
2460}
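/* For example, the minimum valid encoding of 8 yields 2 << 7 = 256 entries, and each increment of the
   encoded length doubles the count, up to 2 << 14 = 32768 entries for an encoding of 15. */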
2461
2462
2463/**
2464 * Gets the total length of the buffer given a base register's encoded length.
2465 *
2466 * @returns The length of the buffer in bytes.
2467 * @param uEncodedLen The length (power-of-2 encoded).
2468 */
2469DECLINLINE(uint32_t) iommuAmdGetBufLength(uint8_t uEncodedLen)
2470{
2471 Assert(uEncodedLen > 7);
2472 return (2 << (uEncodedLen - 1)) << 4;
2473}
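/* Each buffer entry is 16 bytes, so an encoded length of 8 gives 256 * 16 = 4 KB, while the maximum
   encoding of 15 gives 32768 * 16 = 512 KB (matching the limit noted for the head/tail pointer registers). */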
2474
2475
2476/**
2477 * Gets the number of (unconsumed) entries in the event log.
2478 *
2479 * @returns The number of entries in the event log.
2480 * @param pThis The IOMMU device state.
2481 */
2482static uint32_t iommuAmdGetEvtLogEntryCount(PIOMMU pThis)
2483{
2484 uint32_t const idxTail = pThis->EvtLogTailPtr.n.off >> IOMMU_EVT_GENERIC_SHIFT;
2485 uint32_t const idxHead = pThis->EvtLogHeadPtr.n.off >> IOMMU_EVT_GENERIC_SHIFT;
2486 if (idxTail >= idxHead)
2487 return idxTail - idxHead;
2488
2489 uint32_t const cMaxEvts = iommuAmdGetBufMaxEntries(pThis->EvtLogBaseAddr.n.u4Len);
2490 return cMaxEvts - idxHead + idxTail;
2491}
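/* Worked example: with a 256-entry event log, a head index of 250 and a tail index of 10 give
   256 - 250 + 10 = 16 unconsumed entries; when the tail is at or ahead of the head the first branch applies. */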
2492
2493
2494/**
2495 * Gets the number of (unconsumed) commands in the command buffer.
2496 *
2497 * @returns The number of commands in the command buffer.
2498 * @param pThis The IOMMU device state.
2499 */
2500static uint32_t iommuAmdGetCmdBufEntryCount(PIOMMU pThis)
2501{
2502 uint32_t const idxTail = pThis->CmdBufTailPtr.n.off >> IOMMU_CMD_GENERIC_SHIFT;
2503 uint32_t const idxHead = pThis->CmdBufHeadPtr.n.off >> IOMMU_CMD_GENERIC_SHIFT;
2504 if (idxTail >= idxHead)
2505 return idxTail - idxHead;
2506
2507    uint32_t const cMaxCmds = iommuAmdGetBufMaxEntries(pThis->CmdBufBaseAddr.n.u4Len);
2508    return cMaxCmds - idxHead + idxTail;
2509}
2510
2511
2512DECLINLINE(IOMMU_STATUS_T) iommuAmdGetStatus(PCIOMMU pThis)
2513{
2514 IOMMU_STATUS_T Status;
2515 Status.u64 = ASMAtomicReadU64((volatile uint64_t *)&pThis->Status.u64);
2516 return Status;
2517}
2518
2519
2520DECLINLINE(IOMMU_CTRL_T) iommuAmdGetCtrl(PCIOMMU pThis)
2521{
2522 IOMMU_CTRL_T Ctrl;
2523 Ctrl.u64 = ASMAtomicReadU64((volatile uint64_t *)&pThis->Ctrl.u64);
2524 return Ctrl;
2525}
2526
2527
2528/**
2529 * Returns whether MSI is enabled for the IOMMU.
2530 *
2531 * @returns Whether MSI is enabled.
2532 * @param pDevIns The IOMMU device instance.
2533 *
2534 * @note There should be a PCIDevXxx function for this.
2535 */
2536static bool iommuAmdIsMsiEnabled(PPDMDEVINS pDevIns)
2537{
2538 MSI_CAP_HDR_T MsiCapHdr;
2539 MsiCapHdr.u32 = PDMPciDevGetDWord(pDevIns->apPciDevs[0], IOMMU_PCI_OFF_MSI_CAP_HDR);
2540 return MsiCapHdr.n.u1MsiEnable;
2541}
2542
2543
2544/**
2545 * Signals a PCI target abort.
2546 *
2547 * @param pDevIns The IOMMU device instance.
2548 */
2549static void iommuAmdSetPciTargetAbort(PPDMDEVINS pDevIns)
2550{
2551 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
2552 uint16_t const u16Status = PDMPciDevGetStatus(pPciDev) | VBOX_PCI_STATUS_SIG_TARGET_ABORT;
2553 PDMPciDevSetStatus(pPciDev, u16Status);
2554}
2555
2556
2557/**
2558 * The IOMMU command thread.
2559 *
2560 * @returns VBox status code.
2561 * @param pDevIns The IOMMU device instance.
2562 * @param pThread The command thread.
2563 */
2564static DECLCALLBACK(int) iommuAmdR3CmdThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
2565{
2566    RT_NOREF(pDevIns, pThread);
    /** @todo IOMMU: Fetch and process commands from the command buffer. */
    return VINF_SUCCESS;
2567}
2568
2569
2570/**
2571 * Unblocks the command thread so it can respond to a state change.
2572 *
2573 * @returns VBox status code.
2574 * @param pDevIns The IOMMU device instance.
2575 * @param pThread The command thread.
2576 */
2577static DECLCALLBACK(int) iommuAmdR3CmdThreadWakeUp(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
2578{
2579 RT_NOREF(pThread);
2580 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2581 return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtCmdThread);
2582}
2583
2584
2585/**
2586 * Writes to a read-only register.
2587 */
2588static VBOXSTRICTRC iommuAmdIgnore_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2589{
2590 RT_NOREF(pDevIns, pThis, iReg, u64Value);
2591 Log((IOMMU_LOG_PFX ": Write to read-only register (%#x) with value %#RX64 ignored\n", iReg, u64Value));
2592 return VINF_SUCCESS;
2593}
2594
2595
2596/**
2597 * Writes the Device Table Base Address Register.
2598 */
2599static VBOXSTRICTRC iommuAmdDevTabBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2600{
2601 RT_NOREF(pDevIns, iReg);
2602
2603 /* Mask out all unrecognized bits. */
2604 u64Value &= IOMMU_DEV_TAB_BAR_VALID_MASK;
2605
2606 /* Update the register. */
2607 pThis->aDevTabBaseAddrs[0].u64 = u64Value;
2608 return VINF_SUCCESS;
2609}
2610
2611
2612/**
2613 * Writes the Command Buffer Base Address Register.
2614 */
2615static VBOXSTRICTRC iommuAmdCmdBufBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2616{
2617 RT_NOREF(pDevIns, iReg);
2618
2619 /*
2620 * While this is not explicitly specified like the event log base address register,
2621 * the AMD spec. does specify "CmdBufRun must be 0b to modify the command buffer registers properly".
2622 * Inconsistent specs :/
2623 */
2624 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
2625 if (Status.n.u1CmdBufRunning)
2626 {
2627 Log((IOMMU_LOG_PFX ": Setting CmdBufBar (%#RX64) when command buffer is running -> Ignored\n", u64Value));
2628 return VINF_SUCCESS;
2629 }
2630
2631 /* Mask out all unrecognized bits. */
2632 CMD_BUF_BAR_T CmdBufBaseAddr;
2633 CmdBufBaseAddr.u64 = u64Value & IOMMU_CMD_BUF_BAR_VALID_MASK;
2634
2635 /* Validate the length. */
2636 if (CmdBufBaseAddr.n.u4Len >= 8)
2637 {
2638 /* Update the register. */
2639 pThis->CmdBufBaseAddr.u64 = CmdBufBaseAddr.u64;
2640
2641 /*
2642 * Writing the command buffer base address, clears the command buffer head and tail pointers.
2643 * See AMD spec. 2.4 "Commands".
2644 */
2645 pThis->CmdBufHeadPtr.u64 = 0;
2646 pThis->CmdBufTailPtr.u64 = 0;
2647 }
2648 else
2649 Log((IOMMU_LOG_PFX ": Command buffer length (%#x) invalid -> Ignored\n", CmdBufBaseAddr.n.u4Len));
2650
2651 return VINF_SUCCESS;
2652}
2653
2654
2655/**
2656 * Writes the Event Log Base Address Register.
2657 */
2658static VBOXSTRICTRC iommuAmdEvtLogBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2659{
2660 RT_NOREF(pDevIns, iReg);
2661
2662 /*
2663 * IOMMU behavior is undefined when software writes this register when event logging is running.
2664 * In our emulation, we ignore the write entirely.
2665 * See AMD IOMMU spec. "Event Log Base Address Register".
2666 */
2667 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
2668 if (Status.n.u1EvtLogRunning)
2669 {
2670 Log((IOMMU_LOG_PFX ": Setting EvtLogBar (%#RX64) when event logging is running -> Ignored\n", u64Value));
2671 return VINF_SUCCESS;
2672 }
2673
2674 /* Mask out all unrecognized bits. */
2675 u64Value &= IOMMU_EVT_LOG_BAR_VALID_MASK;
2676 EVT_LOG_BAR_T EvtLogBaseAddr;
2677 EvtLogBaseAddr.u64 = u64Value;
2678
2679 /* Validate the length. */
2680 if (EvtLogBaseAddr.n.u4Len >= 8)
2681 {
2682 /* Update the register. */
2683 pThis->EvtLogBaseAddr.u64 = EvtLogBaseAddr.u64;
2684
2685 /*
2686 * Writing the event log base address, clears the event log head and tail pointers.
2687 * See AMD spec. 2.5 "Event Logging".
2688 */
2689 pThis->EvtLogHeadPtr.u64 = 0;
2690 pThis->EvtLogTailPtr.u64 = 0;
2691 }
2692 else
2693 Log((IOMMU_LOG_PFX ": Event log length (%#x) invalid -> Ignored\n", EvtLogBaseAddr.n.u4Len));
2694
2695 return VINF_SUCCESS;
2696}
2697
2698
2699/**
2700 * Writes the Control Register.
2701 */
2702static VBOXSTRICTRC iommuAmdCtrl_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2703{
2704 RT_NOREF(pDevIns, iReg);
2705
2706 /* Mask out all unrecognized bits. */
2707 u64Value &= IOMMU_CTRL_VALID_MASK;
2708
2709 IOMMU_CTRL_T const OldCtrl = iommuAmdGetCtrl(pThis);
2710 IOMMU_CTRL_T NewCtrl;
2711 NewCtrl.u64 = u64Value;
2712
2713 /* Enable or disable event logging when the bit transitions. */
2714 if (OldCtrl.n.u1EvtLogEn != NewCtrl.n.u1EvtLogEn)
2715 {
2716 if (NewCtrl.n.u1EvtLogEn)
2717 {
2718 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_EVT_LOG_OVERFLOW);
2719 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_EVT_LOG_RUNNING);
2720 }
2721 else
2722 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_EVT_LOG_RUNNING);
2723 }
2724
2725 /* Update the register. */
2726 ASMAtomicWriteU64(&pThis->Ctrl.u64, NewCtrl.u64);
2727
2728 /* Enable or disable command buffer processing when the bit transitions. */
2729 if (OldCtrl.n.u1CmdBufEn != NewCtrl.n.u1CmdBufEn)
2730 {
2731 if (NewCtrl.n.u1CmdBufEn)
2732 {
2733 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_CMD_BUF_RUNNING);
2734
2735 /* If the command buffer isn't empty, kick the command thread to start processing commands. */
2736 if (pThis->CmdBufTailPtr.n.off != pThis->CmdBufHeadPtr.n.off)
2737 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtCmdThread);
2738 }
2739 else
2740 {
2741 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_CMD_BUF_RUNNING);
2742 /* Kick the command thread to stop processing commands. */
2743 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtCmdThread);
2744 }
2745 }

    return VINF_SUCCESS;
2746}
2747
2748
2749/**
2750 * Writes to the Exclusion Range Base Address Register.
2751 */
2752static VBOXSTRICTRC iommuAmdExclRangeBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2753{
2754 RT_NOREF(pDevIns, iReg);
2755 pThis->ExclRangeBaseAddr.u64 = u64Value & IOMMU_EXCL_RANGE_BAR_VALID_MASK;
2756 return VINF_SUCCESS;
2757}
2758
2759
2760/**
2761 * Writes to the Exclusion Range Limit Register.
2762 */
2763static VBOXSTRICTRC iommuAmdExclRangeLimit_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2764{
2765 RT_NOREF(pDevIns, iReg);
2766 u64Value &= IOMMU_EXCL_RANGE_LIMIT_VALID_MASK;
2767 u64Value |= UINT64_C(0xfff);
2768 pThis->ExclRangeLimit.u64 = u64Value;
2769 return VINF_SUCCESS;
2770}
2771
2772
2773/**
2774 * Writes the Hardware Event Register (Hi).
2775 */
2776static VBOXSTRICTRC iommuAmdHwEvtHi_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2777{
2778 /** @todo IOMMU: Why the heck is this marked read/write by the AMD IOMMU spec? */
2779 RT_NOREF(pDevIns, iReg);
2780 Log((IOMMU_LOG_PFX ": Writing %#RX64 to hardware event (Hi) register!\n", u64Value));
2781 pThis->HwEvtHi.u64 = u64Value;
2782 return VINF_SUCCESS;
2783}
2784
2785
2786/**
2787 * Writes the Hardware Event Register (Lo).
2788 */
2789static VBOXSTRICTRC iommuAmdHwEvtLo_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2790{
2791 /** @todo IOMMU: Why the heck is this marked read/write by the AMD IOMMU spec? */
2792 RT_NOREF(pDevIns, iReg);
2793 Log((IOMMU_LOG_PFX ": Writing %#RX64 to hardware event (Lo) register!\n", u64Value));
2794 pThis->HwEvtLo = u64Value;
2795 return VINF_SUCCESS;
2796}
2797
2798
2799/**
2800 * Writes the Hardware Event Status Register.
2801 */
2802static VBOXSTRICTRC iommuAmdHwEvtStatus_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2803{
2804 RT_NOREF(pDevIns, iReg);
2805
2806 /* Mask out all unrecognized bits. */
2807 u64Value &= IOMMU_HW_EVT_STATUS_VALID_MASK;
2808
2809 /*
2810     * HEV (bit 0) and HEO (bit 1) are RW1C (Read/Write 1-to-Clear; writing 0 has no effect).
2811     * If the Valid bit (HEV) is not currently set, there is nothing to clear and we bail out early.
2812     * The Overflow bit (HEO) is only meaningful while the Valid bit is set.
2813 */
2814 uint64_t HwStatus = pThis->HwEvtStatus.u64;
2815 if (!(HwStatus & RT_BIT(0)))
2816 return VINF_SUCCESS;
2817 if (u64Value & HwStatus & RT_BIT_64(0))
2818 HwStatus &= ~RT_BIT_64(0);
2819 if (u64Value & HwStatus & RT_BIT_64(1))
2820 HwStatus &= ~RT_BIT_64(1);
2821
2822 /* Update the register. */
2823 pThis->HwEvtStatus.u64 = HwStatus;
2824 return VINF_SUCCESS;
2825}
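/* Trace of the RW1C handling above: with HwEvtStatus = 0x3 (HEV + HEO), a guest write of 0x3 clears both
   bits, while a write of 0x1 clears only HEV and leaves HEO set (0x2); writing 0 changes nothing. */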
2826
2827
2828/**
2829 * Writes the Device Table Segment Base Address Register.
2830 */
2831static VBOXSTRICTRC iommuAmdDevTabSegBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2832{
2833 RT_NOREF(pDevIns);
2834
2835 /* Figure out which segment is being written. */
2836 uint8_t const offSegment = (iReg - IOMMU_MMIO_OFF_DEV_TAB_SEG_FIRST) >> 3;
2837 uint8_t const idxSegment = offSegment + 1;
2838 Assert(idxSegment < RT_ELEMENTS(pThis->aDevTabBaseAddrs));
2839
2840 /* Mask out all unrecognized bits. */
2841 u64Value &= IOMMU_DEV_TAB_SEG_BAR_VALID_MASK;
2842 DEV_TAB_BAR_T DevTabSegBar;
2843 DevTabSegBar.u64 = u64Value;
2844
2845 /* Validate the size. */
2846 uint16_t const uSegSize = DevTabSegBar.n.u9Size;
2847 uint16_t const uMaxSegSize = g_auDevTabSegMaxSizes[idxSegment];
2848 if (uSegSize <= uMaxSegSize)
2849 {
2850 /* Update the register. */
2851 pThis->aDevTabBaseAddrs[idxSegment].u64 = u64Value;
2852 }
2853 else
2854 Log((IOMMU_LOG_PFX ": Device table segment (%u) size invalid (%#RX32) -> Ignored\n", idxSegment, uSegSize));
2855
2856 return VINF_SUCCESS;
2857}
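/* For example, a write to the first segment register (IOMMU_MMIO_OFF_DEV_TAB_SEG_FIRST) yields
   offSegment 0 and thus updates aDevTabBaseAddrs[1]; segment 0 (the unsegmented device table) is
   handled by iommuAmdDevTabBar_w() above. */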
2858
2859
2860/**
2861 * Writes the MSI Capability Header Register.
2862 */
2863static VBOXSTRICTRC iommuAmdMsiCapHdr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2864{
2865 RT_NOREF(pThis, iReg);
2866 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
2867 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
2868 MSI_CAP_HDR_T MsiCapHdr;
2869 MsiCapHdr.u32 = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR);
2870 MsiCapHdr.n.u1MsiEnable = RT_BOOL(u64Value & IOMMU_MSI_CAP_HDR_MSI_EN_MASK);
2871 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR, MsiCapHdr.u32);
2872 return VINF_SUCCESS;
2873}
2874
2875
2876/**
2877 * Writes the MSI Address (Lo) Register (32-bit).
2878 */
2879static VBOXSTRICTRC iommuAmdMsiAddrLo_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2880{
2881 RT_NOREF(pThis, iReg);
2882 Assert(!RT_HI_U32(u64Value));
2883 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
2884 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
2885 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO, u64Value & IOMMU_MSI_ADDR_VALID_MASK);
2886 return VINF_SUCCESS;
2887}
2888
2889
2890/**
2891 * Writes the MSI Address (Hi) Register (32-bit).
2892 */
2893static VBOXSTRICTRC iommuAmdMsiAddrHi_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2894{
2895 RT_NOREF(pThis, iReg);
2896 Assert(!RT_HI_U32(u64Value));
2897 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
2898 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
2899 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI, u64Value);
2900 return VINF_SUCCESS;
2901}
2902
2903
2904/**
2905 * Writes the MSI Data Register (32-bit).
2906 */
2907static VBOXSTRICTRC iommuAmdMsiData_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2908{
2909 RT_NOREF(pThis, iReg);
2910 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
2911 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
2912 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA, u64Value & IOMMU_MSI_DATA_VALID_MASK);
2913 return VINF_SUCCESS;
2914}
2915
2916
2917/**
2918 * Writes the Command Buffer Head Pointer Register (32-bit).
2919 */
2920static VBOXSTRICTRC iommuAmdCmdBufHeadPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2921{
2922 RT_NOREF(pDevIns, iReg);
2923
2924 /*
2925 * IOMMU behavior is undefined when software writes this register when the command buffer is running.
2926 * In our emulation, we ignore the write entirely.
2927 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
2928 */
2929 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
2930 if (Status.n.u1CmdBufRunning)
2931 {
2932 Log((IOMMU_LOG_PFX ": Setting CmdBufHeadPtr (%#RX64) when command buffer is running -> Ignored\n", u64Value));
2933 return VINF_SUCCESS;
2934 }
2935
2936 /*
2937 * IOMMU behavior is undefined when software writes a value outside the buffer length.
2938 * In our emulation, we ignore the write entirely.
2939 */
2940 uint32_t const offBuf = u64Value & IOMMU_CMD_BUF_HEAD_PTR_VALID_MASK;
2941 uint32_t const cbBuf = iommuAmdGetBufLength(pThis->CmdBufBaseAddr.n.u4Len);
2942 Assert(cbBuf <= _512K);
2943 if (offBuf >= cbBuf)
2944 {
2945         Log((IOMMU_LOG_PFX ": Setting CmdBufHeadPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n",
2946 offBuf, cbBuf));
2947 return VINF_SUCCESS;
2948 }
2949
2950 /* Update the register. */
2951 pThis->CmdBufHeadPtr.au32[0] = offBuf;
2952
2953 LogFlow((IOMMU_LOG_PFX ": Set CmdBufHeadPtr to %#RX32\n", offBuf));
2954 return VINF_SUCCESS;
2955}
2956
2957
2958/**
2959 * Writes the Command Buffer Tail Pointer Register (32-bit).
2960 */
2961static VBOXSTRICTRC iommuAmdCmdBufTailPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
2962{
2963 RT_NOREF(pDevIns, iReg);
2964
2965 /*
2966 * IOMMU behavior is undefined when software writes a value outside the buffer length.
2967 * In our emulation, we ignore the write entirely.
2968 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
2969 */
2970 uint32_t const offBuf = u64Value & IOMMU_CMD_BUF_TAIL_PTR_VALID_MASK;
2971 uint32_t const cbBuf = iommuAmdGetBufLength(pThis->CmdBufBaseAddr.n.u4Len);
2972 Assert(cbBuf <= _512K);
2973 if (offBuf >= cbBuf)
2974 {
2975 Log((IOMMU_LOG_PFX ": Setting CmdBufTailPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n",
2976 offBuf, cbBuf));
2977 return VINF_SUCCESS;
2978 }
2979
2980 /*
2981 * IOMMU behavior is undefined if software advances the tail pointer equal to or beyond the
2982 * head pointer after adding one or more commands to the buffer.
2983 *
2984 * However, we cannot enforce this strictly because it's legal for software to shrink the
2985 * command queue (by reducing the offset) as well as wrap around the pointer (when head isn't
2986 * at 0). Software might even make the queue empty by making head and tail equal which is
2987 * allowed. I don't think we can or should try too hard to prevent software shooting itself
2988 * in the foot here. As long as we make sure the offset value is within the circular buffer
2989 * bounds (which we do by masking bits above) it should be sufficient.
2990 */
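    /*
     * Illustrative sketch (hypothetical values): with a 4K command buffer (cbBuf = 0x1000),
     * a head pointer at 0xff0 and a tail write of 0x010 simply wraps the queue around.
     * We accept it as long as the masked offset stays below cbBuf, which the bounds check
     * above (offBuf >= cbBuf -> ignore) guarantees.
     */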
2991 pThis->CmdBufTailPtr.au32[0] = offBuf;
2992
2993 LogFlow((IOMMU_LOG_PFX ": Set CmdBufTailPtr to %#RX32\n", offBuf));
2994 return VINF_SUCCESS;
2995}
2996
2997
2998/**
2999 * Writes the Event Log Head Pointer Register (32-bit).
3000 */
3001static VBOXSTRICTRC iommuAmdEvtLogHeadPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
3002{
3003 RT_NOREF(pDevIns, iReg);
3004
3005 /*
3006 * IOMMU behavior is undefined when software writes a value outside the buffer length.
3007 * In our emulation, we ignore the write entirely.
3008 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
3009 */
3010 uint32_t const offBuf = u64Value & IOMMU_EVT_LOG_HEAD_PTR_VALID_MASK;
3011 uint32_t const cbBuf = iommuAmdGetBufLength(pThis->EvtLogBaseAddr.n.u4Len);
3012 Assert(cbBuf <= _512K);
3013 if (offBuf >= cbBuf)
3014 {
3015 Log((IOMMU_LOG_PFX ": Setting EvtLogHeadPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n",
3016 offBuf, cbBuf));
3017 return VINF_SUCCESS;
3018 }
3019
3020 /* Update the register. */
3021 pThis->EvtLogHeadPtr.au32[0] = offBuf;
3022
3023 LogFlow((IOMMU_LOG_PFX ": Set EvtLogHeadPtr to %#RX32\n", offBuf));
3024 return VINF_SUCCESS;
3025}
3026
3027
3028/**
3029 * Writes the Event Log Tail Pointer Register (32-bit).
3030 */
3031static VBOXSTRICTRC iommuAmdEvtLogTailPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
3032{
3033 RT_NOREF(pDevIns, iReg);
3034 NOREF(pThis);
3035
3036 /*
3037 * IOMMU behavior is undefined when software writes this register when the event log is running.
3038 * In our emulation, we ignore the write entirely.
3039 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
3040 */
3041 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
3042 if (Status.n.u1EvtLogRunning)
3043 {
3044 Log((IOMMU_LOG_PFX ": Setting EvtLogTailPtr (%#RX64) when event log is running -> Ignored\n", u64Value));
3045 return VINF_SUCCESS;
3046 }
3047
3048 /*
3049 * IOMMU behavior is undefined when software writes a value outside the buffer length.
3050 * In our emulation, we ignore the write entirely.
3051 */
3052 uint32_t const offBuf = u64Value & IOMMU_EVT_LOG_TAIL_PTR_VALID_MASK;
3053 uint32_t const cbBuf = iommuAmdGetBufLength(pThis->EvtLogBaseAddr.n.u4Len);
3054 Assert(cbBuf <= _512K);
3055 if (offBuf >= cbBuf)
3056 {
3057 Log((IOMMU_LOG_PFX ": Setting EvtLogTailPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n",
3058 offBuf, cbBuf));
3059 return VINF_SUCCESS;
3060 }
3061
3062 /* Update the register. */
3063 pThis->EvtLogTailPtr.au32[0] = offBuf;
3064
3065 LogFlow((IOMMU_LOG_PFX ": Set EvtLogTailPtr to %#RX32\n", offBuf));
3066 return VINF_SUCCESS;
3067}
3068
3069
3070/**
3071 * Writes the Status Register (64-bit).
3072 */
3073static VBOXSTRICTRC iommuAmdStatus_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
3074{
3075 RT_NOREF(pDevIns, iReg);
3076
3077 /* Mask out all unrecognized bits. */
3078 u64Value &= IOMMU_STATUS_VALID_MASK;
3079
3080 /*
3081 * Compute RW1C (read-only, write-1-to-clear) bits and preserve the rest (which are read-only).
3082 * Writing 0 to an RW1C bit has no effect. Writing 1 to an RW1C bit, clears the bit if it's already 1.
3083 */
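    /*
     * Worked example (purely illustrative bit values, not the real IOMMU_STATUS layout):
     * assume the RW1C mask is 0x3 and the old status is 0x5 (one RW1C bit and one RO bit set).
     * A guest write of 0x3 then yields:
     *   fOldRw1cBits = 0x5 &  0x3         = 0x1
     *   fOldRoBits   = 0x5 & ~0x3         = 0x4
     *   fNewRw1cBits = 0x3 &  0x3         = 0x3
     *   uNewStatus   = (0x1 & ~0x3) | 0x4 = 0x4   (RW1C bit cleared, RO bit preserved)
     */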
3084 IOMMU_STATUS_T const OldStatus = iommuAmdGetStatus(pThis);
3085 uint64_t const fOldRw1cBits = (OldStatus.u64 & IOMMU_STATUS_RW1C_MASK);
3086 uint64_t const fOldRoBits = (OldStatus.u64 & ~IOMMU_STATUS_RW1C_MASK);
3087 uint64_t const fNewRw1cBits = (u64Value & IOMMU_STATUS_RW1C_MASK);
3088
3089 uint64_t const uNewStatus = (fOldRw1cBits & ~fNewRw1cBits) | fOldRoBits;
3090
3091 /* Update the register. */
3092 ASMAtomicWriteU64(&pThis->Status.u64, uNewStatus);
3093 return VINF_SUCCESS;
3094}
3095
3096
3097#if 0
3098/**
3099 * Table 0: Registers-access table.
3100 */
3101static const IOMMUREGACC g_aTable0Regs[] =
3102{
3103
3104};
3105
3106/**
3107 * Table 1: Registers-access table.
3108 */
3109static const IOMMUREGACC g_aTable1Regs[] =
3110{
3111};
3112#endif
3113
3114
3115/**
3116 * Writes an IOMMU register (32-bit and 64-bit).
3117 *
3118 * @returns Strict VBox status code.
3119 * @param pDevIns The IOMMU device instance.
3120 * @param off MMIO byte offset to the register.
3121 * @param cb The size of the write access.
3122 * @param uValue The value being written.
3123 *
3124 * @thread EMT.
3125 */
3126static VBOXSTRICTRC iommuAmdWriteRegister(PPDMDEVINS pDevIns, uint32_t off, uint8_t cb, uint64_t uValue)
3127{
3128 Assert(off < IOMMU_MMIO_REGION_SIZE);
3129 Assert(cb == 4 || cb == 8);
3130 Assert(!(off & (cb - 1)));
3131
3132 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3133 switch (off)
3134 {
3135 case IOMMU_MMIO_OFF_DEV_TAB_BAR: return iommuAmdDevTabBar_w(pDevIns, pThis, off, uValue);
3136 case IOMMU_MMIO_OFF_CMD_BUF_BAR: return iommuAmdCmdBufBar_w(pDevIns, pThis, off, uValue);
3137 case IOMMU_MMIO_OFF_EVT_LOG_BAR: return iommuAmdEvtLogBar_w(pDevIns, pThis, off, uValue);
3138 case IOMMU_MMIO_OFF_CTRL: return iommuAmdCtrl_w(pDevIns, pThis, off, uValue);
3139 case IOMMU_MMIO_OFF_EXCL_BAR: return iommuAmdExclRangeBar_w(pDevIns, pThis, off, uValue);
3140 case IOMMU_MMIO_OFF_EXCL_RANGE_LIMIT: return iommuAmdExclRangeLimit_w(pDevIns, pThis, off, uValue);
3141 case IOMMU_MMIO_OFF_EXT_FEAT: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3142
3143 case IOMMU_MMIO_OFF_PPR_LOG_BAR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3144 case IOMMU_MMIO_OFF_HW_EVT_HI: return iommuAmdHwEvtHi_w(pDevIns, pThis, off, uValue);
3145 case IOMMU_MMIO_OFF_HW_EVT_LO: return iommuAmdHwEvtLo_w(pDevIns, pThis, off, uValue);
3146 case IOMMU_MMIO_OFF_HW_EVT_STATUS: return iommuAmdHwEvtStatus_w(pDevIns, pThis, off, uValue);
3147
3148 case IOMMU_MMIO_OFF_GALOG_BAR:
3149 case IOMMU_MMIO_OFF_GALOG_TAIL_ADDR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3150
3151 case IOMMU_MMIO_OFF_PPR_LOG_B_BAR:
3152 case IOMMU_MMIO_OFF_PPR_EVT_B_BAR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3153
3154 case IOMMU_MMIO_OFF_DEV_TAB_SEG_1:
3155 case IOMMU_MMIO_OFF_DEV_TAB_SEG_2:
3156 case IOMMU_MMIO_OFF_DEV_TAB_SEG_3:
3157 case IOMMU_MMIO_OFF_DEV_TAB_SEG_4:
3158 case IOMMU_MMIO_OFF_DEV_TAB_SEG_5:
3159 case IOMMU_MMIO_OFF_DEV_TAB_SEG_6:
3160 case IOMMU_MMIO_OFF_DEV_TAB_SEG_7: return iommuAmdDevTabSegBar_w(pDevIns, pThis, off, uValue);
3161
3162 case IOMMU_MMIO_OFF_DEV_SPECIFIC_FEAT:
3163 case IOMMU_MMIO_OFF_DEV_SPECIFIC_CTRL:
3164 case IOMMU_MMIO_OFF_DEV_SPECIFIC_STATUS: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3165
3166 case IOMMU_MMIO_OFF_MSI_VECTOR_0:
3167 case IOMMU_MMIO_OFF_MSI_VECTOR_1: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3168 case IOMMU_MMIO_OFF_MSI_CAP_HDR:
3169 {
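            /*
             * Note (descriptive, behaviour unchanged): a 64-bit write here straddles two
             * 32-bit registers. The low dword goes to the MSI capability header and, if the
             * access is 8 bytes wide, the high dword falls through to the MSI Address (Lo)
             * handler below. The same pattern is used for MSI Address (Hi) + MSI Data.
             */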
3170 VBOXSTRICTRC rcStrict = iommuAmdMsiCapHdr_w(pDevIns, pThis, off, (uint32_t)uValue);
3171 if (cb == 4 || RT_FAILURE(rcStrict))
3172 return rcStrict;
3173 uValue >>= 32;
3174 RT_FALL_THRU();
3175 }
3176 case IOMMU_MMIO_OFF_MSI_ADDR_LO: return iommuAmdMsiAddrLo_w(pDevIns, pThis, off, uValue);
3177 case IOMMU_MMIO_OFF_MSI_ADDR_HI:
3178 {
3179 VBOXSTRICTRC rcStrict = iommuAmdMsiAddrHi_w(pDevIns, pThis, off, (uint32_t)uValue);
3180 if (cb == 4 || RT_FAILURE(rcStrict))
3181 return rcStrict;
3182 uValue >>= 32;
3183 RT_FALL_THRU();
3184 }
3185 case IOMMU_MMIO_OFF_MSI_DATA: return iommuAmdMsiData_w(pDevIns, pThis, off, uValue);
3186 case IOMMU_MMIO_OFF_MSI_MAPPING_CAP_HDR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3187
3188 case IOMMU_MMIO_OFF_PERF_OPT_CTRL: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3189
3190 case IOMMU_MMIO_OFF_XT_GEN_INTR_CTRL:
3191 case IOMMU_MMIO_OFF_XT_PPR_INTR_CTRL:
3192 case IOMMU_MMIO_OFF_XT_GALOG_INT_CTRL: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3193
3194 case IOMMU_MMIO_OFF_MARC_APER_BAR_0:
3195 case IOMMU_MMIO_OFF_MARC_APER_RELOC_0:
3196 case IOMMU_MMIO_OFF_MARC_APER_LEN_0:
3197 case IOMMU_MMIO_OFF_MARC_APER_BAR_1:
3198 case IOMMU_MMIO_OFF_MARC_APER_RELOC_1:
3199 case IOMMU_MMIO_OFF_MARC_APER_LEN_1:
3200 case IOMMU_MMIO_OFF_MARC_APER_BAR_2:
3201 case IOMMU_MMIO_OFF_MARC_APER_RELOC_2:
3202 case IOMMU_MMIO_OFF_MARC_APER_LEN_2:
3203 case IOMMU_MMIO_OFF_MARC_APER_BAR_3:
3204 case IOMMU_MMIO_OFF_MARC_APER_RELOC_3:
3205 case IOMMU_MMIO_OFF_MARC_APER_LEN_3: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3206
3207 case IOMMU_MMIO_OFF_RSVD_REG: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3208
3209 case IOMMU_MMIO_CMD_BUF_HEAD_PTR: return iommuAmdCmdBufHeadPtr_w(pDevIns, pThis, off, uValue);
3210 case IOMMU_MMIO_CMD_BUF_TAIL_PTR: return iommuAmdCmdBufTailPtr_w(pDevIns, pThis, off, uValue);
3211 case IOMMU_MMIO_EVT_LOG_HEAD_PTR: return iommuAmdEvtLogHeadPtr_w(pDevIns, pThis, off, uValue);
3212 case IOMMU_MMIO_EVT_LOG_TAIL_PTR: return iommuAmdEvtLogTailPtr_w(pDevIns, pThis, off, uValue);
3213
3214 case IOMMU_MMIO_OFF_STATUS: return iommuAmdStatus_w(pDevIns, pThis, off, uValue);
3215
3216 case IOMMU_MMIO_OFF_PPR_LOG_HEAD_PTR:
3217 case IOMMU_MMIO_OFF_PPR_LOG_TAIL_PTR:
3218
3219 case IOMMU_MMIO_OFF_GALOG_HEAD_PTR:
3220 case IOMMU_MMIO_OFF_GALOG_TAIL_PTR:
3221
3222 case IOMMU_MMIO_OFF_PPR_LOG_B_HEAD_PTR:
3223 case IOMMU_MMIO_OFF_PPR_LOG_B_TAIL_PTR:
3224
3225 case IOMMU_MMIO_OFF_EVT_LOG_B_HEAD_PTR:
3226 case IOMMU_MMIO_OFF_EVT_LOG_B_TAIL_PTR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
3227
3228 case IOMMU_MMIO_OFF_PPR_LOG_AUTO_RESP:
3229 case IOMMU_MMIO_OFF_PPR_LOG_OVERFLOW_EARLY:
3230 case IOMMU_MMIO_OFF_PPR_LOG_B_OVERFLOW_EARLY:
3231
3232 /* Not implemented. */
3233 case IOMMU_MMIO_OFF_SMI_FLT_FIRST:
3234 case IOMMU_MMIO_OFF_SMI_FLT_LAST:
3235 {
3236 Log((IOMMU_LOG_PFX ": Writing unsupported register: SMI filter %u -> Ignored\n",
3237 (off - IOMMU_MMIO_OFF_SMI_FLT_FIRST) >> 3));
3238 return VINF_SUCCESS;
3239 }
3240
3241 /* Unknown. */
3242 default:
3243 {
3244 Log((IOMMU_LOG_PFX ": Writing unknown register %u (%#x) with %#RX64 -> Ignored\n", off, off, uValue));
3245 return VINF_SUCCESS;
3246 }
3247 }
3248}
3249
3250
3251/**
3252 * Reads an IOMMU register (64-bit) given its MMIO offset.
3253 *
3254 * All reads are 64-bit. A read of a 32-bit register that is aligned on an 8-byte
3255 * boundary also returns the following 32-bit register in the upper half of the
3256 * result.
3257 *
3258 * This is because most registers are 64-bit and aligned on 8-byte boundaries, but
3259 * some are really 32-bit registers aligned on an 8-byte boundary. We cannot assume
3260 * software will only perform 32-bit reads of those 32-bit registers.
3261 *
3262 * @returns Strict VBox status code.
3263 * @param pDevIns The IOMMU device instance.
3264 * @param off The MMIO offset of the register in bytes.
3265 * @param puResult Where to store the value being read.
3266 *
3267 * @thread EMT.
3268 */
3269static VBOXSTRICTRC iommuAmdReadRegister(PPDMDEVINS pDevIns, uint32_t off, uint64_t *puResult)
3270{
3271 Assert(off < IOMMU_MMIO_REGION_SIZE);
3272 Assert(!(off & 7) || !(off & 3));
3273
3274 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3275 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
3276 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
3277
3278 /** @todo IOMMU: fine-grained locking? */
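    /*
     * Example of the 32-bit register pairing described in the function description
     * (values are whatever the PCI config space currently holds; purely illustrative):
     * a 64-bit read at IOMMU_MMIO_OFF_MSI_CAP_HDR returns the MSI capability header in
     * bits 31:0 and the MSI Address (Lo) register in bits 63:32, i.e.
     * RT_MAKE_U64(uMsiCapHdr, uMsiAddrLo) -- see the corresponding case below.
     */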
3279 uint64_t uReg;
3280 switch (off)
3281 {
3282 case IOMMU_MMIO_OFF_DEV_TAB_BAR: uReg = pThis->aDevTabBaseAddrs[0].u64; break;
3283 case IOMMU_MMIO_OFF_CMD_BUF_BAR: uReg = pThis->CmdBufBaseAddr.u64; break;
3284 case IOMMU_MMIO_OFF_EVT_LOG_BAR: uReg = pThis->EvtLogBaseAddr.u64; break;
3285 case IOMMU_MMIO_OFF_CTRL: uReg = pThis->Ctrl.u64; break;
3286 case IOMMU_MMIO_OFF_EXCL_BAR: uReg = pThis->ExclRangeBaseAddr.u64; break;
3287 case IOMMU_MMIO_OFF_EXCL_RANGE_LIMIT: uReg = pThis->ExclRangeLimit.u64; break;
3288 case IOMMU_MMIO_OFF_EXT_FEAT: uReg = pThis->ExtFeat.u64; break;
3289
3290 case IOMMU_MMIO_OFF_PPR_LOG_BAR: uReg = pThis->PprLogBaseAddr.u64; break;
3291 case IOMMU_MMIO_OFF_HW_EVT_HI: uReg = pThis->HwEvtHi.u64; break;
3292 case IOMMU_MMIO_OFF_HW_EVT_LO: uReg = pThis->HwEvtLo; break;
3293 case IOMMU_MMIO_OFF_HW_EVT_STATUS: uReg = pThis->HwEvtStatus.u64; break;
3294
3295 case IOMMU_MMIO_OFF_GALOG_BAR: uReg = pThis->GALogBaseAddr.u64; break;
3296 case IOMMU_MMIO_OFF_GALOG_TAIL_ADDR: uReg = pThis->GALogTailAddr.u64; break;
3297
3298 case IOMMU_MMIO_OFF_PPR_LOG_B_BAR: uReg = pThis->PprLogBBaseAddr.u64; break;
3299 case IOMMU_MMIO_OFF_PPR_EVT_B_BAR: uReg = pThis->EvtLogBBaseAddr.u64; break;
3300
3301 case IOMMU_MMIO_OFF_DEV_TAB_SEG_1:
3302 case IOMMU_MMIO_OFF_DEV_TAB_SEG_2:
3303 case IOMMU_MMIO_OFF_DEV_TAB_SEG_3:
3304 case IOMMU_MMIO_OFF_DEV_TAB_SEG_4:
3305 case IOMMU_MMIO_OFF_DEV_TAB_SEG_5:
3306 case IOMMU_MMIO_OFF_DEV_TAB_SEG_6:
3307 case IOMMU_MMIO_OFF_DEV_TAB_SEG_7:
3308 {
3309 uint8_t const offDevTabSeg = (off - IOMMU_MMIO_OFF_DEV_TAB_SEG_FIRST) >> 3;
3310 uint8_t const idxDevTabSeg = offDevTabSeg + 1;
3311 Assert(idxDevTabSeg < RT_ELEMENTS(pThis->aDevTabBaseAddrs));
3312 uReg = pThis->aDevTabBaseAddrs[idxDevTabSeg].u64;
3313 break;
3314 }
3315
3316 case IOMMU_MMIO_OFF_DEV_SPECIFIC_FEAT: uReg = pThis->DevSpecificFeat.u64; break;
3317 case IOMMU_MMIO_OFF_DEV_SPECIFIC_CTRL: uReg = pThis->DevSpecificCtrl.u64; break;
3318 case IOMMU_MMIO_OFF_DEV_SPECIFIC_STATUS: uReg = pThis->DevSpecificStatus.u64; break;
3319
3320 case IOMMU_MMIO_OFF_MSI_VECTOR_0: uReg = pThis->MsiMiscInfo.u64; break;
3321 case IOMMU_MMIO_OFF_MSI_VECTOR_1: uReg = pThis->MsiMiscInfo.au32[1]; break;
3322 case IOMMU_MMIO_OFF_MSI_CAP_HDR:
3323 {
3324 uint32_t const uMsiCapHdr = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR);
3325 uint32_t const uMsiAddrLo = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO);
3326 uReg = RT_MAKE_U64(uMsiCapHdr, uMsiAddrLo);
3327 break;
3328 }
3329 case IOMMU_MMIO_OFF_MSI_ADDR_LO:
3330 {
3331 uReg = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO);
3332 break;
3333 }
3334 case IOMMU_MMIO_OFF_MSI_ADDR_HI:
3335 {
3336 uint32_t const uMsiAddrHi = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI);
3337 uint32_t const uMsiData = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA);
3338 uReg = RT_MAKE_U64(uMsiAddrHi, uMsiData);
3339 break;
3340 }
3341 case IOMMU_MMIO_OFF_MSI_DATA:
3342 {
3343 uReg = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA);
3344 break;
3345 }
3346 case IOMMU_MMIO_OFF_MSI_MAPPING_CAP_HDR:
3347 {
3348 /*
3349 * The PCI spec. lists MSI Mapping Capability 08H as related to HyperTransport capability.
3350 * The AMD IOMMU spec. fails to mention it explicitly and lists values for this register as
3351         * though HyperTransport is supported. Since we don't support HyperTransport, we just return
3352         * 0 for this register.
3353 */
3354 uReg = RT_MAKE_U64(0, pThis->PerfOptCtrl.u32);
3355 break;
3356 }
3357
3358 case IOMMU_MMIO_OFF_PERF_OPT_CTRL: uReg = pThis->PerfOptCtrl.u32; break;
3359
3360 case IOMMU_MMIO_OFF_XT_GEN_INTR_CTRL: uReg = pThis->XtGenIntrCtrl.u64; break;
3361 case IOMMU_MMIO_OFF_XT_PPR_INTR_CTRL: uReg = pThis->XtPprIntrCtrl.u64; break;
3362 case IOMMU_MMIO_OFF_XT_GALOG_INT_CTRL: uReg = pThis->XtGALogIntrCtrl.u64; break;
3363
3364 case IOMMU_MMIO_OFF_MARC_APER_BAR_0: uReg = pThis->aMarcApers[0].Base.u64; break;
3365 case IOMMU_MMIO_OFF_MARC_APER_RELOC_0: uReg = pThis->aMarcApers[0].Reloc.u64; break;
3366 case IOMMU_MMIO_OFF_MARC_APER_LEN_0: uReg = pThis->aMarcApers[0].Length.u64; break;
3367 case IOMMU_MMIO_OFF_MARC_APER_BAR_1: uReg = pThis->aMarcApers[1].Base.u64; break;
3368 case IOMMU_MMIO_OFF_MARC_APER_RELOC_1: uReg = pThis->aMarcApers[1].Reloc.u64; break;
3369 case IOMMU_MMIO_OFF_MARC_APER_LEN_1: uReg = pThis->aMarcApers[1].Length.u64; break;
3370 case IOMMU_MMIO_OFF_MARC_APER_BAR_2: uReg = pThis->aMarcApers[2].Base.u64; break;
3371 case IOMMU_MMIO_OFF_MARC_APER_RELOC_2: uReg = pThis->aMarcApers[2].Reloc.u64; break;
3372 case IOMMU_MMIO_OFF_MARC_APER_LEN_2: uReg = pThis->aMarcApers[2].Length.u64; break;
3373 case IOMMU_MMIO_OFF_MARC_APER_BAR_3: uReg = pThis->aMarcApers[3].Base.u64; break;
3374 case IOMMU_MMIO_OFF_MARC_APER_RELOC_3: uReg = pThis->aMarcApers[3].Reloc.u64; break;
3375 case IOMMU_MMIO_OFF_MARC_APER_LEN_3: uReg = pThis->aMarcApers[3].Length.u64; break;
3376
3377 case IOMMU_MMIO_OFF_RSVD_REG: uReg = pThis->RsvdReg; break;
3378
3379 case IOMMU_MMIO_CMD_BUF_HEAD_PTR: uReg = pThis->CmdBufHeadPtr.u64; break;
3380 case IOMMU_MMIO_CMD_BUF_TAIL_PTR: uReg = pThis->CmdBufTailPtr.u64; break;
3381 case IOMMU_MMIO_EVT_LOG_HEAD_PTR: uReg = pThis->EvtLogHeadPtr.u64; break;
3382 case IOMMU_MMIO_EVT_LOG_TAIL_PTR: uReg = pThis->EvtLogTailPtr.u64; break;
3383
3384 case IOMMU_MMIO_OFF_STATUS: uReg = pThis->Status.u64; break;
3385
3386 case IOMMU_MMIO_OFF_PPR_LOG_HEAD_PTR: uReg = pThis->PprLogHeadPtr.u64; break;
3387 case IOMMU_MMIO_OFF_PPR_LOG_TAIL_PTR: uReg = pThis->PprLogTailPtr.u64; break;
3388
3389 case IOMMU_MMIO_OFF_GALOG_HEAD_PTR: uReg = pThis->GALogHeadPtr.u64; break;
3390 case IOMMU_MMIO_OFF_GALOG_TAIL_PTR: uReg = pThis->GALogTailPtr.u64; break;
3391
3392 case IOMMU_MMIO_OFF_PPR_LOG_B_HEAD_PTR: uReg = pThis->PprLogBHeadPtr.u64; break;
3393 case IOMMU_MMIO_OFF_PPR_LOG_B_TAIL_PTR: uReg = pThis->PprLogBTailPtr.u64; break;
3394
3395 case IOMMU_MMIO_OFF_EVT_LOG_B_HEAD_PTR: uReg = pThis->EvtLogBHeadPtr.u64; break;
3396 case IOMMU_MMIO_OFF_EVT_LOG_B_TAIL_PTR: uReg = pThis->EvtLogBTailPtr.u64; break;
3397
3398 case IOMMU_MMIO_OFF_PPR_LOG_AUTO_RESP: uReg = pThis->PprLogAutoResp.u64; break;
3399 case IOMMU_MMIO_OFF_PPR_LOG_OVERFLOW_EARLY: uReg = pThis->PprLogOverflowEarly.u64; break;
3400 case IOMMU_MMIO_OFF_PPR_LOG_B_OVERFLOW_EARLY: uReg = pThis->PprLogBOverflowEarly.u64; break;
3401
3402 /* Not implemented. */
3403 case IOMMU_MMIO_OFF_SMI_FLT_FIRST:
3404 case IOMMU_MMIO_OFF_SMI_FLT_LAST:
3405 {
3406 Log((IOMMU_LOG_PFX ": Reading unsupported register: SMI filter %u\n", (off - IOMMU_MMIO_OFF_SMI_FLT_FIRST) >> 3));
3407 uReg = 0;
3408 break;
3409 }
3410
3411 /* Unknown. */
3412 default:
3413 {
3414 Log((IOMMU_LOG_PFX ": Reading unknown register %u (%#x) -> 0\n", off, off));
3415 uReg = 0;
3416 return VINF_IOM_MMIO_UNUSED_00;
3417 }
3418 }
3419
3420 *puResult = uReg;
3421 return VINF_SUCCESS;
3422}
3423
3424
3425/**
3426 * Raises the MSI interrupt for the IOMMU device.
3427 *
3428 * @param pDevIns The IOMMU device instance.
3429 *
3430 * @thread Any.
3431 */
3432static void iommuAmdRaiseMsiInterrupt(PPDMDEVINS pDevIns)
3433{
3434 if (iommuAmdIsMsiEnabled(pDevIns))
3435 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
3436}
3437
3438
3439/**
3440 * Clears the MSI interrupt for the IOMMU device.
3441 *
3442 * @param pDevIns The IOMMU device instance.
3443 *
3444 * @thread Any.
3445 */
3446static void iommuAmdClearMsiInterrupt(PPDMDEVINS pDevIns)
3447{
3448 if (iommuAmdIsMsiEnabled(pDevIns))
3449 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
3450}
3451
3452
3453/**
3454 * Writes an entry to the event log in memory.
3455 *
3456 * @returns VBox status code.
3457 * @param pDevIns The IOMMU device instance.
3458 * @param pEvent The event to log.
3459 *
3460 * @thread Any.
3461 */
3462static int iommuAmdWriteEvtLogEntry(PPDMDEVINS pDevIns, PCEVT_GENERIC_T pEvent)
3463{
3464 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3465 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
3466
3467 /** @todo IOMMU: Consider locking here. */
3468
3469 /* Check if event logging is active and the log has not overflowed. */
3470 if ( Status.n.u1EvtLogRunning
3471 && !Status.n.u1EvtOverflow)
3472 {
3473 uint32_t const cbEvt = sizeof(*pEvent);
3474
3475 /* Get the offset we need to write the event to in memory (circular buffer offset). */
3476 uint32_t const offEvt = pThis->EvtLogTailPtr.n.off;
3477 Assert(!(offEvt & ~IOMMU_EVT_LOG_TAIL_PTR_VALID_MASK));
3478
3479 /* Ensure we have space in the event log. */
3480 uint32_t const cMaxEvts = iommuAmdGetBufMaxEntries(pThis->EvtLogBaseAddr.n.u4Len);
3481 uint32_t const cEvts = iommuAmdGetEvtLogEntryCount(pThis);
3482 if (cEvts + 1 < cMaxEvts)
3483 {
3484 /* Write the event log entry to memory. */
3485 RTGCPHYS const GCPhysEvtLog = pThis->EvtLogBaseAddr.n.u40Base << X86_PAGE_4K_SHIFT;
3486 RTGCPHYS const GCPhysEvtLogEntry = GCPhysEvtLog + offEvt;
3487 int rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhysEvtLogEntry, pEvent, cbEvt);
3488 if (RT_FAILURE(rc))
3489 Log((IOMMU_LOG_PFX ": Failed to write event log entry at %#RGp. rc=%Rrc\n", GCPhysEvtLogEntry, rc));
3490
3491 /* Increment the event log tail pointer. */
3492 uint32_t const cbEvtLog = iommuAmdGetBufLength(pThis->EvtLogBaseAddr.n.u4Len);
3493 pThis->EvtLogTailPtr.n.off = (offEvt + cbEvt) % cbEvtLog;
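            /* E.g. (hypothetical sizes): with a 4K event log (cbEvtLog = 4096) and a tail at
               offset 4080, writing one 16-byte event entry wraps the tail back to offset 0. */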
3494
3495 /* Indicate that an event log entry was written. */
3496 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_EVT_LOG_INTR);
3497
3498 /* Check and signal an interrupt if software wants to receive one when an event log entry is written. */
3499 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
3500 if (Ctrl.n.u1EvtIntrEn)
3501 iommuAmdRaiseMsiInterrupt(pDevIns);
3502 }
3503 else
3504 {
3505 /* Indicate that the event log has overflowed. */
3506 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_EVT_LOG_OVERFLOW);
3507
3508 /* Check and signal an interrupt if software wants to receive one when the event log has overflowed. */
3509 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
3510 if (Ctrl.n.u1EvtIntrEn)
3511 iommuAmdRaiseMsiInterrupt(pDevIns);
3512 }
3513 }
3514    return VINF_SUCCESS;
}
3515
3516
3517/**
3518 * Sets an event in the hardware error registers.
3519 *
3520 * @param pDevIns The IOMMU device instance.
3521 * @param pEvent The event.
3522 *
3523 * @thread Any.
3524 */
3525static void iommuAmdSetHwError(PPDMDEVINS pDevIns, PCEVT_GENERIC_T pEvent)
3526{
3527 /** @todo IOMMU: We should probably lock the device here */
3528 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3529 if (pThis->ExtFeat.n.u1HwErrorSup)
3530 {
3531 if (pThis->HwEvtStatus.n.u1Valid)
3532 pThis->HwEvtStatus.n.u1Overflow = 1;
3533 pThis->HwEvtStatus.n.u1Valid = 1;
3534 pThis->HwEvtHi.u64 = RT_MAKE_U64(pEvent->au32[0], pEvent->au32[1]);
3535 pThis->HwEvtLo = RT_MAKE_U64(pEvent->au32[2], pEvent->au32[3]);
3536        Assert(   pThis->HwEvtHi.n.u4EvtCode == IOMMU_EVT_DEV_TAB_HW_ERROR
                   || pThis->HwEvtHi.n.u4EvtCode == IOMMU_EVT_PAGE_TAB_HW_ERROR);
3537 }
3538}
3539
3540
3541/**
3542 * Initializes a PAGE_TAB_HARDWARE_ERROR event.
3543 *
3544 * @param uDevId The device ID.
3545 * @param uDomainId The domain ID.
3546 * @param GCPhysPtEntity The system physical address of the page table
3547 * entity.
3548 * @param enmOp The IOMMU operation being performed.
3549 * @param pEvtPageTabHwErr Where to store the initialized event.
3550 */
3551static void iommuAmdInitPageTabHwErrorEvent(uint16_t uDevId, uint16_t uDomainId, RTGCPHYS GCPhysPtEntity, IOMMUOP enmOp,
3552 PEVT_PAGE_TAB_HW_ERR_T pEvtPageTabHwErr)
3553{
3554 memset(pEvtPageTabHwErr, 0, sizeof(*pEvtPageTabHwErr));
3555 pEvtPageTabHwErr->n.u16DevId = uDevId;
3556 pEvtPageTabHwErr->n.u16DomainOrPasidLo = uDomainId;
3557 //pEvtPageTabHwErr->n.u1GuestOrNested = 0;
3558 pEvtPageTabHwErr->n.u1Interrupt = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
3559 pEvtPageTabHwErr->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
3560 pEvtPageTabHwErr->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
3561    pEvtPageTabHwErr->n.u2Type              = enmOp == IOMMUOP_CMD ? HWEVTTYPE_DATA_ERROR : HWEVTTYPE_TARGET_ABORT;
3562 pEvtPageTabHwErr->n.u4EvtCode = IOMMU_EVT_PAGE_TAB_HW_ERROR;
3563 pEvtPageTabHwErr->n.u64Addr = GCPhysPtEntity;
3564}
3565
3566
3567/**
3568 * Raises a PAGE_TAB_HARDWARE_ERROR event.
3569 *
3570 * @param pDevIns The IOMMU device instance.
3571 * @param enmOp The IOMMU operation being performed.
3572 * @param pEvtPageTabHwErr The page table hardware error event.
3573 * @param enmEvtType The hardware error event type.
3574 */
3575static void iommuAmdRaisePageTabHwErrorEvent(PPDMDEVINS pDevIns, IOMMUOP enmOp, PEVT_PAGE_TAB_HW_ERR_T pEvtPageTabHwErr,
3576 EVT_HW_ERR_TYPE_T enmEvtType)
3577{
3578 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_PAGE_TAB_HW_ERR_T));
3579 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtPageTabHwErr;
3580
3581 iommuAmdSetHwError(pDevIns, (PCEVT_GENERIC_T)pEvent);
3582 iommuAmdWriteEvtLogEntry(pDevIns, (PCEVT_GENERIC_T)pEvent);
3583 if (enmOp != IOMMUOP_CMD)
3584 iommuAmdSetPciTargetAbort(pDevIns);
3585
3586 Log((IOMMU_LOG_PFX ": Raised PAGE_TAB_HARDWARE_ERROR. uDevId=%#x uDomainId=%#x GCPhysPtEntity=%#RGp enmOp=%u enmType=%u\n",
3587 pEvtPageTabHwErr->n.u16DevId, pEvtPageTabHwErr->n.u16DomainOrPasidLo, pEvtPageTabHwErr->n.u64Addr, enmOp, enmEvtType));
3588 NOREF(enmEvtType);
3589}
3590
3591
3592/**
3593 * Initializes a DEV_TAB_HARDWARE_ERROR event.
3594 *
3595 * @param uDevId The device ID.
3596 * @param GCPhysDte The system physical address of the failed device table
3597 * access.
3598 * @param enmOp The IOMMU operation being performed.
3599 * @param pEvtDevTabHwErr Where to store the initialized event.
3600 */
3601static void iommuAmdInitDevTabHwErrorEvent(uint16_t uDevId, RTGCPHYS GCPhysDte, IOMMUOP enmOp,
3602 PEVT_DEV_TAB_HW_ERROR_T pEvtDevTabHwErr)
3603{
3604 memset(pEvtDevTabHwErr, 0, sizeof(*pEvtDevTabHwErr));
3605 pEvtDevTabHwErr->n.u16DevId = uDevId;
3606 pEvtDevTabHwErr->n.u1Intr = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
3607 /** @todo r=ramshankar: Any other transaction type that can set read/write bit? */
3608 pEvtDevTabHwErr->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
3609 pEvtDevTabHwErr->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
3610 pEvtDevTabHwErr->n.u2Type = enmOp == IOMMUOP_CMD ? HWEVTTYPE_DATA_ERROR : HWEVTTYPE_TARGET_ABORT;
3611 pEvtDevTabHwErr->n.u4EvtCode = IOMMU_EVT_DEV_TAB_HW_ERROR;
3612 pEvtDevTabHwErr->n.u64Addr = GCPhysDte;
3613}
3614
3615
3616/**
3617 * Raises a DEV_TAB_HARDWARE_ERROR event.
3618 *
3619 * @param pDevIns The IOMMU device instance.
3620 * @param enmOp The IOMMU operation being performed.
3621 * @param pEvtDevTabHwErr The device table hardware error event.
3622 * @param enmEvtType The hardware error event type.
3623 */
3624static void iommuAmdRaiseDevTabHwErrorEvent(PPDMDEVINS pDevIns, IOMMUOP enmOp, PEVT_DEV_TAB_HW_ERROR_T pEvtDevTabHwErr,
3625 EVT_HW_ERR_TYPE_T enmEvtType)
3626{
3627 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_DEV_TAB_HW_ERROR_T));
3628 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtDevTabHwErr;
3629 iommuAmdSetHwError(pDevIns, (PCEVT_GENERIC_T)pEvent);
3630 iommuAmdWriteEvtLogEntry(pDevIns, (PCEVT_GENERIC_T)pEvent);
3631 if (enmOp != IOMMUOP_CMD)
3632 iommuAmdSetPciTargetAbort(pDevIns);
3633
3634 Log((IOMMU_LOG_PFX ": Raised DEV_TAB_HARDWARE_ERROR. uDevId=%#x GCPhysDte=%#RGp enmOp=%u enmType=%u\n",
3635 pEvtDevTabHwErr->n.u16DevId, pEvtDevTabHwErr->n.u64Addr, enmOp, enmEvtType));
3636 NOREF(enmEvtType);
3637}
3638
3639
3640/**
3641 * Initializes an ILLEGAL_DEV_TABLE_ENTRY event.
3642 *
3643 * @param uDevId The device ID.
3644 * @param uIova The I/O virtual address.
3645 * @param fRsvdNotZero Whether reserved bits are not zero. Pass @c false if the
3646 * event was caused by an invalid level encoding in the
3647 * DTE.
3648 * @param enmOp The IOMMU operation being performed.
3649 * @param pEvtIllegalDte Where to store the initialized event.
3650 */
3651static void iommuAmdInitIllegalDteEvent(uint16_t uDevId, uint64_t uIova, bool fRsvdNotZero, IOMMUOP enmOp,
3652 PEVT_ILLEGAL_DTE_T pEvtIllegalDte)
3653{
3654 memset(pEvtIllegalDte, 0, sizeof(*pEvtIllegalDte));
3655 pEvtIllegalDte->n.u16DevId = uDevId;
3656 pEvtIllegalDte->n.u1Interrupt = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
3657 pEvtIllegalDte->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
3658 pEvtIllegalDte->n.u1RsvdNotZero = fRsvdNotZero;
3659 pEvtIllegalDte->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
3660 pEvtIllegalDte->n.u4EvtCode = IOMMU_EVT_ILLEGAL_DEV_TAB_ENTRY;
3661 pEvtIllegalDte->n.u64Addr = uIova & ~UINT64_C(0x3);
3662 /** @todo r=ramshankar: Not sure why the last 2 bits are marked as reserved by the
3663 * IOMMU spec here but not for this field for I/O page fault event. */
3664 Assert(!(uIova & UINT64_C(0x3)));
3665}
3666
3667
3668/**
3669 * Raises an ILLEGAL_DEV_TABLE_ENTRY event.
3670 *
3671 * @param pDevIns The IOMMU instance data.
3672 * @param enmOp The IOMMU operation being performed.
3673 * @param pEvtIllegalDte The illegal device table entry event.
3674 * @param enmEvtType The illegal DTE event type.
3675 */
3676static void iommuAmdRaiseIllegalDteEvent(PPDMDEVINS pDevIns, IOMMUOP enmOp, PCEVT_ILLEGAL_DTE_T pEvtIllegalDte,
3677 EVT_ILLEGAL_DTE_TYPE_T enmEvtType)
3678{
3679 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_ILLEGAL_DTE_T));
3680 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIllegalDte;
3681 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
3682 if (enmOp != IOMMUOP_CMD)
3683 iommuAmdSetPciTargetAbort(pDevIns);
3684
3685 Log((IOMMU_LOG_PFX ": Raised ILLEGAL_DTE_EVENT. uDevId=%#x uIova=%#RX64 enmOp=%u enmEvtType=%u\n", pEvtIllegalDte->n.u16DevId,
3686 pEvtIllegalDte->n.u64Addr, enmOp, enmEvtType));
3687 NOREF(enmEvtType);
3688}
3689
3690
3691/**
3692 * Initializes an IO_PAGE_FAULT event.
3693 *
3694 * @param uDevId The device ID.
3695 * @param uDomainId The domain ID.
3696 * @param uIova The I/O virtual address being accessed.
3697 * @param fPresent Transaction to a page marked as present (including
3698 * DTE.V=1) or interrupt marked as remapped
3699 * (IRTE.RemapEn=1).
3700 * @param fRsvdNotZero Whether reserved bits are not zero. Pass @c false if
3701 * the I/O page fault was caused by invalid level
3702 * encoding.
3703 * @param fPermDenied Permission denied for the address being accessed.
3704 * @param enmOp The IOMMU operation being performed.
3705 * @param pEvtIoPageFault Where to store the initialized event.
3706 */
3707static void iommuAmdInitIoPageFaultEvent(uint16_t uDevId, uint16_t uDomainId, uint64_t uIova, bool fPresent, bool fRsvdNotZero,
3708 bool fPermDenied, IOMMUOP enmOp, PEVT_IO_PAGE_FAULT_T pEvtIoPageFault)
3709{
3710 Assert(!fPermDenied || fPresent);
3711 memset(pEvtIoPageFault, 0, sizeof(*pEvtIoPageFault));
3712 pEvtIoPageFault->n.u16DevId = uDevId;
3713 //pEvtIoPageFault->n.u4PasidHi = 0;
3714 pEvtIoPageFault->n.u16DomainOrPasidLo = uDomainId;
3715 //pEvtIoPageFault->n.u1GuestOrNested = 0;
3716 //pEvtIoPageFault->n.u1NoExecute = 0;
3717 //pEvtIoPageFault->n.u1User = 0;
3718 pEvtIoPageFault->n.u1Interrupt = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
3719 pEvtIoPageFault->n.u1Present = fPresent;
3720 pEvtIoPageFault->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
3721 pEvtIoPageFault->n.u1PermDenied = fPermDenied;
3722 pEvtIoPageFault->n.u1RsvdNotZero = fRsvdNotZero;
3723 pEvtIoPageFault->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
3724 pEvtIoPageFault->n.u4EvtCode = IOMMU_EVT_IO_PAGE_FAULT;
3725 pEvtIoPageFault->n.u64Addr = uIova;
3726}
3727
3728
3729/**
3730 * Raises an IO_PAGE_FAULT event.
3731 *
3732 * @param pDevIns The IOMMU instance data.
3733 * @param pDte The device table entry. Optional, can be NULL
3734 * depending on @a enmOp.
3735 * @param pIrte The interrupt remapping table entry. Optional, can
3736 * be NULL depending on @a enmOp.
3737 * @param enmOp The IOMMU operation being performed.
3738 * @param pEvtIoPageFault The I/O page fault event.
3739 * @param enmEvtType The I/O page fault event type.
3740 *
3741 * @thread Any.
3742 */
3743static void iommuAmdRaiseIoPageFaultEvent(PPDMDEVINS pDevIns, PCDTE_T pDte, PCIRTE_T pIrte, IOMMUOP enmOp,
3744 PCEVT_IO_PAGE_FAULT_T pEvtIoPageFault, EVT_IO_PAGE_FAULT_TYPE_T enmEvtType)
3745{
3746 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_IO_PAGE_FAULT_T));
3747 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIoPageFault;
3748
3749 bool fSuppressEvtLogging = false;
3750 if ( enmOp == IOMMUOP_MEM_READ
3751 || enmOp == IOMMUOP_MEM_WRITE)
3752 {
3753 if ( pDte
3754 && pDte->n.u1Valid)
3755 {
3756 fSuppressEvtLogging = pDte->n.u1SuppressAllPfEvents;
3757 /** @todo IOMMU: Implement DTE.SE bit, i.e. device ID specific I/O page fault
3758 * suppression. Perhaps will be possible when we complete IOTLB/cache
3759 * handling. */
3760 }
3761 }
3762 else if (enmOp == IOMMUOP_INTR_REQ)
3763 {
3764 if ( pDte
3765 && pDte->n.u1IntrMapValid)
3766 fSuppressEvtLogging = !pDte->n.u1IgnoreUnmappedIntrs;
3767
3768 if ( !fSuppressEvtLogging
3769 && pIrte)
3770 fSuppressEvtLogging = pIrte->n.u1SuppressPf;
3771 }
3772 /* else: Events are never suppressed for commands. */
3773
3774 switch (enmEvtType)
3775 {
3776 case kIoPageFaultType_PermDenied:
3777 {
3778 /* Cannot be triggered by a command. */
3779 Assert(enmOp != IOMMUOP_CMD);
3780 RT_FALL_THRU();
3781 }
3782 case kIoPageFaultType_DteRsvdPagingMode:
3783 case kIoPageFaultType_PteInvalidPageSize:
3784 case kIoPageFaultType_PteInvalidLvlEncoding:
3785 case kIoPageFaultType_SkippedLevelIovaNotZero:
3786 case kIoPageFaultType_PteRsvdNotZero:
3787 case kIoPageFaultType_PteValidNotSet:
3788 case kIoPageFaultType_DteTranslationDisabled:
3789 case kIoPageFaultType_PasidInvalidRange:
3790 {
3791 /*
3792 * For a translation request, the IOMMU doesn't signal an I/O page fault nor does it
3793 * create an event log entry. See AMD spec. 2.1.3.2 "I/O Page Faults".
3794 */
3795 if (enmOp != IOMMUOP_TRANSLATE_REQ)
3796 {
3797 if (!fSuppressEvtLogging)
3798 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
3799 if (enmOp != IOMMUOP_CMD)
3800 iommuAmdSetPciTargetAbort(pDevIns);
3801 }
3802 break;
3803 }
3804
3805 case kIoPageFaultType_UserSupervisor:
3806 {
3807 /* Access is blocked and only creates an event log entry. */
3808 if (!fSuppressEvtLogging)
3809 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
3810 break;
3811 }
3812
3813 case kIoPageFaultType_IrteAddrInvalid:
3814 case kIoPageFaultType_IrteRsvdNotZero:
3815 case kIoPageFaultType_IrteRemapEn:
3816 case kIoPageFaultType_IrteRsvdIntType:
3817 case kIoPageFaultType_IntrReqAborted:
3818 case kIoPageFaultType_IntrWithPasid:
3819 {
3820            /* Only triggered by interrupt requests. */
3821 Assert(enmOp == IOMMUOP_INTR_REQ);
3822 if (!fSuppressEvtLogging)
3823 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
3824 iommuAmdSetPciTargetAbort(pDevIns);
3825 break;
3826 }
3827
3828 case kIoPageFaultType_SmiFilterMismatch:
3829 {
3830 /* Not supported and probably will never be, assert. */
3831 AssertMsgFailed(("kIoPageFaultType_SmiFilterMismatch - Upstream SMI requests not supported/implemented."));
3832 break;
3833 }
3834
3835 case kIoPageFaultType_DevId_Invalid:
3836 {
3837 /* Cannot be triggered by a command. */
3838 Assert(enmOp != IOMMUOP_CMD);
3839 Assert(enmOp != IOMMUOP_TRANSLATE_REQ); /** @todo IOMMU: We don't support translation requests yet. */
3840 if (!fSuppressEvtLogging)
3841 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
3842 if ( enmOp == IOMMUOP_MEM_READ
3843 || enmOp == IOMMUOP_MEM_WRITE)
3844 iommuAmdSetPciTargetAbort(pDevIns);
3845 break;
3846 }
3847 }
3848}
3849
3850
3851/**
3852 * Initializes an IOTLB entry.
3853 *
3854 * @param GCPhysSpa The translated system physical address.
3855 * @param cShift The number of offset bits in the system physical address.
3856 * @param fIoPerm The I/O access permissions (IOMMU_IO_PERM_XXX).
3857 * @param pIotlbe Where to store the initialized IOTLB entry.
3858 */
3859static void iommuAmdInitIotlbe(RTGCPHYS GCPhysSpa, uint8_t cShift, uint8_t fIoPerm, PIOTLBE_T pIotlbe)
3860{
3861 pIotlbe->uMagic = IOMMU_IOTLBE_MAGIC;
3862 pIotlbe->uRsvd0 = 0;
3863 pIotlbe->fIoPerm = fIoPerm;
3864 pIotlbe->cShift = cShift;
3865 pIotlbe->GCPhysSpa = GCPhysSpa;
3866}
3867
3868
3869/**
3870 * Updates an IOTLB entry.
3871 *
3872 * @param GCPhysSpa The translated system physical address.
3873 * @param cShift The number of offset bits in the system physical address.
3874 * @param fIoPerm The I/O access permissions (IOMMU_IO_PERM_XXX).
3875 * @param pIotlbe The IOTLB entry to update.
3876 */
3877static void iommuAmdUpdateIotlbe(RTGCPHYS GCPhysSpa, uint8_t cShift, uint8_t fIoPerm, PIOTLBE_T pIotlbe)
3878{
3879 Assert(pIotlbe->uMagic == IOMMU_IOTLBE_MAGIC);
3880 pIotlbe->fIoPerm = fIoPerm;
3881 pIotlbe->cShift = cShift;
3882 pIotlbe->GCPhysSpa = GCPhysSpa;
3883}
3884
3885
3886/**
3887 * Returns whether the I/O virtual address is to be excluded from translation and
3888 * permission checks.
3889 *
3890 * @returns @c true if the DVA is excluded, @c false otherwise.
3891 * @param pThis The IOMMU device state.
3892 * @param pDte The device table entry.
3893 * @param uIova The I/O virtual address.
3894 *
3895 * @remarks Ensure the exclusion range is enabled prior to calling this function.
3896 *
3897 * @thread Any.
3898 */
3899static bool iommuAmdIsDvaInExclRange(PCIOMMU pThis, PCDTE_T pDte, uint64_t uIova)
3900{
3901 /* Ensure the exclusion range is enabled. */
3902 Assert(pThis->ExclRangeBaseAddr.n.u1ExclEnable);
3903
3904 /* Check if the IOVA falls within the exclusion range. */
3905 uint64_t const uIovaExclFirst = pThis->ExclRangeBaseAddr.n.u40ExclRangeBase << X86_PAGE_4K_SHIFT;
3906 uint64_t const uIovaExclLast = pThis->ExclRangeLimit.n.u52ExclLimit;
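    /* Worked example on the locals above (made-up values): with uIovaExclFirst = 0x2000 and
       uIovaExclLast = 0x2fff, an access to uIova = 0x2800 lies inside the exclusion range and
       is forwarded untranslated when AllowAll or the DTE's AllowExclusion bit is set. */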
3907    if (uIova >= uIovaExclFirst && uIova <= uIovaExclLast)
3908 {
3909 /* Check if device access to addresses in the exclusion range can be forwarded untranslated. */
3910 if ( pThis->ExclRangeBaseAddr.n.u1AllowAll
3911 || pDte->n.u1AllowExclusion)
3912 return true;
3913 }
3914 return false;
3915}
3916
3917
3918/**
3919 * Reads a device table entry from guest memory given the device ID.
3920 *
3921 * @returns VBox status code.
3922 * @param pDevIns The IOMMU device instance.
3923 * @param uDevId The device ID.
3924 * @param enmOp The IOMMU operation being performed.
3925 * @param pDte Where to store the device table entry.
3926 *
3927 * @thread Any.
3928 */
3929static int iommuAmdReadDte(PPDMDEVINS pDevIns, uint16_t uDevId, IOMMUOP enmOp, PDTE_T pDte)
3930{
3931 PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3932 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
3933
3934 uint8_t const idxSegsEn = Ctrl.n.u3DevTabSegEn;
3935 Assert(idxSegsEn < RT_ELEMENTS(g_auDevTabSegMasks));
3936
3937    uint8_t const idxSeg = (uDevId & g_auDevTabSegMasks[idxSegsEn]) >> 13;
3938 Assert(idxSeg < RT_ELEMENTS(pThis->aDevTabBaseAddrs));
3939
3940 RTGCPHYS const GCPhysDevTab = pThis->aDevTabBaseAddrs[idxSeg].n.u40Base << X86_PAGE_4K_SHIFT;
3941 uint16_t const offDte = uDevId & ~g_auDevTabSegMasks[idxSegsEn];
3942 RTGCPHYS const GCPhysDte = GCPhysDevTab + offDte;
3943
3944 Assert(!(GCPhysDevTab & X86_PAGE_4K_OFFSET_MASK));
3945 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysDte, pDte, sizeof(*pDte));
3946 if (RT_FAILURE(rc))
3947 {
3948 Log((IOMMU_LOG_PFX ": Failed to read device table entry at %#RGp. rc=%Rrc -> DevTabHwError\n", GCPhysDte, rc));
3949
3950 EVT_DEV_TAB_HW_ERROR_T EvtDevTabHwErr;
3951 iommuAmdInitDevTabHwErrorEvent(uDevId, GCPhysDte, enmOp, &EvtDevTabHwErr);
3952 iommuAmdRaiseDevTabHwErrorEvent(pDevIns, enmOp, &EvtDevTabHwErr, kHwErrType_TargetAbort);
3953 return VERR_IOMMU_IPE_1;
3954 }
3955
3956 return rc;
3957}
3958
3959
3960/**
3961 * Walks the I/O page table(s) to translate the I/O virtual address to a system
3962 * physical address.
3963 *
3964 * @returns VBox status code.
3965 * @param pDevIns The IOMMU device instance.
3966 * @param uIova The I/O virtual address to translate. Must be 4K aligned!
3967 * @param uDevId The device ID.
3968 * @param fAccess The access permissions (IOMMU_IO_PERM_XXX). This is the
3969 * permissions for the access being made.
3970 * @param pDte The device table entry.
3971 * @param enmOp The IOMMU operation being performed.
3972 * @param pIotlbe The IOTLB entry to update with the results of the
3973 * translation.
3974 *
3975 * @thread Any.
3976 */
3977static int iommuAmdWalkIoPageTables(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, uint8_t fAccess, PCDTE_T pDte,
3978 IOMMUOP enmOp, PIOTLBE_T pIotlbe)
3979{
3980 Assert(pDte->n.u1Valid);
3981 /* The input I/O virtual address must be 4K page aligned. */
3982 Assert(!(uIova & X86_PAGE_4K_OFFSET_MASK));
3983
3984 /* If the translation is not valid, raise an I/O page fault. */
3985 if (pDte->n.u1TranslationValid)
3986 { /* likely */ }
3987 else
3988 {
3989 /** @todo r=ramshankar: The AMD IOMMU spec. says page walk is terminated but
3990 * doesn't explicitly say whether an I/O page fault is raised. From other
3991 * places in the spec. it seems early page walk terminations (starting with
3992 * the DTE) return the state computed so far and raises an I/O page fault. So
3993 * returning an invalid translation rather than skipping translation. */
3994 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
3995 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, false /* fPresent */, false /* fRsvdNotZero */,
3996 false /* fPermDenied */, enmOp, &EvtIoPageFault);
3997 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
3998 kIoPageFaultType_DteTranslationDisabled);
3999 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4000 }
4001
4002 /* If the root page table level is 0, translation is skipped and access is controlled by the permission bits. */
4003 uint8_t const uMaxLevel = pDte->n.u3Mode;
4004 if (uMaxLevel == 0)
4005 {
4006 uint8_t const fDtePerm = (pDte->au64[0] >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
4007 if ((fAccess & fDtePerm) != fAccess)
4008 {
4009 Log((IOMMU_LOG_PFX ": Access denied for IOVA (%#RX64). fAccess=%#x fDtePerm=%#x\n", uIova, fAccess, fDtePerm));
4010 return VERR_IOMMU_ADDR_ACCESS_DENIED;
4011 }
4012 iommuAmdUpdateIotlbe(uIova, 0 /* cShift */, fDtePerm, pIotlbe);
4013 return VINF_SUCCESS;
4014 }
4015
4016 /* If the root page table level exceeds the allowed host-address translation level, page walk is terminated. */
4017 if (uMaxLevel > IOMMU_MAX_HOST_PT_LEVEL)
4018 {
4019 /** @todo r=ramshankar: I cannot make out from the AMD IOMMU spec. if I should be
4020 * raising an ILLEGAL_DEV_TABLE_ENTRY event or an IO_PAGE_FAULT event here.
4021 * I'm just going with I/O page fault. */
4022 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
4023 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
4024 false /* fPermDenied */, enmOp, &EvtIoPageFault);
4025 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
4026 kIoPageFaultType_PteInvalidLvlEncoding);
4027 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4028 }
4029
4030 /* Check permissions bits of the root page table. */
4031 uint8_t const fPtePerm = (pDte->au64[0] >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
4032 if ((fAccess & fPtePerm) != fAccess)
4033 {
4034 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
4035 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
4036 true /* fPermDenied */, enmOp, &EvtIoPageFault);
4037 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault, kIoPageFaultType_PermDenied);
4038 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4039 }
4040
4041 /** @todo IOMMU: Split the rest of this into a separate function called
4042 * iommuAmdWalkIoPageDirectory() and call it for multi-page accesses. We can
4043 * avoid re-checking the DTE root-page table entry every time. */
4044
4045 /* The virtual address bits indexing table. */
4046 static uint8_t const s_acIovaLevelShifts[] = { 0, 12, 21, 30, 39, 48, 57, 0 };
4047 static uint64_t const s_auIovaLevelMasks[] = { UINT64_C(0x0000000000000000),
4048 UINT64_C(0x00000000001ff000),
4049 UINT64_C(0x000000003fe00000),
4050 UINT64_C(0x0000007fc0000000),
4051 UINT64_C(0x0000ff8000000000),
4052 UINT64_C(0x01ff000000000000),
4053 UINT64_C(0xfe00000000000000),
4054 UINT64_C(0x0000000000000000) };
4055 AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) == RT_ELEMENTS(s_auIovaLevelMasks));
4056 AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) > IOMMU_MAX_HOST_PT_LEVEL);
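    /*
     * Worked example (made-up IOVA): for uIova = 0x123456000 the 9-bit table index per
     * level, i.e. (uIova >> s_acIovaLevelShifts[uLevel]) & 0x1ff, works out to:
     *   level 1 (shift 12): 0x056
     *   level 2 (shift 21): 0x11a
     *   level 3 (shift 30): 0x004
     * which matches the bit ranges selected by s_auIovaLevelMasks above.
     */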
4057
4058 /* Traverse the I/O page table starting with the page directory in the DTE. */
4059 IOPTENTITY_T PtEntity;
4060 PtEntity.u64 = pDte->au64[0];
4061 for (;;)
4062 {
4063 /* Figure out the system physical address of the page table at the current level. */
4064 uint8_t const uLevel = PtEntity.n.u3NextLevel;
4065
4066 /* Read the page table entity at the current level. */
4067 {
4068 Assert(uLevel > 0 && uLevel < RT_ELEMENTS(s_acIovaLevelShifts));
4069 Assert(uLevel <= IOMMU_MAX_HOST_PT_LEVEL);
4070 uint16_t const idxPte = (uIova >> s_acIovaLevelShifts[uLevel]) & UINT64_C(0x1ff);
4071 uint64_t const offPte = idxPte << 3;
4072 RTGCPHYS const GCPhysPtEntity = (PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK) + offPte;
4073 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysPtEntity, &PtEntity.u64, sizeof(PtEntity));
4074 if (RT_FAILURE(rc))
4075 {
4076 Log((IOMMU_LOG_PFX ": Failed to read page table entry at %#RGp. rc=%Rrc -> PageTabHwError\n", GCPhysPtEntity, rc));
4077 EVT_PAGE_TAB_HW_ERR_T EvtPageTabHwErr;
4078 iommuAmdInitPageTabHwErrorEvent(uDevId, pDte->n.u16DomainId, GCPhysPtEntity, enmOp, &EvtPageTabHwErr);
4079 iommuAmdRaisePageTabHwErrorEvent(pDevIns, enmOp, &EvtPageTabHwErr, kHwErrType_TargetAbort);
4080 return VERR_IOMMU_IPE_2;
4081 }
4082 }
4083
4084 /* Check present bit. */
4085 if (PtEntity.n.u1Present)
4086 { /* likely */ }
4087 else
4088 {
4089 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
4090 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, false /* fPresent */, false /* fRsvdNotZero */,
4091 false /* fPermDenied */, enmOp, &EvtIoPageFault);
4092 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault, kIoPageFaultType_PermDenied);
4093 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4094 }
4095
4096 /* Check permission bits. */
4097 uint8_t const fPtePerm = (PtEntity.u64 >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
4098 if ((fAccess & fPtePerm) == fAccess)
4099 { /* likely */ }
4100 else
4101 {
4102 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
4103 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
4104 true /* fPermDenied */, enmOp, &EvtIoPageFault);
4105 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault, kIoPageFaultType_PermDenied);
4106 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4107 }
4108
4109 /* If this is a PTE, we're at the final level and we're done. */
4110 uint8_t const uNextLevel = PtEntity.n.u3NextLevel;
4111 if (uNextLevel == 0)
4112 {
4113 /* The page size of the translation is the default (4K). */
4114 pIotlbe->GCPhysSpa = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
4115 pIotlbe->cShift = X86_PAGE_4K_SHIFT;
4116 pIotlbe->fIoPerm = fPtePerm;
4117 return VINF_SUCCESS;
4118 }
4119 if (uNextLevel == 7)
4120 {
4121            /* The default page size of the translation is overridden. */
4122 RTGCPHYS const GCPhysPte = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
4123 uint8_t cShift = X86_PAGE_4K_SHIFT;
4124 while (GCPhysPte & RT_BIT_64(cShift++))
4125 ;
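                /* E.g. (made-up PTE address bits): if bits 12 and 13 of GCPhysPte are set and
                   bit 14 is clear, the loop exits with cShift = 15, i.e. a 32K translation
                   (the post-increment also advances cShift on the terminating test). */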
4126
4127            /* The page size must be larger than the default size for this level and smaller than the default size of the next higher level. */
4128 Assert(uLevel < IOMMU_MAX_HOST_PT_LEVEL); /* PTE at level 6 handled outside the loop, uLevel should be <= 5. */
4129 if ( cShift > s_acIovaLevelShifts[uLevel]
4130 && cShift < s_acIovaLevelShifts[uLevel + 1])
4131 {
4132 pIotlbe->GCPhysSpa = GCPhysPte;
4133 pIotlbe->cShift = cShift;
4134 pIotlbe->fIoPerm = fPtePerm;
4135 return VINF_SUCCESS;
4136 }
4137
4138 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
4139 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
4140 false /* fPermDenied */, enmOp, &EvtIoPageFault);
4141 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
4142 kIoPageFaultType_PteInvalidPageSize);
4143 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4144 }
4145
4146 /* Validate the next level encoding of the PDE. */
4147#if IOMMU_MAX_HOST_PT_LEVEL < 6
4148 if (uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL)
4149 { /* likely */ }
4150 else
4151 {
4152 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
4153 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
4154 false /* fPermDenied */, enmOp, &EvtIoPageFault);
4155 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
4156 kIoPageFaultType_PteInvalidLvlEncoding);
4157 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4158 }
4159#else
4160 Assert(uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL);
4161#endif
4162
4163 /* Validate level transition. */
4164 if (uNextLevel < uLevel)
4165 { /* likely */ }
4166 else
4167 {
4168 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
4169 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
4170 false /* fPermDenied */, enmOp, &EvtIoPageFault);
4171 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
4172 kIoPageFaultType_PteInvalidLvlEncoding);
4173 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4174 }
4175
4176 /* Ensure IOVA bits of skipped levels are zero. */
4177 Assert(uLevel > 0);
4178 uint64_t uIovaSkipMask = 0;
4179 for (unsigned idxLevel = uLevel - 1; idxLevel > uNextLevel; idxLevel--)
4180 uIovaSkipMask |= s_auIovaLevelMasks[idxLevel];
4181 if (!(uIova & uIovaSkipMask))
4182 { /* likely */ }
4183 else
4184 {
4185 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
4186 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
4187 false /* fPermDenied */, enmOp, &EvtIoPageFault);
4188 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
4189 kIoPageFaultType_SkippedLevelIovaNotZero);
4190 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4191 }
4192
4193 /* Continue with traversing the page directory at this level. */
4194 }
4195
4196 /* Shouldn't really get here. */
4197 return VERR_IOMMU_IPE_3;
4198}
4199
4200
4201/**
4202 * Looks up an I/O virtual address from the device table(s).
4203 *
4204 * @returns VBox status code.
4205 * @param pDevIns The IOMMU instance data.
4206 * @param uDevId The device ID.
4207 * @param uIova The I/O virtual address to lookup.
4208 * @param cbAccess The size of the access.
4209 * @param fAccess The access permissions (IOMMU_IO_PERM_XXX). This is the
4210 * permissions for the access being made.
4211 * @param enmOp The IOMMU operation being performed.
4212 * @param pGCPhysSpa Where to store the translated system physical address. Only
4213 * valid when translation succeeds and VINF_SUCCESS is
4214 * returned!
4215 *
4216 * @thread Any.
4217 */
4218static int iommuAmdLookupDeviceTables(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, size_t cbAccess, uint8_t fAccess,
4219 IOMMUOP enmOp, PRTGCPHYS pGCPhysSpa)
4220{
4221 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
4222
4223 /* Read the device table entry from memory. */
4224 DTE_T Dte;
4225 int rc = iommuAmdReadDte(pDevIns, uDevId, enmOp, &Dte);
4226 if (RT_SUCCESS(rc))
4227 {
4228        /* If the DTE is not valid, addresses are forwarded without translation. */
4229 if (Dte.n.u1Valid)
4230 { /* likely */ }
4231 else
4232 {
4233 *pGCPhysSpa = uIova;
4234 return VINF_SUCCESS;
4235 }
4236
4237 /* Validate bits 127:0 of the device table entry when DTE.V is 1. */
4238 uint64_t const fRsvd0 = Dte.au64[0] & ~(IOMMU_DTE_QWORD_0_VALID_MASK & ~IOMMU_DTE_QWORD_0_FEAT_MASK);
4239 uint64_t const fRsvd1 = Dte.au64[1] & ~(IOMMU_DTE_QWORD_1_VALID_MASK & ~IOMMU_DTE_QWORD_1_FEAT_MASK);
4240 if (RT_LIKELY( !fRsvd0
4241 && !fRsvd1))
4242 { /* likely */ }
4243 else
4244 {
4245 Log((IOMMU_LOG_PFX ": Invalid reserved bits in DTE (u64[0]=%#RX64 u64[1]=%#RX64) -> Illegal DTE\n", fRsvd0, fRsvd1));
4246 EVT_ILLEGAL_DTE_T Event;
4247 iommuAmdInitIllegalDteEvent(uDevId, uIova, true /* fRsvdNotZero */, enmOp, &Event);
4248 iommuAmdRaiseIllegalDteEvent(pDevIns, enmOp, &Event, kIllegalDteType_RsvdNotZero);
4249 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4250 }
4251
4252 /* If the IOVA is subject to address exclusion, addresses are forwarded without translation. */
4253 if ( !pThis->ExclRangeBaseAddr.n.u1ExclEnable
4254 || !iommuAmdIsDvaInExclRange(pThis, &Dte, uIova))
4255 { /* likely */ }
4256 else
4257 {
4258 *pGCPhysSpa = uIova;
4259 return VINF_SUCCESS;
4260 }
4261
4262 /** @todo IOMMU: Perhaps do the <= 4K access case first, if the generic loop
4263 * below gets too expensive and when we have iommuAmdWalkIoPageDirectory. */
4264
4265 IOTLBE_T Iotlbe;
4266 iommuAmdInitIotlbe(NIL_RTGCPHYS, 0 /* cShift */, IOMMU_IO_PERM_NONE, &Iotlbe);
4267
4268 uint64_t cbChecked = 0;
4269 uint64_t uBaseIova = uIova & X86_PAGE_4K_BASE_MASK;
4270 for (;;)
4271 {
4272 /* Walk the I/O page tables to translate and get permission bits for the IOVA access. */
4273 rc = iommuAmdWalkIoPageTables(pDevIns, uDevId, uBaseIova, fAccess, &Dte, enmOp, &Iotlbe);
4274 if (RT_SUCCESS(rc))
4275 {
4276 /* Record the translated base address (before continuing to check permission bits of any subsequent pages). */
4277 if (cbChecked == 0)
4278 *pGCPhysSpa = Iotlbe.GCPhysSpa;
4279
4280 /** @todo IOMMU: Split large pages into 4K IOTLB entries and add to IOTLB cache. */
4281
4282 uint64_t const cbPhysPage = UINT64_C(1) << Iotlbe.cShift;
4283 cbChecked += cbPhysPage;
4284 if (cbChecked >= cbAccess)
4285 break;
4286 uBaseIova += cbPhysPage;
4287 }
4288 else
4289 {
4290 Log((IOMMU_LOG_PFX ": I/O page table walk failed. uIova=%#RX64 uBaseIova=%#RX64 fAccess=%u rc=%Rrc\n",
4291 uIova, uBaseIova, fAccess, rc));
4292 *pGCPhysSpa = NIL_RTGCPHYS;
4293 return rc;
4294 }
4295 }
4296
4297 return rc;
4298 }
4299
4300 Log((IOMMU_LOG_PFX ": Failed to read device table entry. uDevId=%#x rc=%Rrc\n", uDevId, rc));
4301 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
4302}
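
/*
 * Example of the lookup loop above (illustrative): a 6 KB read at IOVA 0x10000 backed by 4K
 * pages is checked in two walks -- page 0x10000 (cbChecked=4K) and page 0x11000 (cbChecked=8K,
 * which is >= 6K) -- with the access permissions validated for each page, while *pGCPhysSpa
 * only records the translation of the first page walked.
 */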
4303
4304
4305/**
4306 * Memory read request from a device.
4307 *
4308 * @returns VBox status code.
4309 * @param pDevIns The IOMMU device instance.
4310 * @param uDevId The device ID (bus, device, function).
4311 * @param uIova The I/O virtual address being read.
4312 * @param cbRead The number of bytes being read.
4313 * @param pGCPhysSpa Where to store the translated system physical address.
4314 *
4315 * @thread Any.
4316 */
4317static int iommuAmdDeviceMemRead(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, size_t cbRead, PRTGCPHYS pGCPhysSpa)
4318{
4319 Assert(pDevIns);
4320 Assert(pGCPhysSpa);
4321 Assert(cbRead > 0);
4322
4323 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
4324
4325 /* Addresses are forwarded without translation when the IOMMU is disabled. */
4326 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
4327 if (Ctrl.n.u1IommuEn)
4328 {
4329 /** @todo IOMMU: IOTLB cache lookup. */
4330
4331 /* Lookup the IOVA from the device tables. */
4332 return iommuAmdLookupDeviceTables(pDevIns, uDevId, uIova, cbRead, IOMMU_IO_PERM_READ, IOMMUOP_MEM_READ, pGCPhysSpa);
4333 }
4334
4335 *pGCPhysSpa = uIova;
4336 return VINF_SUCCESS;
4337}
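
/*
 * Illustrative sketch (not part of the device code): how the pfnMemRead callback registered in
 * iommuAmdR3Construct() might be driven for a small DMA read issued by PCI device 00:01.0
 * (uDevId=0x0008). The helper below is hypothetical and assumes the access does not cross a
 * 4K page boundary.
 * @code
 *     static int iommuAmdExampleDmaRead(PPDMDEVINS pDevIns, uint64_t uIova, void *pvBuf, size_t cbRead)
 *     {
 *         RTGCPHYS GCPhysSpa = NIL_RTGCPHYS;
 *         int rc = iommuAmdDeviceMemRead(pDevIns, 0x0008, uIova, cbRead, &GCPhysSpa);
 *         if (RT_SUCCESS(rc))
 *             rc = PDMDevHlpPhysRead(pDevIns, GCPhysSpa, pvBuf, cbRead); // access the translated SPA
 *         return rc;
 *     }
 * @endcode
 */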
4338
4339
4340/**
4341 * Memory write request from a device.
4342 *
4343 * @returns VBox status code.
4344 * @param pDevIns The IOMMU device instance.
4345 * @param uDevId The device ID (bus, device, function).
4346 * @param uIova The I/O virtual address being written.
4347 * @param cbWrite The number of bytes being written.
4348 * @param   pGCPhysSpa      Where to store the translated system physical address.
4349 *
4350 * @thread Any.
4351 */
4352static int iommuAmdDeviceMemWrite(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, size_t cbWrite, PRTGCPHYS pGCPhysSpa)
4353{
4354 RT_NOREF(pDevIns, uDevId, uIova, cbWrite, pGCPhysSpa);
4355 return VERR_NOT_IMPLEMENTED;
4356}
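
/*
 * Sketch of what the write path above will presumably look like once implemented, mirroring
 * iommuAmdDeviceMemRead (illustrative only; assumes the usual IOMMU_IO_PERM_WRITE and
 * IOMMUOP_MEM_WRITE counterparts of the read-side constants):
 *
 *     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
 *     IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
 *     if (Ctrl.n.u1IommuEn)
 *         return iommuAmdLookupDeviceTables(pDevIns, uDevId, uIova, cbWrite, IOMMU_IO_PERM_WRITE,
 *                                           IOMMUOP_MEM_WRITE, pGCPhysSpa);
 *     *pGCPhysSpa = uIova;
 *     return VINF_SUCCESS;
 */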
4357
4358
4359/**
4360 * @callback_method_impl{FNIOMMMIONEWWRITE}
4361 */
4362static DECLCALLBACK(VBOXSTRICTRC) iommuAmdMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
4363{
4364 NOREF(pvUser);
4365 Assert(cb == 4 || cb == 8);
4366 Assert(!(off & (cb - 1)));
4367
4368 uint64_t const uValue = cb == 8 ? *(uint64_t const *)pv : *(uint32_t const *)pv;
4369 return iommuAmdWriteRegister(pDevIns, off, cb, uValue);
4370}
4371
4372
4373/**
4374 * @callback_method_impl{FNIOMMMIONEWREAD}
4375 */
4376static DECLCALLBACK(VBOXSTRICTRC) iommuAmdMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
4377{
4378 NOREF(pvUser);
4379 Assert(cb == 4 || cb == 8);
4380 Assert(!(off & (cb - 1)));
4381
4382 uint64_t uResult;
4383 VBOXSTRICTRC rcStrict = iommuAmdReadRegister(pDevIns, off, &uResult);
4384 if (cb == 8)
4385 *(uint64_t *)pv = uResult;
4386 else
4387 *(uint32_t *)pv = (uint32_t)uResult;
4388
4389 return rcStrict;
4390}
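
/*
 * Note: only naturally aligned 32-bit and 64-bit accesses reach the two MMIO handlers above;
 * the region is created in iommuAmdR3Construct() with IOMMMIO_FLAGS_READ_DWORD_QWORD and
 * IOMMMIO_FLAGS_WRITE_DWORD_QWORD_ZEROED, which is what makes the size/alignment assertions
 * safe. E.g. a guest enabling the IOMMU writes the 64-bit Control register at MMIO offset
 * 0x18, arriving here with cb=8 and dispatched via iommuAmdWriteRegister()/iommuAmdReadRegister().
 */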
4391
4392
4393# ifdef IN_RING3
4394/**
4395 * @callback_method_impl{FNPCICONFIGREAD}
4396 */
4397static DECLCALLBACK(VBOXSTRICTRC) iommuAmdR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t uAddress,
4398 unsigned cb, uint32_t *pu32Value)
4399{
4400 /** @todo IOMMU: PCI config read stat counter. */
4401 VBOXSTRICTRC rcStrict = PDMDevHlpPCIConfigRead(pDevIns, pPciDev, uAddress, cb, pu32Value);
4402 Log3((IOMMU_LOG_PFX ": Reading PCI config register %#x (cb=%u) -> %#x %Rrc\n", uAddress, cb, *pu32Value,
4403 VBOXSTRICTRC_VAL(rcStrict)));
4404 return rcStrict;
4405}
4406
4407
4408/**
4409 * @callback_method_impl{FNPCICONFIGWRITE}
4410 */
4411static DECLCALLBACK(VBOXSTRICTRC) iommuAmdR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t uAddress,
4412 unsigned cb, uint32_t u32Value)
4413{
4414 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
4415
4416 /*
4417 * Discard writes to read-only registers that are specific to the IOMMU.
4418 * Other common PCI registers are handled by the generic code, see devpciR3IsConfigByteWritable().
4419 * See PCI spec. 6.1. "Configuration Space Organization".
4420 */
4421 switch (uAddress)
4422 {
4423 case IOMMU_PCI_OFF_CAP_HDR: /* All bits are read-only. */
4424 case IOMMU_PCI_OFF_RANGE_REG: /* We don't have any devices integrated with the IOMMU. */
4425 case IOMMU_PCI_OFF_MISCINFO_REG_0: /* We don't support MSI-X. */
4426 case IOMMU_PCI_OFF_MISCINFO_REG_1: /* We don't support guest-address translation. */
4427 {
4428 Log((IOMMU_LOG_PFX ": PCI config write (%#RX32) to read-only register %#x -> Ignored\n", u32Value, uAddress));
4429 return VINF_SUCCESS;
4430 }
4431 }
4432
4433 IOMMU_LOCK_RET(pDevIns, pThis, VERR_IGNORED);
4434
4435    VBOXSTRICTRC rcStrict = VINF_SUCCESS; /* Paths below that only update shadow state don't set this explicitly. */
4436 switch (uAddress)
4437 {
4438 case IOMMU_PCI_OFF_BASE_ADDR_REG_LO:
4439 {
4440 if (pThis->IommuBar.n.u1Enable)
4441 {
4442 rcStrict = VINF_SUCCESS;
4443 Log((IOMMU_LOG_PFX ": Writing Base Address (Lo) when it's already enabled -> Ignored\n"));
4444 break;
4445 }
4446
4447 pThis->IommuBar.au32[0] = u32Value & IOMMU_BAR_VALID_MASK;
4448 if (pThis->IommuBar.n.u1Enable)
4449 {
4450 Assert(pThis->hMmio == NIL_IOMMMIOHANDLE);
4451 Assert(!pThis->ExtFeat.n.u1PerfCounterSup); /* Base is 16K aligned when performance counters aren't supported. */
4452 RTGCPHYS const GCPhysMmioBase = RT_MAKE_U64(pThis->IommuBar.au32[0] & 0xffffc000, pThis->IommuBar.au32[1]);
4453 rcStrict = PDMDevHlpMmioMap(pDevIns, pThis->hMmio, GCPhysMmioBase);
4454 if (RT_FAILURE(rcStrict))
4455 Log((IOMMU_LOG_PFX ": Failed to map IOMMU MMIO region at %#RGp. rc=%Rrc\n", GCPhysMmioBase, rcStrict));
4456 }
4457 break;
4458 }
4459
4460 case IOMMU_PCI_OFF_BASE_ADDR_REG_HI:
4461 {
4462 if (!pThis->IommuBar.n.u1Enable)
4463 pThis->IommuBar.au32[1] = u32Value;
4464 else
4465 {
4466 rcStrict = VINF_SUCCESS;
4467 Log((IOMMU_LOG_PFX ": Writing Base Address (Hi) when it's already enabled -> Ignored\n"));
4468 }
4469 break;
4470 }
4471
4472 case IOMMU_PCI_OFF_MSI_CAP_HDR:
4473 {
4474            u32Value |= RT_BIT(23);    /* 64-bit MSI addressing must always be enabled for the IOMMU. */
4475 RT_FALL_THRU();
4476 }
4477
4478 default:
4479 {
4480 rcStrict = PDMDevHlpPCIConfigWrite(pDevIns, pPciDev, uAddress, cb, u32Value);
4481 break;
4482 }
4483 }
4484
4485 IOMMU_UNLOCK(pDevIns, pThis);
4486
4487 Log3((IOMMU_LOG_PFX ": PCI config write: %#x -> To %#x (%u) %Rrc\n", u32Value, uAddress, cb, VBOXSTRICTRC_VAL(rcStrict)));
4488 return rcStrict;
4489}
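
/*
 * Illustrative programming sequence for the handler above: since the enable bit lives in bit 0
 * of the low base address dword, a guest is expected to write IOMMU_PCI_OFF_BASE_ADDR_REG_HI
 * first and then IOMMU_PCI_OFF_BASE_ADDR_REG_LO with the enable bit set, at which point the
 * 16K-aligned MMIO region is mapped via PDMDevHlpMmioMap(). Once enabled, further writes to
 * either half of the base address are ignored (see the "-> Ignored" paths above).
 */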
4490
4491
4492/**
4493 * @callback_method_impl{FNDBGFHANDLERDEV}
4494 */
4495static DECLCALLBACK(void) iommuAmdR3DbgInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
4496{
4497 PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
4498 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
4499 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
4500
4501 LogFlow((IOMMU_LOG_PFX ": iommuAmdR3DbgInfo: pThis=%p pszArgs=%s\n", pThis, pszArgs));
4502    bool const fVerbose = pszArgs && !strncmp(pszArgs, RT_STR_TUPLE("verbose"));
4503
4504 pHlp->pfnPrintf(pHlp, "AMD-IOMMU:\n");
4505 /* Device Table Base Addresses (all segments). */
4506 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aDevTabBaseAddrs); i++)
4507 {
4508 DEV_TAB_BAR_T const DevTabBar = pThis->aDevTabBaseAddrs[i];
4509 pHlp->pfnPrintf(pHlp, " Device Table BAR [%u] = %#RX64\n", i, DevTabBar.u64);
4510 if (fVerbose)
4511 {
4512 pHlp->pfnPrintf(pHlp, " Size = %#x (%u bytes)\n", DevTabBar.n.u9Size,
4513 IOMMU_GET_DEV_TAB_SIZE(DevTabBar.n.u9Size));
4514 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", DevTabBar.n.u40Base << X86_PAGE_4K_SHIFT);
4515 }
4516 }
4517 /* Command Buffer Base Address Register. */
4518 {
4519 CMD_BUF_BAR_T const CmdBufBar = pThis->CmdBufBaseAddr;
4520 uint8_t const uEncodedLen = CmdBufBar.n.u4Len;
4521 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
4522 uint32_t const cbBuffer = iommuAmdGetBufLength(uEncodedLen);
4523 pHlp->pfnPrintf(pHlp, " Command buffer BAR = %#RX64\n", CmdBufBar.u64);
4524 if (fVerbose)
4525 {
4526 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", CmdBufBar.n.u40Base << X86_PAGE_4K_SHIFT);
4527 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
4528 cEntries, cbBuffer);
4529 }
4530 }
4531 /* Event Log Base Address Register. */
4532 {
4533 EVT_LOG_BAR_T const EvtLogBar = pThis->EvtLogBaseAddr;
4534 uint8_t const uEncodedLen = EvtLogBar.n.u4Len;
4535 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
4536 uint32_t const cbBuffer = iommuAmdGetBufLength(uEncodedLen);
4537 pHlp->pfnPrintf(pHlp, " Event log BAR = %#RX64\n", EvtLogBar.u64);
4538 if (fVerbose)
4539 {
4540 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", EvtLogBar.n.u40Base << X86_PAGE_4K_SHIFT);
4541 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
4542 cEntries, cbBuffer);
4543 }
4544 }
4545 /* IOMMU Control Register. */
4546 {
4547 IOMMU_CTRL_T const Ctrl = pThis->Ctrl;
4548 pHlp->pfnPrintf(pHlp, " Control = %#RX64\n", Ctrl.u64);
4549 if (fVerbose)
4550 {
4551 pHlp->pfnPrintf(pHlp, " IOMMU enable = %RTbool\n", Ctrl.n.u1IommuEn);
4552 pHlp->pfnPrintf(pHlp, " HT Tunnel translation enable = %RTbool\n", Ctrl.n.u1HtTunEn);
4553 pHlp->pfnPrintf(pHlp, " Event log enable = %RTbool\n", Ctrl.n.u1EvtLogEn);
4554 pHlp->pfnPrintf(pHlp, " Event log interrupt enable = %RTbool\n", Ctrl.n.u1EvtIntrEn);
4555            pHlp->pfnPrintf(pHlp, "    Completion wait interrupt enable        = %RTbool\n", Ctrl.n.u1CompWaitIntrEn);
4556 pHlp->pfnPrintf(pHlp, " Invalidation timeout = %u\n", Ctrl.n.u3InvTimeOut);
4557 pHlp->pfnPrintf(pHlp, " Pass posted write = %RTbool\n", Ctrl.n.u1PassPW);
4558            pHlp->pfnPrintf(pHlp, "    Response pass posted write              = %RTbool\n", Ctrl.n.u1ResPassPW);
4559 pHlp->pfnPrintf(pHlp, " Coherent = %RTbool\n", Ctrl.n.u1Coherent);
4560 pHlp->pfnPrintf(pHlp, " Isochronous = %RTbool\n", Ctrl.n.u1Isoc);
4561 pHlp->pfnPrintf(pHlp, " Command buffer enable = %RTbool\n", Ctrl.n.u1CmdBufEn);
4562 pHlp->pfnPrintf(pHlp, " PPR log enable = %RTbool\n", Ctrl.n.u1PprLogEn);
4563 pHlp->pfnPrintf(pHlp, " PPR interrupt enable = %RTbool\n", Ctrl.n.u1PprIntrEn);
4564 pHlp->pfnPrintf(pHlp, " PPR enable = %RTbool\n", Ctrl.n.u1PprEn);
4565            pHlp->pfnPrintf(pHlp, "    Guest translation enable                = %RTbool\n", Ctrl.n.u1GstTranslateEn);
4566 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC enable = %RTbool\n", Ctrl.n.u1GstVirtApicEn);
4567 pHlp->pfnPrintf(pHlp, " CRW = %#x\n", Ctrl.n.u4Crw);
4568 pHlp->pfnPrintf(pHlp, " SMI filter enable = %RTbool\n", Ctrl.n.u1SmiFilterEn);
4569 pHlp->pfnPrintf(pHlp, " Self-writeback disable = %RTbool\n", Ctrl.n.u1SelfWriteBackDis);
4570 pHlp->pfnPrintf(pHlp, " SMI filter log enable = %RTbool\n", Ctrl.n.u1SmiFilterLogEn);
4571 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC mode enable = %#x\n", Ctrl.n.u3GstVirtApicModeEn);
4572 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC GA log enable = %RTbool\n", Ctrl.n.u1GstLogEn);
4573 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC interrupt enable = %RTbool\n", Ctrl.n.u1GstIntrEn);
4574 pHlp->pfnPrintf(pHlp, " Dual PPR log enable = %#x\n", Ctrl.n.u2DualPprLogEn);
4575 pHlp->pfnPrintf(pHlp, " Dual event log enable = %#x\n", Ctrl.n.u2DualEvtLogEn);
4576 pHlp->pfnPrintf(pHlp, " Device table segmentation enable = %#x\n", Ctrl.n.u3DevTabSegEn);
4577 pHlp->pfnPrintf(pHlp, " Privilege abort enable = %#x\n", Ctrl.n.u2PrivAbortEn);
4578 pHlp->pfnPrintf(pHlp, " PPR auto response enable = %RTbool\n", Ctrl.n.u1PprAutoRespEn);
4579 pHlp->pfnPrintf(pHlp, " MARC enable = %RTbool\n", Ctrl.n.u1MarcEn);
4580 pHlp->pfnPrintf(pHlp, " Block StopMark enable = %RTbool\n", Ctrl.n.u1BlockStopMarkEn);
4581 pHlp->pfnPrintf(pHlp, " PPR auto response always-on enable = %RTbool\n", Ctrl.n.u1PprAutoRespAlwaysOnEn);
4582 pHlp->pfnPrintf(pHlp, " Domain IDPNE = %RTbool\n", Ctrl.n.u1DomainIDPNE);
4583 pHlp->pfnPrintf(pHlp, " Enhanced PPR handling = %RTbool\n", Ctrl.n.u1EnhancedPpr);
4584 pHlp->pfnPrintf(pHlp, " Host page table access/dirty bit update = %#x\n", Ctrl.n.u2HstAccDirtyBitUpdate);
4585 pHlp->pfnPrintf(pHlp, " Guest page table dirty bit disable = %RTbool\n", Ctrl.n.u1GstDirtyUpdateDis);
4586 pHlp->pfnPrintf(pHlp, " x2APIC enable = %RTbool\n", Ctrl.n.u1X2ApicEn);
4587 pHlp->pfnPrintf(pHlp, " x2APIC interrupt enable = %RTbool\n", Ctrl.n.u1X2ApicIntrGenEn);
4588 pHlp->pfnPrintf(pHlp, " Guest page table access bit update = %RTbool\n", Ctrl.n.u1GstAccessUpdateDis);
4589 }
4590 }
4591 /* Exclusion Base Address Register. */
4592 {
4593 IOMMU_EXCL_RANGE_BAR_T const ExclRangeBar = pThis->ExclRangeBaseAddr;
4594 pHlp->pfnPrintf(pHlp, " Exclusion BAR = %#RX64\n", ExclRangeBar.u64);
4595 if (fVerbose)
4596 {
4597 pHlp->pfnPrintf(pHlp, " Exclusion enable = %RTbool\n", ExclRangeBar.n.u1ExclEnable);
4598 pHlp->pfnPrintf(pHlp, " Allow all devices = %RTbool\n", ExclRangeBar.n.u1AllowAll);
4599 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n",
4600 ExclRangeBar.n.u40ExclRangeBase << X86_PAGE_4K_SHIFT);
4601 }
4602 }
4603 /* Exclusion Range Limit Register. */
4604 {
4605 IOMMU_EXCL_RANGE_LIMIT_T const ExclRangeLimit = pThis->ExclRangeLimit;
4606 pHlp->pfnPrintf(pHlp, " Exclusion Range Limit = %#RX64\n", ExclRangeLimit.u64);
4607 if (fVerbose)
4608 pHlp->pfnPrintf(pHlp, " Range limit = %#RX64\n", ExclRangeLimit.n.u52ExclLimit);
4609 }
4610 /* Extended Feature Register. */
4611 {
4612 IOMMU_EXT_FEAT_T ExtFeat = pThis->ExtFeat;
4613 pHlp->pfnPrintf(pHlp, " Extended Feature Register = %#RX64\n", ExtFeat.u64);
4614 pHlp->pfnPrintf(pHlp, " Prefetch support = %RTbool\n", ExtFeat.n.u1PrefetchSup);
4615 if (fVerbose)
4616 {
4617 pHlp->pfnPrintf(pHlp, " PPR support = %RTbool\n", ExtFeat.n.u1PprSup);
4618 pHlp->pfnPrintf(pHlp, " x2APIC support = %RTbool\n", ExtFeat.n.u1X2ApicSup);
4619 pHlp->pfnPrintf(pHlp, " NX and privilege level support = %RTbool\n", ExtFeat.n.u1NoExecuteSup);
4620 pHlp->pfnPrintf(pHlp, " Guest translation support = %RTbool\n", ExtFeat.n.u1GstTranslateSup);
4621 pHlp->pfnPrintf(pHlp, " Invalidate-All command support = %RTbool\n", ExtFeat.n.u1InvAllSup);
4622 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC support = %RTbool\n", ExtFeat.n.u1GstVirtApicSup);
4623 pHlp->pfnPrintf(pHlp, " Hardware error register support = %RTbool\n", ExtFeat.n.u1HwErrorSup);
4624 pHlp->pfnPrintf(pHlp, " Performance counters support = %RTbool\n", ExtFeat.n.u1PerfCounterSup);
4625 pHlp->pfnPrintf(pHlp, " Host address translation size = %#x\n", ExtFeat.n.u2HostAddrTranslateSize);
4626 pHlp->pfnPrintf(pHlp, " Guest address translation size = %#x\n", ExtFeat.n.u2GstAddrTranslateSize);
4627 pHlp->pfnPrintf(pHlp, " Guest CR3 root table level support = %#x\n", ExtFeat.n.u2GstCr3RootTblLevel);
4628 pHlp->pfnPrintf(pHlp, " SMI filter register support = %#x\n", ExtFeat.n.u2SmiFilterSup);
4629 pHlp->pfnPrintf(pHlp, " SMI filter register count = %#x\n", ExtFeat.n.u3SmiFilterCount);
4630 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC modes support = %#x\n", ExtFeat.n.u3GstVirtApicModeSup);
4631 pHlp->pfnPrintf(pHlp, " Dual PPR log support = %#x\n", ExtFeat.n.u2DualPprLogSup);
4632 pHlp->pfnPrintf(pHlp, " Dual event log support = %#x\n", ExtFeat.n.u2DualEvtLogSup);
4633 pHlp->pfnPrintf(pHlp, " Maximum PASID = %#x\n", ExtFeat.n.u5MaxPasidSup);
4634 pHlp->pfnPrintf(pHlp, " User/supervisor page protection support = %RTbool\n", ExtFeat.n.u1UserSupervisorSup);
4635 pHlp->pfnPrintf(pHlp, " Device table segments supported = %#x (%u)\n", ExtFeat.n.u2DevTabSegSup,
4636 g_acDevTabSegs[ExtFeat.n.u2DevTabSegSup]);
4637 pHlp->pfnPrintf(pHlp, " PPR log overflow early warning support = %RTbool\n", ExtFeat.n.u1PprLogOverflowWarn);
4638 pHlp->pfnPrintf(pHlp, " PPR auto response support = %RTbool\n", ExtFeat.n.u1PprAutoRespSup);
4639 pHlp->pfnPrintf(pHlp, " MARC support = %#x\n", ExtFeat.n.u2MarcSup);
4640 pHlp->pfnPrintf(pHlp, " Block StopMark message support = %RTbool\n", ExtFeat.n.u1BlockStopMarkSup);
4641 pHlp->pfnPrintf(pHlp, " Performance optimization support = %RTbool\n", ExtFeat.n.u1PerfOptSup);
4642 pHlp->pfnPrintf(pHlp, " MSI capability MMIO access support = %RTbool\n", ExtFeat.n.u1MsiCapMmioSup);
4643 pHlp->pfnPrintf(pHlp, " Guest I/O protection support = %RTbool\n", ExtFeat.n.u1GstIoSup);
4644 pHlp->pfnPrintf(pHlp, " Host access support = %RTbool\n", ExtFeat.n.u1HostAccessSup);
4645 pHlp->pfnPrintf(pHlp, " Enhanced PPR handling support = %RTbool\n", ExtFeat.n.u1EnhancedPprSup);
4646 pHlp->pfnPrintf(pHlp, " Attribute forward supported = %RTbool\n", ExtFeat.n.u1AttrForwardSup);
4647 pHlp->pfnPrintf(pHlp, " Host dirty support = %RTbool\n", ExtFeat.n.u1HostDirtySup);
4648 pHlp->pfnPrintf(pHlp, " Invalidate IOTLB type support = %RTbool\n", ExtFeat.n.u1InvIoTlbTypeSup);
4649 pHlp->pfnPrintf(pHlp, " Guest page table access bit hw disable = %RTbool\n", ExtFeat.n.u1GstUpdateDisSup);
4650 pHlp->pfnPrintf(pHlp, " Force physical dest for remapped intr. = %RTbool\n", ExtFeat.n.u1ForcePhysDstSup);
4651 }
4652 }
4653 /* PPR Log Base Address Register. */
4654 {
4655 PPR_LOG_BAR_T PprLogBar = pThis->PprLogBaseAddr;
4656 uint8_t const uEncodedLen = PprLogBar.n.u4Len;
4657 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
4658 uint32_t const cbBuffer = iommuAmdGetBufLength(uEncodedLen);
4659 pHlp->pfnPrintf(pHlp, " PPR Log BAR = %#RX64\n", PprLogBar.u64);
4660 if (fVerbose)
4661 {
4662 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", PprLogBar.n.u40Base << X86_PAGE_4K_SHIFT);
4663 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
4664 cEntries, cbBuffer);
4665 }
4666 }
4667 /* Hardware Event (Hi) Register. */
4668 {
4669 IOMMU_HW_EVT_HI_T HwEvtHi = pThis->HwEvtHi;
4670 pHlp->pfnPrintf(pHlp, " Hardware Event (Hi) = %#RX64\n", HwEvtHi.u64);
4671 if (fVerbose)
4672 {
4673 pHlp->pfnPrintf(pHlp, " First operand = %#RX64\n", HwEvtHi.n.u60FirstOperand);
4674 pHlp->pfnPrintf(pHlp, " Event code = %#RX8\n", HwEvtHi.n.u4EvtCode);
4675 }
4676 }
4677 /* Hardware Event (Lo) Register. */
4678 pHlp->pfnPrintf(pHlp, " Hardware Event (Lo) = %#RX64\n", pThis->HwEvtLo);
4679 /* Hardware Event Status. */
4680 {
4681 IOMMU_HW_EVT_STATUS_T HwEvtStatus = pThis->HwEvtStatus;
4682 pHlp->pfnPrintf(pHlp, " Hardware Event Status = %#RX64\n", HwEvtStatus.u64);
4683 if (fVerbose)
4684 {
4685 pHlp->pfnPrintf(pHlp, " Valid = %RTbool\n", HwEvtStatus.n.u1Valid);
4686 pHlp->pfnPrintf(pHlp, " Overflow = %RTbool\n", HwEvtStatus.n.u1Overflow);
4687 }
4688 }
4689 /* Guest Virtual-APIC Log Base Address Register. */
4690 {
4691 GALOG_BAR_T const GALogBar = pThis->GALogBaseAddr;
4692 uint8_t const uEncodedLen = GALogBar.n.u4Len;
4693 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
4694 uint32_t const cbBuffer = iommuAmdGetBufLength(uEncodedLen);
4695 pHlp->pfnPrintf(pHlp, " Guest Log BAR = %#RX64\n", GALogBar.u64);
4696 if (fVerbose)
4697 {
4698            pHlp->pfnPrintf(pHlp, "    Base address                            = %#RX64\n", GALogBar.n.u40Base << X86_PAGE_4K_SHIFT);
4699 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
4700 cEntries, cbBuffer);
4701 }
4702 }
4703 /* Guest Virtual-APIC Log Tail Address Register. */
4704 {
4705 GALOG_TAIL_ADDR_T GALogTail = pThis->GALogTailAddr;
4706 pHlp->pfnPrintf(pHlp, " Guest Log Tail Address = %#RX64\n", GALogTail.u64);
4707 if (fVerbose)
4708 pHlp->pfnPrintf(pHlp, " Tail address = %#RX64\n", GALogTail.n.u40GALogTailAddr);
4709 }
4710 /* PPR Log B Base Address Register. */
4711 {
4712 PPR_LOG_B_BAR_T PprLogBBar = pThis->PprLogBBaseAddr;
4713 uint8_t const uEncodedLen = PprLogBBar.n.u4Len;
4714 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
4715 uint32_t const cbBuffer = iommuAmdGetBufLength(uEncodedLen);
4716 pHlp->pfnPrintf(pHlp, " PPR Log B BAR = %#RX64\n", PprLogBBar.u64);
4717 if (fVerbose)
4718 {
4719 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", PprLogBBar.n.u40Base << X86_PAGE_4K_SHIFT);
4720 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
4721 cEntries, cbBuffer);
4722 }
4723 }
4724 /* Event Log B Base Address Register. */
4725 {
4726 EVT_LOG_B_BAR_T EvtLogBBar = pThis->EvtLogBBaseAddr;
4727 uint8_t const uEncodedLen = EvtLogBBar.n.u4Len;
4728 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
4729 uint32_t const cbBuffer = iommuAmdGetBufLength(uEncodedLen);
4730 pHlp->pfnPrintf(pHlp, " Event Log B BAR = %#RX64\n", EvtLogBBar.u64);
4731 if (fVerbose)
4732 {
4733 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", EvtLogBBar.n.u40Base << X86_PAGE_4K_SHIFT);
4734 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
4735 cEntries, cbBuffer);
4736 }
4737 }
4738 /* Device-Specific Feature Extension Register. */
4739 {
4740 DEV_SPECIFIC_FEAT_T const DevSpecificFeat = pThis->DevSpecificFeat;
4741 pHlp->pfnPrintf(pHlp, " Device-specific Feature = %#RX64\n", DevSpecificFeat.u64);
4742 if (fVerbose)
4743 {
4744 pHlp->pfnPrintf(pHlp, " Feature = %#RX32\n", DevSpecificFeat.n.u24DevSpecFeat);
4745 pHlp->pfnPrintf(pHlp, " Minor revision ID = %#x\n", DevSpecificFeat.n.u4RevMinor);
4746 pHlp->pfnPrintf(pHlp, " Major revision ID = %#x\n", DevSpecificFeat.n.u4RevMajor);
4747 }
4748 }
4749 /* Device-Specific Control Extension Register. */
4750 {
4751 DEV_SPECIFIC_CTRL_T const DevSpecificCtrl = pThis->DevSpecificCtrl;
4752 pHlp->pfnPrintf(pHlp, " Device-specific Control = %#RX64\n", DevSpecificCtrl.u64);
4753 if (fVerbose)
4754 {
4755 pHlp->pfnPrintf(pHlp, " Control = %#RX32\n", DevSpecificCtrl.n.u24DevSpecCtrl);
4756 pHlp->pfnPrintf(pHlp, " Minor revision ID = %#x\n", DevSpecificCtrl.n.u4RevMinor);
4757 pHlp->pfnPrintf(pHlp, " Major revision ID = %#x\n", DevSpecificCtrl.n.u4RevMajor);
4758 }
4759 }
4760 /* Device-Specific Status Extension Register. */
4761 {
4762 DEV_SPECIFIC_STATUS_T const DevSpecificStatus = pThis->DevSpecificStatus;
4763        pHlp->pfnPrintf(pHlp, "  Device-specific Status                     = %#RX64\n", DevSpecificStatus.u64);
4764 if (fVerbose)
4765 {
4766 pHlp->pfnPrintf(pHlp, " Status = %#RX32\n", DevSpecificStatus.n.u24DevSpecStatus);
4767 pHlp->pfnPrintf(pHlp, " Minor revision ID = %#x\n", DevSpecificStatus.n.u4RevMinor);
4768 pHlp->pfnPrintf(pHlp, " Major revision ID = %#x\n", DevSpecificStatus.n.u4RevMajor);
4769 }
4770 }
4771 /* MSI Miscellaneous Information Register (Lo and Hi). */
4772 {
4773 MSI_MISC_INFO_T const MsiMiscInfo = pThis->MsiMiscInfo;
4774 pHlp->pfnPrintf(pHlp, " MSI Misc. Info. Register = %#RX64\n", MsiMiscInfo.u64);
4775 if (fVerbose)
4776 {
4777 pHlp->pfnPrintf(pHlp, " Event Log MSI number = %#x\n", MsiMiscInfo.n.u5MsiNumEvtLog);
4778 pHlp->pfnPrintf(pHlp, " Guest Virtual-Address Size = %#x\n", MsiMiscInfo.n.u3GstVirtAddrSize);
4779 pHlp->pfnPrintf(pHlp, " Physical Address Size = %#x\n", MsiMiscInfo.n.u7PhysAddrSize);
4780 pHlp->pfnPrintf(pHlp, " Virtual-Address Size = %#x\n", MsiMiscInfo.n.u7VirtAddrSize);
4781 pHlp->pfnPrintf(pHlp, " HT Transport ATS Range Reserved = %RTbool\n", MsiMiscInfo.n.u1HtAtsResv);
4782 pHlp->pfnPrintf(pHlp, " PPR MSI number = %#x\n", MsiMiscInfo.n.u5MsiNumPpr);
4783 pHlp->pfnPrintf(pHlp, " GA Log MSI number = %#x\n", MsiMiscInfo.n.u5MsiNumGa);
4784 }
4785 }
4786 /* MSI Capability Header. */
4787 {
4788 MSI_CAP_HDR_T MsiCapHdr;
4789 MsiCapHdr.u32 = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR);
4790 pHlp->pfnPrintf(pHlp, " MSI Capability Header = %#RX32\n", MsiCapHdr.u32);
4791 if (fVerbose)
4792 {
4793 pHlp->pfnPrintf(pHlp, " Capability ID = %#x\n", MsiCapHdr.n.u8MsiCapId);
4794 pHlp->pfnPrintf(pHlp, " Capability Ptr (PCI config offset) = %#x\n", MsiCapHdr.n.u8MsiCapPtr);
4795 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", MsiCapHdr.n.u1MsiEnable);
4796 pHlp->pfnPrintf(pHlp, " Multi-message capability = %#x\n", MsiCapHdr.n.u3MsiMultiMessCap);
4797 pHlp->pfnPrintf(pHlp, " Multi-message enable = %#x\n", MsiCapHdr.n.u3MsiMultiMessEn);
4798 }
4799 }
4800 /* MSI Address Register (Lo and Hi). */
4801 {
4802 uint32_t const uMsiAddrLo = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO);
4803 uint32_t const uMsiAddrHi = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI);
4804 MSI_ADDR_T MsiAddr;
4805 MsiAddr.u64 = RT_MAKE_U64(uMsiAddrLo, uMsiAddrHi);
4806 pHlp->pfnPrintf(pHlp, " MSI Address = %#RX64\n", MsiAddr.u64);
4807 if (fVerbose)
4808 pHlp->pfnPrintf(pHlp, " Address = %#RX64\n", MsiAddr.n.u62MsiAddr);
4809 }
4810 /* MSI Data. */
4811 {
4812 MSI_DATA_T MsiData;
4813 MsiData.u32 = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA);
4814 pHlp->pfnPrintf(pHlp, " MSI Data = %#RX32\n", MsiData.u32);
4815 if (fVerbose)
4816 pHlp->pfnPrintf(pHlp, " Data = %#x\n", MsiData.n.u16MsiData);
4817 }
4818 /* MSI Mapping Capability Header (HyperTransport, reporting all 0s currently). */
4819 {
4820 MSI_MAP_CAP_HDR_T MsiMapCapHdr;
4821 MsiMapCapHdr.u32 = 0;
4822 pHlp->pfnPrintf(pHlp, " MSI Mapping Capability Header = %#RX32\n", MsiMapCapHdr.u32);
4823 if (fVerbose)
4824 {
4825 pHlp->pfnPrintf(pHlp, " Capability ID = %#x\n", MsiMapCapHdr.n.u8MsiMapCapId);
4826 pHlp->pfnPrintf(pHlp, " Map enable = %RTbool\n", MsiMapCapHdr.n.u1MsiMapEn);
4827 pHlp->pfnPrintf(pHlp, " Map fixed = %RTbool\n", MsiMapCapHdr.n.u1MsiMapFixed);
4828 pHlp->pfnPrintf(pHlp, " Map capability type = %#x\n", MsiMapCapHdr.n.u5MapCapType);
4829 }
4830 }
4831 /* Performance Optimization Control Register. */
4832 {
4833 IOMMU_PERF_OPT_CTRL_T const PerfOptCtrl = pThis->PerfOptCtrl;
4834 pHlp->pfnPrintf(pHlp, " Performance Optimization Control = %#RX32\n", PerfOptCtrl.u32);
4835 if (fVerbose)
4836 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", PerfOptCtrl.n.u1PerfOptEn);
4837 }
4838 /* XT (x2APIC) General Interrupt Control Register. */
4839 {
4840 IOMMU_XT_GEN_INTR_CTRL_T const XtGenIntrCtrl = pThis->XtGenIntrCtrl;
4841 pHlp->pfnPrintf(pHlp, " XT General Interrupt Control = %#RX64\n", XtGenIntrCtrl.u64);
4842 if (fVerbose)
4843 {
4844 pHlp->pfnPrintf(pHlp, " Interrupt destination mode = %s\n",
4845 !XtGenIntrCtrl.n.u1X2ApicIntrDstMode ? "physical" : "logical");
4846 pHlp->pfnPrintf(pHlp, " Interrupt destination = %#RX64\n",
4847 RT_MAKE_U64(XtGenIntrCtrl.n.u24X2ApicIntrDstLo, XtGenIntrCtrl.n.u7X2ApicIntrDstHi));
4848 pHlp->pfnPrintf(pHlp, " Interrupt vector = %#x\n", XtGenIntrCtrl.n.u8X2ApicIntrVector);
4849            pHlp->pfnPrintf(pHlp, "    Interrupt delivery mode                 = %s\n",
4850 !XtGenIntrCtrl.n.u8X2ApicIntrVector ? "fixed" : "arbitrated");
4851 }
4852 }
4853 /* XT (x2APIC) PPR Interrupt Control Register. */
4854 {
4855 IOMMU_XT_PPR_INTR_CTRL_T const XtPprIntrCtrl = pThis->XtPprIntrCtrl;
4856 pHlp->pfnPrintf(pHlp, " XT PPR Interrupt Control = %#RX64\n", XtPprIntrCtrl.u64);
4857 if (fVerbose)
4858 {
4859 pHlp->pfnPrintf(pHlp, " Interrupt destination mode = %s\n",
4860 !XtPprIntrCtrl.n.u1X2ApicIntrDstMode ? "physical" : "logical");
4861 pHlp->pfnPrintf(pHlp, " Interrupt destination = %#RX64\n",
4862 RT_MAKE_U64(XtPprIntrCtrl.n.u24X2ApicIntrDstLo, XtPprIntrCtrl.n.u7X2ApicIntrDstHi));
4863 pHlp->pfnPrintf(pHlp, " Interrupt vector = %#x\n", XtPprIntrCtrl.n.u8X2ApicIntrVector);
4864            pHlp->pfnPrintf(pHlp, "    Interrupt delivery mode                 = %s\n",
4865 !XtPprIntrCtrl.n.u8X2ApicIntrVector ? "fixed" : "arbitrated");
4866 }
4867 }
4868 /* XT (X2APIC) GA Log Interrupt Control Register. */
4869 {
4870 IOMMU_XT_GALOG_INTR_CTRL_T const XtGALogIntrCtrl = pThis->XtGALogIntrCtrl;
4871        pHlp->pfnPrintf(pHlp, "  XT GA Log Interrupt Control                = %#RX64\n", XtGALogIntrCtrl.u64);
4872 if (fVerbose)
4873 {
4874 pHlp->pfnPrintf(pHlp, " Interrupt destination mode = %s\n",
4875 !XtGALogIntrCtrl.n.u1X2ApicIntrDstMode ? "physical" : "logical");
4876 pHlp->pfnPrintf(pHlp, " Interrupt destination = %#RX64\n",
4877 RT_MAKE_U64(XtGALogIntrCtrl.n.u24X2ApicIntrDstLo, XtGALogIntrCtrl.n.u7X2ApicIntrDstHi));
4878 pHlp->pfnPrintf(pHlp, " Interrupt vector = %#x\n", XtGALogIntrCtrl.n.u8X2ApicIntrVector);
4879            pHlp->pfnPrintf(pHlp, "    Interrupt delivery mode                 = %s\n",
4880 !XtGALogIntrCtrl.n.u8X2ApicIntrVector ? "fixed" : "arbitrated");
4881 }
4882 }
4883 /* MARC Registers. */
4884 {
4885 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aMarcApers); i++)
4886 {
4887            pHlp->pfnPrintf(pHlp, "   MARC Aperture %u:\n", i);
4888 MARC_APER_BAR_T const MarcAperBar = pThis->aMarcApers[i].Base;
4889 pHlp->pfnPrintf(pHlp, " Base = %#RX64\n", MarcAperBar.n.u40MarcBaseAddr << X86_PAGE_4K_SHIFT);
4890
4891 MARC_APER_RELOC_T const MarcAperReloc = pThis->aMarcApers[i].Reloc;
4892 pHlp->pfnPrintf(pHlp, " Reloc = %#RX64 (addr: %#RX64, read-only: %RTbool, enable: %RTbool)\n",
4893 MarcAperReloc.u64, MarcAperReloc.n.u40MarcRelocAddr << X86_PAGE_4K_SHIFT,
4894 MarcAperReloc.n.u1ReadOnly, MarcAperReloc.n.u1RelocEn);
4895
4896 MARC_APER_LEN_T const MarcAperLen = pThis->aMarcApers[i].Length;
4897 pHlp->pfnPrintf(pHlp, " Length = %u pages\n", MarcAperLen.n.u40MarcLength);
4898 }
4899 }
4900 /* Reserved Register. */
4901 pHlp->pfnPrintf(pHlp, " Reserved Register = %#RX64\n", pThis->RsvdReg);
4902 /* Command Buffer Head Pointer Register. */
4903 {
4904 CMD_BUF_HEAD_PTR_T const CmdBufHeadPtr = pThis->CmdBufHeadPtr;
4905 pHlp->pfnPrintf(pHlp, " Command Buffer Head Pointer = %#RX64\n", CmdBufHeadPtr.u64);
4906 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", CmdBufHeadPtr.n.off);
4907 }
4908 /* Command Buffer Tail Pointer Register. */
4909 {
4910 CMD_BUF_HEAD_PTR_T const CmdBufTailPtr = pThis->CmdBufTailPtr;
4911 pHlp->pfnPrintf(pHlp, " Command Buffer Tail Pointer = %#RX64\n", CmdBufTailPtr.u64);
4912 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", CmdBufTailPtr.n.off);
4913 }
4914 /* Event Log Head Pointer Register. */
4915 {
4916 EVT_LOG_HEAD_PTR_T const EvtLogHeadPtr = pThis->EvtLogHeadPtr;
4917 pHlp->pfnPrintf(pHlp, " Event Log Head Pointer = %#RX64\n", EvtLogHeadPtr.u64);
4918 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", EvtLogHeadPtr.n.off);
4919 }
4920 /* Event Log Tail Pointer Register. */
4921 {
4922 EVT_LOG_TAIL_PTR_T const EvtLogTailPtr = pThis->EvtLogTailPtr;
4923        pHlp->pfnPrintf(pHlp, "  Event Log Tail Pointer                     = %#RX64\n", EvtLogTailPtr.u64);
4924 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", EvtLogTailPtr.n.off);
4925 }
4926 /* Status Register. */
4927 {
4928 IOMMU_STATUS_T const Status = pThis->Status;
4929 pHlp->pfnPrintf(pHlp, " Status Register = %#RX64\n", Status.u64);
4930 if (fVerbose)
4931 {
4932 pHlp->pfnPrintf(pHlp, " Event log overflow = %RTbool\n", Status.n.u1EvtOverflow);
4933 pHlp->pfnPrintf(pHlp, " Event log interrupt = %RTbool\n", Status.n.u1EvtLogIntr);
4934 pHlp->pfnPrintf(pHlp, " Completion wait interrupt = %RTbool\n", Status.n.u1CompWaitIntr);
4935 pHlp->pfnPrintf(pHlp, " Event log running = %RTbool\n", Status.n.u1EvtLogRunning);
4936 pHlp->pfnPrintf(pHlp, " Command buffer running = %RTbool\n", Status.n.u1CmdBufRunning);
4937 pHlp->pfnPrintf(pHlp, " PPR overflow = %RTbool\n", Status.n.u1PprOverflow);
4938 pHlp->pfnPrintf(pHlp, " PPR interrupt = %RTbool\n", Status.n.u1PprIntr);
4939 pHlp->pfnPrintf(pHlp, " PPR log running = %RTbool\n", Status.n.u1PprLogRunning);
4940 pHlp->pfnPrintf(pHlp, " Guest log running = %RTbool\n", Status.n.u1GstLogRunning);
4941 pHlp->pfnPrintf(pHlp, " Guest log interrupt = %RTbool\n", Status.n.u1GstLogIntr);
4942 pHlp->pfnPrintf(pHlp, " PPR log B overflow = %RTbool\n", Status.n.u1PprOverflowB);
4943 pHlp->pfnPrintf(pHlp, " PPR log active = %RTbool\n", Status.n.u1PprLogActive);
4944 pHlp->pfnPrintf(pHlp, " Event log B overflow = %RTbool\n", Status.n.u1EvtOverflowB);
4945 pHlp->pfnPrintf(pHlp, " Event log active = %RTbool\n", Status.n.u1EvtLogActive);
4946 pHlp->pfnPrintf(pHlp, " PPR log B overflow early warning = %RTbool\n", Status.n.u1PprOverflowEarlyB);
4947 pHlp->pfnPrintf(pHlp, " PPR log overflow early warning = %RTbool\n", Status.n.u1PprOverflowEarly);
4948 }
4949 }
4950 /* PPR Log Head Pointer. */
4951 {
4952 PPR_LOG_HEAD_PTR_T const PprLogHeadPtr = pThis->PprLogHeadPtr;
4953 pHlp->pfnPrintf(pHlp, " PPR Log Head Pointer = %#RX64\n", PprLogHeadPtr.u64);
4954 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", PprLogHeadPtr.n.off);
4955 }
4956 /* PPR Log Tail Pointer. */
4957 {
4958 PPR_LOG_TAIL_PTR_T const PprLogTailPtr = pThis->PprLogTailPtr;
4959 pHlp->pfnPrintf(pHlp, " PPR Log Tail Pointer = %#RX64\n", PprLogTailPtr.u64);
4960 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", PprLogTailPtr.n.off);
4961 }
4962 /* Guest Virtual-APIC Log Head Pointer. */
4963 {
4964 GALOG_HEAD_PTR_T const GALogHeadPtr = pThis->GALogHeadPtr;
4965 pHlp->pfnPrintf(pHlp, " Guest Virtual-APIC Log Head Pointer = %#RX64\n", GALogHeadPtr.u64);
4966 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", GALogHeadPtr.n.u12GALogPtr);
4967 }
4968 /* Guest Virtual-APIC Log Tail Pointer. */
4969 {
4970 GALOG_HEAD_PTR_T const GALogTailPtr = pThis->GALogTailPtr;
4971 pHlp->pfnPrintf(pHlp, " Guest Virtual-APIC Log Tail Pointer = %#RX64\n", GALogTailPtr.u64);
4972 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", GALogTailPtr.n.u12GALogPtr);
4973 }
4974 /* PPR Log B Head Pointer. */
4975 {
4976 PPR_LOG_B_HEAD_PTR_T const PprLogBHeadPtr = pThis->PprLogBHeadPtr;
4977 pHlp->pfnPrintf(pHlp, " PPR Log B Head Pointer = %#RX64\n", PprLogBHeadPtr.u64);
4978 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", PprLogBHeadPtr.n.off);
4979 }
4980 /* PPR Log B Tail Pointer. */
4981 {
4982 PPR_LOG_B_TAIL_PTR_T const PprLogBTailPtr = pThis->PprLogBTailPtr;
4983 pHlp->pfnPrintf(pHlp, " PPR Log B Tail Pointer = %#RX64\n", PprLogBTailPtr.u64);
4984 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", PprLogBTailPtr.n.off);
4985 }
4986 /* Event Log B Head Pointer. */
4987 {
4988 EVT_LOG_B_HEAD_PTR_T const EvtLogBHeadPtr = pThis->EvtLogBHeadPtr;
4989 pHlp->pfnPrintf(pHlp, " Event Log B Head Pointer = %#RX64\n", EvtLogBHeadPtr.u64);
4990 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", EvtLogBHeadPtr.n.off);
4991 }
4992 /* Event Log B Tail Pointer. */
4993 {
4994 EVT_LOG_B_TAIL_PTR_T const EvtLogBTailPtr = pThis->EvtLogBTailPtr;
4995 pHlp->pfnPrintf(pHlp, " Event Log B Tail Pointer = %#RX64\n", EvtLogBTailPtr.u64);
4996 pHlp->pfnPrintf(pHlp, " Pointer = %#x\n", EvtLogBTailPtr.n.off);
4997 }
4998 /* PPR Log Auto Response Register. */
4999 {
5000 PPR_LOG_AUTO_RESP_T const PprLogAutoResp = pThis->PprLogAutoResp;
5001 pHlp->pfnPrintf(pHlp, " PPR Log Auto Response Register = %#RX64\n", PprLogAutoResp.u64);
5002 if (fVerbose)
5003 {
5004 pHlp->pfnPrintf(pHlp, " Code = %#x\n", PprLogAutoResp.n.u4AutoRespCode);
5005 pHlp->pfnPrintf(pHlp, " Mask Gen. = %RTbool\n", PprLogAutoResp.n.u1AutoRespMaskGen);
5006 }
5007 }
5008 /* PPR Log Overflow Early Warning Indicator Register. */
5009 {
5010 PPR_LOG_OVERFLOW_EARLY_T const PprLogOverflowEarly = pThis->PprLogOverflowEarly;
5011 pHlp->pfnPrintf(pHlp, " PPR Log overflow early warning = %#RX64\n", PprLogOverflowEarly.u64);
5012 if (fVerbose)
5013 {
5014 pHlp->pfnPrintf(pHlp, " Threshold = %#x\n", PprLogOverflowEarly.n.u15Threshold);
5015 pHlp->pfnPrintf(pHlp, " Interrupt enable = %RTbool\n", PprLogOverflowEarly.n.u1IntrEn);
5016 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", PprLogOverflowEarly.n.u1Enable);
5017 }
5018 }
5019    /* PPR Log B Overflow Early Warning Indicator Register. */
5020 {
5021 PPR_LOG_OVERFLOW_EARLY_T const PprLogBOverflowEarly = pThis->PprLogBOverflowEarly;
5022 pHlp->pfnPrintf(pHlp, " PPR Log B overflow early warning = %#RX64\n", PprLogBOverflowEarly.u64);
5023 if (fVerbose)
5024 {
5025 pHlp->pfnPrintf(pHlp, " Threshold = %#x\n", PprLogBOverflowEarly.n.u15Threshold);
5026 pHlp->pfnPrintf(pHlp, " Interrupt enable = %RTbool\n", PprLogBOverflowEarly.n.u1IntrEn);
5027 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", PprLogBOverflowEarly.n.u1Enable);
5028 }
5029 }
5030}
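
/*
 * Usage note: this info handler is registered under the name "iommu" in iommuAmdR3Construct()
 * and can be invoked from the VM debugger, e.g. "VBoxManage debugvm <vmname> info iommu verbose";
 * the "verbose" argument is what sets fVerbose above.
 */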
5031
5032
5033/**
5034 * @callback_method_impl{FNSSMDEVSAVEEXEC}
5035 */
5036static DECLCALLBACK(int) iommuAmdR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
5037{
5038 /** @todo IOMMU: Save state. */
5039 RT_NOREF2(pDevIns, pSSM);
5040 return VERR_NOT_IMPLEMENTED;
5041}
5042
5043
5044/**
5045 * @callback_method_impl{FNSSMDEVLOADEXEC}
5046 */
5047static DECLCALLBACK(int) iommuAmdR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
5048{
5049 /** @todo IOMMU: Load state. */
5050 RT_NOREF4(pDevIns, pSSM, uVersion, uPass);
5051 return VERR_NOT_IMPLEMENTED;
5052}
5053
5054
5055/**
5056 * @interface_method_impl{PDMDEVREG,pfnReset}
5057 */
5058static DECLCALLBACK(void) iommuAmdR3Reset(PPDMDEVINS pDevIns)
5059{
5060 /*
5061 * Resets read-write portion of the IOMMU state.
5062 *
5063 * State data not initialized here is expected to be initialized during
5064 * device construction and remain read-only through the lifetime of the VM.
5065 */
5066 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
5067 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
5068 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
5069
5070 memset(&pThis->aDevTabBaseAddrs[0], 0, sizeof(pThis->aDevTabBaseAddrs));
5071
5072 pThis->CmdBufBaseAddr.u64 = 0;
5073 pThis->CmdBufBaseAddr.n.u4Len = 8;
5074
5075 pThis->EvtLogBaseAddr.u64 = 0;
5076 pThis->EvtLogBaseAddr.n.u4Len = 8;
5077
5078 pThis->Ctrl.u64 = 0;
5079
5080 pThis->ExclRangeBaseAddr.u64 = 0;
5081 pThis->ExclRangeLimit.u64 = 0;
5082
5083 pThis->PprLogBaseAddr.u64 = 0;
5084 pThis->PprLogBaseAddr.n.u4Len = 8;
5085
5086 pThis->HwEvtHi.u64 = 0;
5087 pThis->HwEvtLo = 0;
5088 pThis->HwEvtStatus.u64 = 0;
5089
5090 pThis->GALogBaseAddr.u64 = 0;
5091 pThis->GALogBaseAddr.n.u4Len = 8;
5092 pThis->GALogTailAddr.u64 = 0;
5093
5094 pThis->PprLogBBaseAddr.u64 = 0;
5095 pThis->PprLogBBaseAddr.n.u4Len = 8;
5096
5097 pThis->EvtLogBBaseAddr.u64 = 0;
5098 pThis->EvtLogBBaseAddr.n.u4Len = 8;
5099
5100 pThis->DevSpecificFeat.u64 = 0;
5101 pThis->DevSpecificCtrl.u64 = 0;
5102 pThis->DevSpecificStatus.u64 = 0;
5103
5104 pThis->MsiMiscInfo.u64 = 0;
5105 pThis->PerfOptCtrl.u32 = 0;
5106
5107 pThis->XtGenIntrCtrl.u64 = 0;
5108 pThis->XtPprIntrCtrl.u64 = 0;
5109 pThis->XtGALogIntrCtrl.u64 = 0;
5110
5111 memset(&pThis->aMarcApers[0], 0, sizeof(pThis->aMarcApers));
5112
5113 pThis->CmdBufHeadPtr.u64 = 0;
5114 pThis->CmdBufTailPtr.u64 = 0;
5115 pThis->EvtLogHeadPtr.u64 = 0;
5116 pThis->EvtLogTailPtr.u64 = 0;
5117
5118 pThis->Status.u64 = 0;
5119
5120 pThis->PprLogHeadPtr.u64 = 0;
5121 pThis->PprLogTailPtr.u64 = 0;
5122
5123 pThis->GALogHeadPtr.u64 = 0;
5124 pThis->GALogTailPtr.u64 = 0;
5125
5126 pThis->PprLogBHeadPtr.u64 = 0;
5127 pThis->PprLogBTailPtr.u64 = 0;
5128
5129 pThis->EvtLogBHeadPtr.u64 = 0;
5130 pThis->EvtLogBTailPtr.u64 = 0;
5131
5132 pThis->PprLogAutoResp.u64 = 0;
5133 pThis->PprLogOverflowEarly.u64 = 0;
5134 pThis->PprLogBOverflowEarly.u64 = 0;
5135
5136 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_LO, 0);
5137 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_HI, 0);
5138}
5139
5140
5141/**
5142 * @interface_method_impl{PDMDEVREG,pfnDestruct}
5143 */
5144static DECLCALLBACK(int) iommuAmdR3Destruct(PPDMDEVINS pDevIns)
5145{
5146 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
5147 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
5148 LogFlowFunc(("\n"));
5149
5150 /* Close the command thread semaphore. */
5151 if (pThis->hEvtCmdThread != NIL_SUPSEMEVENT)
5152 {
5153 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEvtCmdThread);
5154 pThis->hEvtCmdThread = NIL_SUPSEMEVENT;
5155 }
5156 return VINF_SUCCESS;
5157}
5158
5159
5160/**
5161 * @interface_method_impl{PDMDEVREG,pfnConstruct}
5162 */
5163static DECLCALLBACK(int) iommuAmdR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
5164{
5165 NOREF(iInstance);
5166
5167 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
5168 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
5169 PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
5170 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
5171 int rc;
5172 LogFlowFunc(("\n"));
5173
5174 pThisCC->pDevInsR3 = pDevIns;
5175
5176 /*
5177 * Validate and read the configuration.
5178 */
5179 PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns, "Device|Function", "");
5180
5181 uint8_t uPciDevice;
5182 rc = pHlp->pfnCFGMQueryU8Def(pCfg, "Device", &uPciDevice, 0);
5183 if (RT_FAILURE(rc))
5184 return PDMDEV_SET_ERROR(pDevIns, rc, N_("IOMMU: Failed to query \"Device\""));
5185
5186 uint8_t uPciFunction;
5187 rc = pHlp->pfnCFGMQueryU8Def(pCfg, "Function", &uPciFunction, 2);
5188 if (RT_FAILURE(rc))
5189 return PDMDEV_SET_ERROR(pDevIns, rc, N_("IOMMU: Failed to query \"Function\""));
5190
5191 /*
5192 * Register the IOMMU with PDM.
5193 */
5194 PDMIOMMUREGR3 IommuReg;
5195 RT_ZERO(IommuReg);
5196 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
5197 IommuReg.pfnMemRead = iommuAmdDeviceMemRead;
5198 IommuReg.pfnMemWrite = iommuAmdDeviceMemWrite;
5199 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
5200 rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
5201 if (RT_FAILURE(rc))
5202 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to register ourselves as an IOMMU device"));
5203 if (pThisCC->CTX_SUFF(pIommuHlp)->u32Version != PDM_IOMMUHLPR3_VERSION)
5204 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
5205 N_("IOMMU helper version mismatch; got %#x expected %#x"),
5206 pThisCC->CTX_SUFF(pIommuHlp)->u32Version, PDM_IOMMUHLPR3_VERSION);
5207 if (pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd != PDM_IOMMUHLPR3_VERSION)
5208 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
5209 N_("IOMMU helper end-version mismatch; got %#x expected %#x"),
5210 pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd, PDM_IOMMUHLPR3_VERSION);
5211
5212 /*
5213 * Initialize read-only PCI configuration space.
5214 */
5215 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
5216 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
5217
5218 /* Header. */
5219 PDMPciDevSetVendorId(pPciDev, IOMMU_PCI_VENDOR_ID); /* AMD */
5220 PDMPciDevSetDeviceId(pPciDev, IOMMU_PCI_DEVICE_ID); /* VirtualBox IOMMU device */
5221 PDMPciDevSetCommand(pPciDev, 0); /* Command */
5222 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST); /* Status - CapList supported */
5223 PDMPciDevSetRevisionId(pPciDev, IOMMU_PCI_REVISION_ID); /* VirtualBox specific device implementation revision */
5224 PDMPciDevSetClassBase(pPciDev, 0x08); /* System Base Peripheral */
5225 PDMPciDevSetClassSub(pPciDev, 0x06); /* IOMMU */
5226 PDMPciDevSetClassProg(pPciDev, 0x00); /* IOMMU Programming interface */
5227 PDMPciDevSetHeaderType(pPciDev, 0x00); /* Single function, type 0. */
5228    PDMPciDevSetSubSystemId(pPciDev,        IOMMU_PCI_DEVICE_ID);  /* VirtualBox IOMMU device */
5229    PDMPciDevSetSubSystemVendorId(pPciDev,  IOMMU_PCI_VENDOR_ID);  /* AMD */
5230 PDMPciDevSetCapabilityList(pPciDev, IOMMU_PCI_OFF_CAP_HDR); /* Offset into capability registers. */
5231 PDMPciDevSetInterruptPin(pPciDev, 0x01); /* INTA#. */
5232 PDMPciDevSetInterruptLine(pPciDev, 0x00); /* For software compatibility; no effect on hardware. */
5233
5234 /* Capability Header. */
5235 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_CAP_HDR,
5236 RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_ID, 0xf) /* RO - Secure Device capability block */
5237 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_PTR, IOMMU_PCI_OFF_MSI_CAP_HDR) /* RO - Offset to next capability */
5238 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_TYPE, 0x3) /* RO - IOMMU capability block */
5239 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_REV, 0x1) /* RO - IOMMU interface revision */
5240 | RT_BF_MAKE(IOMMU_BF_CAPHDR_IOTLB_SUP, 0x0) /* RO - Remote IOTLB support */
5241 | RT_BF_MAKE(IOMMU_BF_CAPHDR_HT_TUNNEL, 0x0) /* RO - HyperTransport Tunnel support */
5242 | RT_BF_MAKE(IOMMU_BF_CAPHDR_NP_CACHE, 0x0) /* RO - Cache NP page table entries */
5243 | RT_BF_MAKE(IOMMU_BF_CAPHDR_EFR_SUP, 0x1) /* RO - Extended Feature Register support */
5244 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_EXT, 0x1)); /* RO - Misc. Information Register support */
5245
5246 /* Base Address Low Register. */
5247 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_LO, 0x0); /* RW - Base address (Lo) and enable bit. */
5248
5249 /* Base Address High Register. */
5250 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_HI, 0x0); /* RW - Base address (Hi) */
5251
5252 /* IOMMU Range Register. */
5253 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_RANGE_REG, 0x0); /* RW - Range register (implemented as RO by us). */
5254
5255 /* Misc. Information Register 0. */
5256 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MISCINFO_REG_0,
5257 RT_BF_MAKE(IOMMU_BF_MISCINFO_0_MSI_NUM, 0x0) /* RO - MSI number */
5258 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_GVA_SIZE, 0x2) /* RO - Guest Virt. Addr size (2=48 bits) */
5259 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_PA_SIZE, 0x30) /* RO - Physical Addr size (48 bits) */
5260 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_VA_SIZE, 0x40) /* RO - Virt. Addr size (64 bits) */
5261 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_HT_ATS_RESV, 0x0) /* RW - HT ATS reserved */
5262 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_MSI_NUM_PPR, 0x0)); /* RW - PPR interrupt number */
5263
5264 /* Misc. Information Register 1. */
5265    PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MISCINFO_REG_1, 0);
5266
5267 /* MSI Capability Header register. */
5268 PDMMSIREG MsiReg;
5269 RT_ZERO(MsiReg);
5270 MsiReg.cMsiVectors = 1;
5271 MsiReg.iMsiCapOffset = IOMMU_PCI_OFF_MSI_CAP_HDR;
5272 MsiReg.iMsiNextOffset = 0; /* IOMMU_PCI_OFF_MSI_MAP_CAP_HDR */
5273 MsiReg.fMsi64bit = 1; /* 64-bit addressing support is mandatory; See AMD spec. 2.8 "IOMMU Interrupt Support". */
5274 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
5275 AssertRCReturn(rc, rc);
5276
5277 /* MSI Address (Lo, Hi) and MSI data are read-write PCI config registers handled by our generic PCI config space code. */
5278#if 0
5279 /* MSI Address Lo. */
5280 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO, 0); /* RW - MSI message address (Lo). */
5281 /* MSI Address Hi. */
5282 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI, 0); /* RW - MSI message address (Hi). */
5283 /* MSI Data. */
5284 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA, 0); /* RW - MSI data. */
5285#endif
5286
5287#if 0
5288 /** @todo IOMMU: I don't know if we need to support this, enable later if
5289 * required. */
5290 /* MSI Mapping Capability Header register. */
5291 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_MAP_CAP_HDR,
5292 RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_CAP_ID, 0x8) /* RO - Capability ID */
5293 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_CAP_PTR, 0x0) /* RO - Offset to next capability (NULL) */
5294 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_EN, 0x1) /* RO - MSI mapping capability enable */
5295 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_FIXED, 0x1) /* RO - MSI mapping range is fixed */
5296 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_CAP_TYPE, 0x15)); /* RO - MSI mapping capability */
5297 /* When implementing don't forget to copy this to its MMIO shadow register (MsiMapCapHdr) in iommuAmdR3Init. */
5298#endif
5299
5300 /*
5301 * Register the PCI function with PDM.
5302 */
5303 rc = PDMDevHlpPCIRegisterEx(pDevIns, pPciDev, 0 /* fFlags */, uPciDevice, uPciFunction, "amd-iommu");
5304 AssertLogRelRCReturn(rc, rc);
5305
5306 /*
5307 * Intercept PCI config. space accesses.
5308 */
5309 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, iommuAmdR3PciConfigRead, iommuAmdR3PciConfigWrite);
5310 AssertLogRelRCReturn(rc, rc);
5311
5312 /*
5313 * Create the MMIO region.
5314 * Mapping of the region is done when software configures it via PCI config space.
5315 */
5316 rc = PDMDevHlpMmioCreate(pDevIns, IOMMU_MMIO_REGION_SIZE, pPciDev, 0 /* iPciRegion */, iommuAmdMmioWrite, iommuAmdMmioRead,
5317 NULL /* pvUser */, IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_DWORD_QWORD_ZEROED,
5318 "AMD-IOMMU", &pThis->hMmio);
5319 AssertLogRelRCReturn(rc, rc);
5320
5321 /*
5322 * Register saved state.
5323 */
5324 rc = PDMDevHlpSSMRegisterEx(pDevIns, IOMMU_SAVED_STATE_VERSION, sizeof(IOMMU), NULL,
5325 NULL, NULL, NULL,
5326 NULL, iommuAmdR3SaveExec, NULL,
5327 NULL, iommuAmdR3LoadExec, NULL);
5328 AssertLogRelRCReturn(rc, rc);
5329
5330 /*
5331 * Register debugger info item.
5332 */
5333 rc = PDMDevHlpDBGFInfoRegister(pDevIns, "iommu", "Display IOMMU state.", iommuAmdR3DbgInfo);
5334 AssertLogRelRCReturn(rc, rc);
5335
5336 /*
5337 * Create the command thread and its event semaphore.
5338 */
5339 rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->pCmdThread, pThis, iommuAmdR3CmdThread, iommuAmdR3CmdThreadWakeUp,
5340 0 /* cbStack */, RTTHREADTYPE_IO, "AMD-IOMMU");
5341 AssertLogRelRCReturn(rc, rc);
5342
5343 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEvtCmdThread);
5344 AssertLogRelRCReturn(rc, rc);
5345
5346 /*
5347 * Initialize read-only registers.
5348 */
5349 /** @todo Don't remove the =0 assignment for now. It's just there so it's easier
5350 * for me to see existing features that we might want to implement. Do it
5351 * later. */
5352 pThis->ExtFeat.u64 = 0;
5353 pThis->ExtFeat.n.u1PrefetchSup = 0;
5354 pThis->ExtFeat.n.u1PprSup = 0;
5355 pThis->ExtFeat.n.u1X2ApicSup = 0;
5356 pThis->ExtFeat.n.u1NoExecuteSup = 0;
5357 pThis->ExtFeat.n.u1GstTranslateSup = 0;
5358 pThis->ExtFeat.n.u1InvAllSup = 0;
5359 pThis->ExtFeat.n.u1GstVirtApicSup = 0;
5360 pThis->ExtFeat.n.u1HwErrorSup = 1;
5361 pThis->ExtFeat.n.u1PerfCounterSup = 0;
5362 pThis->ExtFeat.n.u2HostAddrTranslateSize = IOMMU_MAX_HOST_PT_LEVEL;
5363 pThis->ExtFeat.n.u2GstAddrTranslateSize = 0; /* Requires GstTranslateSup. */
5364 pThis->ExtFeat.n.u2GstCr3RootTblLevel = 0; /* Requires GstTranslateSup. */
5365 pThis->ExtFeat.n.u2SmiFilterSup = 0;
5366 pThis->ExtFeat.n.u3SmiFilterCount = 0;
5367 pThis->ExtFeat.n.u3GstVirtApicModeSup = 0; /* Requires GstVirtApicSup */
5368 pThis->ExtFeat.n.u2DualPprLogSup = 0;
5369 pThis->ExtFeat.n.u2DualEvtLogSup = 0;
5370 pThis->ExtFeat.n.u5MaxPasidSup = 0; /* Requires GstTranslateSup. */
5371 pThis->ExtFeat.n.u1UserSupervisorSup = 0;
5372 AssertCompile(IOMMU_MAX_DEV_TAB_SEGMENTS <= 3);
5373 pThis->ExtFeat.n.u2DevTabSegSup = IOMMU_MAX_DEV_TAB_SEGMENTS;
5374 pThis->ExtFeat.n.u1PprLogOverflowWarn = 0;
5375 pThis->ExtFeat.n.u1PprAutoRespSup = 0;
5376 pThis->ExtFeat.n.u2MarcSup = 0;
5377 pThis->ExtFeat.n.u1BlockStopMarkSup = 0;
5378 pThis->ExtFeat.n.u1PerfOptSup = 0;
5379 pThis->ExtFeat.n.u1MsiCapMmioSup = 1;
5380 pThis->ExtFeat.n.u1GstIoSup = 0;
5381 pThis->ExtFeat.n.u1HostAccessSup = 0;
5382 pThis->ExtFeat.n.u1EnhancedPprSup = 0;
5383 pThis->ExtFeat.n.u1AttrForwardSup = 0;
5384 pThis->ExtFeat.n.u1HostDirtySup = 0;
5385 pThis->ExtFeat.n.u1InvIoTlbTypeSup = 0;
5386 pThis->ExtFeat.n.u1GstUpdateDisSup = 0;
5387 pThis->ExtFeat.n.u1ForcePhysDstSup = 0;
5388
5389 pThis->RsvdReg = 0;
5390
5391 /*
5392 * Initialize parts of the IOMMU state as it would during reset.
5393 * Must be called -after- initializing PCI config. space registers.
5394 */
5395 iommuAmdR3Reset(pDevIns);
5396
5397 return VINF_SUCCESS;
5398}
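
/*
 * Configuration sketch (illustrative): the constructor above reads the optional CFGM keys
 * "Device" (default 0) and "Function" (default 2) to pick the PCI slot the IOMMU function is
 * registered at. Assuming the usual VBoxInternal extradata-to-CFGM mapping, they could be
 * overridden with, for instance:
 *     VBoxManage setextradata "MyVM" "VBoxInternal/Devices/iommu-amd/0/Config/Function" 2
 * (shown for illustration only; this is not an officially supported configuration knob).
 */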
5399
5400# else /* !IN_RING3 */
5401
5402/**
5403 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
5404 */
5405static DECLCALLBACK(int) iommuAmdRZConstruct(PPDMDEVINS pDevIns)
5406{
5407 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
5408 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
5409 PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
5410
5411 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
5412
5413 /* Set up the MMIO RZ handlers. */
5414 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, iommuAmdMmioWrite, iommuAmdMmioRead, NULL /* pvUser */);
5415 AssertRCReturn(rc, rc);
5416
5417 /* Set up the IOMMU RZ callbacks. */
5418 PDMIOMMUREGCC IommuReg;
5419 RT_ZERO(IommuReg);
5420 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
5421 IommuReg.idxIommu = pThis->idxIommu;
5422 IommuReg.pfnMemRead = iommuAmdDeviceMemRead;
5423 IommuReg.pfnMemWrite = iommuAmdDeviceMemWrite;
5424 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
5425 rc = PDMDevHlpIommuSetUpContext(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp));
5426 AssertRCReturn(rc, rc);
5427
5428 return VINF_SUCCESS;
5429}
5430
5431# endif /* !IN_RING3 */
5432
5433/**
5434 * The device registration structure.
5435 */
5436const PDMDEVREG g_DeviceIommuAmd =
5437{
5438 /* .u32Version = */ PDM_DEVREG_VERSION,
5439 /* .uReserved0 = */ 0,
5440 /* .szName = */ "iommu-amd",
5441 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
5442 /* .fClass = */ PDM_DEVREG_CLASS_BUS_ISA, /* Instantiate after PDM_DEVREG_CLASS_BUS_PCI */
5443 /* .cMaxInstances = */ ~0U,
5444 /* .uSharedVersion = */ 42,
5445 /* .cbInstanceShared = */ sizeof(IOMMU),
5446 /* .cbInstanceCC = */ sizeof(IOMMUCC),
5447 /* .cbInstanceRC = */ sizeof(IOMMURC),
5448 /* .cMaxPciDevices = */ 1,
5449 /* .cMaxMsixVectors = */ 0,
5450 /* .pszDescription = */ "IOMMU (AMD)",
5451#if defined(IN_RING3)
5452 /* .pszRCMod = */ "VBoxDDRC.rc",
5453 /* .pszR0Mod = */ "VBoxDDR0.r0",
5454 /* .pfnConstruct = */ iommuAmdR3Construct,
5455 /* .pfnDestruct = */ iommuAmdR3Destruct,
5456 /* .pfnRelocate = */ NULL,
5457 /* .pfnMemSetup = */ NULL,
5458 /* .pfnPowerOn = */ NULL,
5459 /* .pfnReset = */ iommuAmdR3Reset,
5460 /* .pfnSuspend = */ NULL,
5461 /* .pfnResume = */ NULL,
5462 /* .pfnAttach = */ NULL,
5463 /* .pfnDetach = */ NULL,
5464 /* .pfnQueryInterface = */ NULL,
5465 /* .pfnInitComplete = */ NULL,
5466 /* .pfnPowerOff = */ NULL,
5467 /* .pfnSoftReset = */ NULL,
5468 /* .pfnReserved0 = */ NULL,
5469 /* .pfnReserved1 = */ NULL,
5470 /* .pfnReserved2 = */ NULL,
5471 /* .pfnReserved3 = */ NULL,
5472 /* .pfnReserved4 = */ NULL,
5473 /* .pfnReserved5 = */ NULL,
5474 /* .pfnReserved6 = */ NULL,
5475 /* .pfnReserved7 = */ NULL,
5476#elif defined(IN_RING0)
5477 /* .pfnEarlyConstruct = */ NULL,
5478 /* .pfnConstruct = */ iommuAmdRZConstruct,
5479 /* .pfnDestruct = */ NULL,
5480 /* .pfnFinalDestruct = */ NULL,
5481 /* .pfnRequest = */ NULL,
5482 /* .pfnReserved0 = */ NULL,
5483 /* .pfnReserved1 = */ NULL,
5484 /* .pfnReserved2 = */ NULL,
5485 /* .pfnReserved3 = */ NULL,
5486 /* .pfnReserved4 = */ NULL,
5487 /* .pfnReserved5 = */ NULL,
5488 /* .pfnReserved6 = */ NULL,
5489 /* .pfnReserved7 = */ NULL,
5490#elif defined(IN_RC)
5491 /* .pfnConstruct = */ iommuAmdRZConstruct,
5492 /* .pfnReserved0 = */ NULL,
5493 /* .pfnReserved1 = */ NULL,
5494 /* .pfnReserved2 = */ NULL,
5495 /* .pfnReserved3 = */ NULL,
5496 /* .pfnReserved4 = */ NULL,
5497 /* .pfnReserved5 = */ NULL,
5498 /* .pfnReserved6 = */ NULL,
5499 /* .pfnReserved7 = */ NULL,
5500#else
5501# error "Not in IN_RING3, IN_RING0 or IN_RC!"
5502#endif
5503 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
5504};
5505
5506#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
5507