VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp@ 86147

Last change on this file since 86147 was 86145, checked in by vboxsync, 4 years ago

AMD IOMMU: bugref:9654 Fix unmapping MMIO region when the region address changes, resetting IOMMU base address register to 0.
Also fix setting the PCI Bus master bit on VM reset. The VM now seems to behave identically on VM reset wrt the IOMMU.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 194.0 KB
1/* $Id: DevIommuAmd.cpp 86145 2020-09-17 10:31:47Z vboxsync $ */
2/** @file
3 * IOMMU - Input/Output Memory Management Unit - AMD implementation.
4 */
5
6/*
7 * Copyright (C) 2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_IOMMU
23#include <VBox/msi.h>
24#include <VBox/iommu-amd.h>
25#include <VBox/vmm/pdmdev.h>
26#include <VBox/AssertGuest.h>
27
28#include <iprt/x86.h>
29#include <iprt/alloc.h>
30#include <iprt/string.h>
31
32#include "VBoxDD.h"
33#include "DevIommuAmd.h"
34
35
36/*********************************************************************************************************************************
37* Defined Constants And Macros *
38*********************************************************************************************************************************/
39/** Release log prefix string. */
40#define IOMMU_LOG_PFX "IOMMU-AMD"
41/** The current saved state version. */
42#define IOMMU_SAVED_STATE_VERSION 1
43/** The IOTLB entry magic. */
44#define IOMMU_IOTLBE_MAGIC 0x10acce55
45
46
47/*********************************************************************************************************************************
48* Structures and Typedefs *
49*********************************************************************************************************************************/
50/**
51 * Acquires the IOMMU PDM lock.
52 * This will make a long jump to ring-3 to acquire the lock if necessary.
53 */
54#define IOMMU_LOCK(a_pDevIns) \
55 do { \
56 int rcLock = PDMDevHlpCritSectEnter((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo), VINF_SUCCESS); \
57 if (RT_LIKELY(rcLock == VINF_SUCCESS)) \
58 { /* likely */ } \
59 else \
60 return rcLock; \
61 } while (0)
62
63/**
64 * Acquires the IOMMU PDM lock (asserts on failure rather than returning an error).
65 * This will make a long jump to ring-3 to acquire the lock if necessary.
66 */
67#define IOMMU_LOCK_NORET(a_pDevIns) \
68 do { \
69 int rcLock = PDMDevHlpCritSectEnter((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo), VINF_SUCCESS); \
70 AssertRC(rcLock); \
71 } while (0)
72
73/**
74 * Releases the IOMMU PDM lock.
75 */
76#define IOMMU_UNLOCK(a_pDevIns) \
77 do { \
78 PDMDevHlpCritSectLeave((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo)); \
79 } while (0)
80
81/**
82 * Asserts that the critsect is owned by this thread.
83 */
84#define IOMMU_ASSERT_LOCKED(a_pDevIns) \
85 do { \
 86 Assert(PDMDevHlpCritSectIsOwner((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo))); \
87 } while (0)
88
89/**
90 * Asserts that the critsect is not owned by this thread.
91 */
92#define IOMMU_ASSERT_NOT_LOCKED(a_pDevIns) \
93 do { \
 94 Assert(!PDMDevHlpCritSectIsOwner((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo))); \
95 } while (0)
96
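/*
 * Illustrative sketch (not part of the device code): typical usage of the locking
 * macros above. IOMMU_LOCK propagates the status code to the caller on failure,
 * which suits MMIO handlers; IOMMU_LOCK_NORET suits code paths that cannot return
 * a status code. The handler name below is hypothetical.
 */
#if 0
static VBOXSTRICTRC iommuAmdExampleMmioHandler(PPDMDEVINS pDevIns)
{
    IOMMU_LOCK(pDevIns);    /* Returns rcLock to the caller if the lock cannot be acquired. */
    /* ... access the shared IOMMU state here ... */
    IOMMU_UNLOCK(pDevIns);
    return VINF_SUCCESS;
}
#endif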
97/**
98 * IOMMU operations (transaction) types.
99 */
100typedef enum IOMMUOP
101{
102 /** Address translation request. */
103 IOMMUOP_TRANSLATE_REQ = 0,
104 /** Memory read request. */
105 IOMMUOP_MEM_READ,
106 /** Memory write request. */
107 IOMMUOP_MEM_WRITE,
108 /** Interrupt request. */
109 IOMMUOP_INTR_REQ,
110 /** Command. */
111 IOMMUOP_CMD
112} IOMMUOP;
113AssertCompileSize(IOMMUOP, 4);
114
115/**
116 * I/O page walk result.
117 */
118typedef struct
119{
120 /** The translated system physical address. */
121 RTGCPHYS GCPhysSpa;
122 /** The number of offset bits in the system physical address. */
123 uint8_t cShift;
124 /** The I/O permissions allowed by the translation (IOMMU_IO_PERM_XXX). */
125 uint8_t fIoPerm;
126 /** Padding. */
127 uint8_t abPadding[2];
128} IOWALKRESULT;
129/** Pointer to an I/O walk result struct. */
130typedef IOWALKRESULT *PIOWALKRESULT;
131/** Pointer to a const I/O walk result struct. */
132typedef IOWALKRESULT const *PCIOWALKRESULT;
133
134/**
135 * IOMMU I/O TLB Entry.
136 * Keep this as small and aligned as possible.
137 */
138typedef struct
139{
140 /** The translated system physical address (SPA) of the page. */
141 RTGCPHYS GCPhysSpa;
142 /** The index of the 4K page within a large page. */
143 uint32_t idxSubPage;
144 /** The I/O access permissions (IOMMU_IO_PERM_XXX). */
145 uint8_t fIoPerm;
146 /** The number of offset bits in the translation indicating page size. */
147 uint8_t cShift;
148 /** Alignment padding. */
149 uint8_t afPadding[2];
150} IOTLBE_T;
151AssertCompileSize(IOTLBE_T, 16);
152/** Pointer to an IOMMU I/O TLB entry struct. */
153typedef IOTLBE_T *PIOTLBE_T;
154/** Pointer to a const IOMMU I/O TLB entry struct. */
155typedef IOTLBE_T const *PCIOTLBE_T;
156
157/**
158 * The shared IOMMU device state.
159 */
160typedef struct IOMMU
161{
162 /** IOMMU device index (0 is at the top of the PCI tree hierarchy). */
163 uint32_t idxIommu;
164 /** Alignment padding. */
165 uint32_t uPadding0;
166
167 /** Whether the command thread is sleeping. */
168 bool volatile fCmdThreadSleeping;
169 /** Alignment padding. */
170 uint8_t afPadding0[3];
171 /** Whether the command thread has been signaled for wake up. */
172 bool volatile fCmdThreadSignaled;
173 /** Alignment padding. */
174 uint8_t afPadding1[3];
175
176 /** The event semaphore the command thread waits on. */
177 SUPSEMEVENT hEvtCmdThread;
178 /** The MMIO handle. */
179 IOMMMIOHANDLE hMmio;
180
181 /** @name PCI: Base capability block registers.
182 * @{ */
183 IOMMU_BAR_T IommuBar; /**< IOMMU base address register. */
184 /** @} */
185
186 /** @name MMIO: Control and status registers.
187 * @{ */
188 DEV_TAB_BAR_T aDevTabBaseAddrs[8]; /**< Device table base address registers. */
189 CMD_BUF_BAR_T CmdBufBaseAddr; /**< Command buffer base address register. */
190 EVT_LOG_BAR_T EvtLogBaseAddr; /**< Event log base address register. */
191 IOMMU_CTRL_T Ctrl; /**< IOMMU control register. */
192 IOMMU_EXCL_RANGE_BAR_T ExclRangeBaseAddr; /**< IOMMU exclusion range base register. */
193 IOMMU_EXCL_RANGE_LIMIT_T ExclRangeLimit; /**< IOMMU exclusion range limit. */
194 IOMMU_EXT_FEAT_T ExtFeat; /**< IOMMU extended feature register. */
195 /** @} */
196
197 /** @name MMIO: PPR Log registers.
198 * @{ */
199 PPR_LOG_BAR_T PprLogBaseAddr; /**< PPR Log base address register. */
200 IOMMU_HW_EVT_HI_T HwEvtHi; /**< IOMMU hardware event register (Hi). */
201 IOMMU_HW_EVT_LO_T HwEvtLo; /**< IOMMU hardware event register (Lo). */
202 IOMMU_HW_EVT_STATUS_T HwEvtStatus; /**< IOMMU hardware event status. */
203 /** @} */
204
205 /** @todo IOMMU: SMI filter. */
206
207 /** @name MMIO: Guest Virtual-APIC Log registers.
208 * @{ */
209 GALOG_BAR_T GALogBaseAddr; /**< Guest Virtual-APIC Log base address register. */
210 GALOG_TAIL_ADDR_T GALogTailAddr; /**< Guest Virtual-APIC Log Tail address register. */
211 /** @} */
212
213 /** @name MMIO: Alternate PPR and Event Log registers.
214 * @{ */
215 PPR_LOG_B_BAR_T PprLogBBaseAddr; /**< PPR Log B base address register. */
216 EVT_LOG_B_BAR_T EvtLogBBaseAddr; /**< Event Log B base address register. */
217 /** @} */
218
219 /** @name MMIO: Device-specific feature registers.
220 * @{ */
221 DEV_SPECIFIC_FEAT_T DevSpecificFeat; /**< Device-specific feature extension register (DSFX). */
222 DEV_SPECIFIC_CTRL_T DevSpecificCtrl; /**< Device-specific control extension register (DSCX). */
223 DEV_SPECIFIC_STATUS_T DevSpecificStatus; /**< Device-specific status extension register (DSSX). */
224 /** @} */
225
226 /** @name MMIO: MSI Capability Block registers.
227 * @{ */
228 MSI_MISC_INFO_T MiscInfo; /**< MSI Misc. info registers / MSI Vector registers. */
229 /** @} */
230
231 /** @name MMIO: Performance Optimization Control registers.
232 * @{ */
233 IOMMU_PERF_OPT_CTRL_T PerfOptCtrl; /**< IOMMU Performance optimization control register. */
234 /** @} */
235
236 /** @name MMIO: x2APIC Control registers.
237 * @{ */
238 IOMMU_XT_GEN_INTR_CTRL_T XtGenIntrCtrl; /**< IOMMU X2APIC General interrupt control register. */
239 IOMMU_XT_PPR_INTR_CTRL_T XtPprIntrCtrl; /**< IOMMU X2APIC PPR interrupt control register. */
240 IOMMU_XT_GALOG_INTR_CTRL_T XtGALogIntrCtrl; /**< IOMMU X2APIC Guest Log interrupt control register. */
241 /** @} */
242
243 /** @name MMIO: MARC registers.
244 * @{ */
245 MARC_APER_T aMarcApers[4]; /**< MARC Aperture Registers. */
246 /** @} */
247
248 /** @name MMIO: Reserved register.
249 * @{ */
250 IOMMU_RSVD_REG_T RsvdReg; /**< IOMMU Reserved Register. */
251 /** @} */
252
253 /** @name MMIO: Command and Event Log pointer registers.
254 * @{ */
255 CMD_BUF_HEAD_PTR_T CmdBufHeadPtr; /**< Command buffer head pointer register. */
256 CMD_BUF_TAIL_PTR_T CmdBufTailPtr; /**< Command buffer tail pointer register. */
257 EVT_LOG_HEAD_PTR_T EvtLogHeadPtr; /**< Event log head pointer register. */
258 EVT_LOG_TAIL_PTR_T EvtLogTailPtr; /**< Event log tail pointer register. */
259 /** @} */
260
261 /** @name MMIO: Command and Event Status register.
262 * @{ */
263 IOMMU_STATUS_T Status; /**< IOMMU status register. */
264 /** @} */
265
266 /** @name MMIO: PPR Log Head and Tail pointer registers.
267 * @{ */
268 PPR_LOG_HEAD_PTR_T PprLogHeadPtr; /**< IOMMU PPR log head pointer register. */
269 PPR_LOG_TAIL_PTR_T PprLogTailPtr; /**< IOMMU PPR log tail pointer register. */
270 /** @} */
271
272 /** @name MMIO: Guest Virtual-APIC Log Head and Tail pointer registers.
273 * @{ */
274 GALOG_HEAD_PTR_T GALogHeadPtr; /**< Guest Virtual-APIC log head pointer register. */
275 GALOG_TAIL_PTR_T GALogTailPtr; /**< Guest Virtual-APIC log tail pointer register. */
276 /** @} */
277
278 /** @name MMIO: PPR Log B Head and Tail pointer registers.
279 * @{ */
280 PPR_LOG_B_HEAD_PTR_T PprLogBHeadPtr; /**< PPR log B head pointer register. */
281 PPR_LOG_B_TAIL_PTR_T PprLogBTailPtr; /**< PPR log B tail pointer register. */
282 /** @} */
283
284 /** @name MMIO: Event Log B Head and Tail pointer registers.
285 * @{ */
286 EVT_LOG_B_HEAD_PTR_T EvtLogBHeadPtr; /**< Event log B head pointer register. */
287 EVT_LOG_B_TAIL_PTR_T EvtLogBTailPtr; /**< Event log B tail pointer register. */
288 /** @} */
289
290 /** @name MMIO: PPR Log Overflow protection registers.
291 * @{ */
292 PPR_LOG_AUTO_RESP_T PprLogAutoResp; /**< PPR Log Auto Response register. */
293 PPR_LOG_OVERFLOW_EARLY_T PprLogOverflowEarly; /**< PPR Log Overflow Early Indicator register. */
294 PPR_LOG_B_OVERFLOW_EARLY_T PprLogBOverflowEarly; /**< PPR Log B Overflow Early Indicator register. */
295 /** @} */
296
297 /** @todo IOMMU: IOMMU Event counter registers. */
298
299#ifdef VBOX_WITH_STATISTICS
300 /** @name IOMMU: Stat counters.
301 * @{ */
302 STAMCOUNTER StatMmioReadR3; /**< Number of MMIO reads in R3. */
303 STAMCOUNTER StatMmioReadRZ; /**< Number of MMIO reads in RZ. */
304
305 STAMCOUNTER StatMmioWriteR3; /**< Number of MMIO writes in R3. */
306 STAMCOUNTER StatMmioWriteRZ; /**< Number of MMIO writes in RZ. */
307
308 STAMCOUNTER StatMsiRemapR3; /**< Number of MSI remap requests in R3. */
309 STAMCOUNTER StatMsiRemapRZ; /**< Number of MSI remap requests in RZ. */
310
311 STAMCOUNTER StatCmd; /**< Number of commands processed. */
312 STAMCOUNTER StatCmdCompWait; /**< Number of Completion Wait commands processed. */
313 STAMCOUNTER StatCmdInvDte; /**< Number of Invalidate DTE commands processed. */
314 STAMCOUNTER StatCmdInvIommuPages; /**< Number of Invalidate IOMMU pages commands processed. */
315 STAMCOUNTER StatCmdInvIotlbPages; /**< Number of Invalidate IOTLB pages commands processed. */
316 STAMCOUNTER StatCmdInvIntrTable; /**< Number of Invalidate Interrupt Table commands processed. */
317 STAMCOUNTER StatCmdPrefIommuPages; /**< Number of Prefetch IOMMU Pages commands processed. */
318 STAMCOUNTER StatCmdCompletePprReq; /**< Number of Complete PPR Requests commands processed. */
319 STAMCOUNTER StatCmdInvIommuAll; /**< Number of Invalidate IOMMU All commands processed. */
320 /** @} */
321#endif
322} IOMMU;
323/** Pointer to the IOMMU device state. */
324typedef struct IOMMU *PIOMMU;
325/** Pointer to the const IOMMU device state. */
326typedef const struct IOMMU *PCIOMMU;
327AssertCompileMemberAlignment(IOMMU, fCmdThreadSleeping, 4);
328AssertCompileMemberAlignment(IOMMU, fCmdThreadSignaled, 4);
329AssertCompileMemberAlignment(IOMMU, hEvtCmdThread, 8);
330AssertCompileMemberAlignment(IOMMU, hMmio, 8);
331AssertCompileMemberAlignment(IOMMU, IommuBar, 8);
332
333/**
334 * The ring-3 IOMMU device state.
335 */
336typedef struct IOMMUR3
337{
338 /** Device instance. */
339 PPDMDEVINSR3 pDevInsR3;
340 /** The IOMMU helpers. */
341 PCPDMIOMMUHLPR3 pIommuHlpR3;
342 /** The command thread handle. */
343 R3PTRTYPE(PPDMTHREAD) pCmdThread;
344} IOMMUR3;
345/** Pointer to the ring-3 IOMMU device state. */
346typedef IOMMUR3 *PIOMMUR3;
347
348/**
349 * The ring-0 IOMMU device state.
350 */
351typedef struct IOMMUR0
352{
353 /** Device instance. */
354 PPDMDEVINSR0 pDevInsR0;
355 /** The IOMMU helpers. */
356 PCPDMIOMMUHLPR0 pIommuHlpR0;
357} IOMMUR0;
358/** Pointer to the ring-0 IOMMU device state. */
359typedef IOMMUR0 *PIOMMUR0;
360
361/**
362 * The raw-mode IOMMU device state.
363 */
364typedef struct IOMMURC
365{
366 /** Device instance. */
 367 PPDMDEVINSRC pDevInsRC;
368 /** The IOMMU helpers. */
369 PCPDMIOMMUHLPRC pIommuHlpRC;
370} IOMMURC;
371/** Pointer to the raw-mode IOMMU device state. */
372typedef IOMMURC *PIOMMURC;
373
374/** The IOMMU device state for the current context. */
375typedef CTX_SUFF(IOMMU) IOMMUCC;
376/** Pointer to the IOMMU device state for the current context. */
377typedef CTX_SUFF(PIOMMU) PIOMMUCC;
378
379/**
380 * IOMMU register access routines.
381 */
382typedef struct
383{
384 const char *pszName;
385 VBOXSTRICTRC (*pfnRead )(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t *pu64Value);
386 VBOXSTRICTRC (*pfnWrite)(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value);
387 bool f64BitReg;
388} IOMMUREGACC;
389
390
391/*********************************************************************************************************************************
392* Global Variables *
393*********************************************************************************************************************************/
394/**
395 * An array of the number of device table segments supported.
396 * Indexed by u2DevTabSegSup.
397 */
398static uint8_t const g_acDevTabSegs[] = { 0, 2, 4, 8 };
399
400/**
401 * An array of the masks to select the device table segment index from a device ID.
402 */
403static uint16_t const g_auDevTabSegMasks[] = { 0x0, 0x8000, 0xc000, 0xe000 };
404
405/**
406 * An array of the shift values to select the device table segment index from a
407 * device ID.
408 */
409static uint8_t const g_auDevTabSegShifts[] = { 0, 15, 14, 13 };
410
411/**
412 * The maximum size (inclusive) of each device table segment (0 to 7).
413 * Indexed by the device table segment index.
414 */
415static uint16_t const g_auDevTabSegMaxSizes[] = { 0x1ff, 0xff, 0x7f, 0x7f, 0x3f, 0x3f, 0x3f, 0x3f };
416
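/*
 * Illustrative sketch (not part of the device code): how the tables above combine to
 * select a device table segment index from a device ID. For example, with
 * u2DevTabSegSup = 2 (4 segments supported), device ID 0xc123 selects segment
 * (0xc123 & 0xc000) >> 14 = 3. The helper name below is hypothetical.
 */
#if 0
static uint8_t iommuAmdExampleDevTabSegIdx(uint16_t uDevId, uint8_t u2DevTabSegSup)
{
    Assert(u2DevTabSegSup < RT_ELEMENTS(g_auDevTabSegMasks));
    return (uint8_t)((uDevId & g_auDevTabSegMasks[u2DevTabSegSup]) >> g_auDevTabSegShifts[u2DevTabSegSup]);
}
#endif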
417
418#ifndef VBOX_DEVICE_STRUCT_TESTCASE
419/**
420 * Gets the maximum number of buffer entries for the given buffer length.
421 *
422 * @returns Number of buffer entries.
423 * @param uEncodedLen The length (power-of-2 encoded).
424 */
425DECLINLINE(uint32_t) iommuAmdGetBufMaxEntries(uint8_t uEncodedLen)
426{
427 Assert(uEncodedLen > 7);
428 return 2 << (uEncodedLen - 1);
429}
430
431
432/**
433 * Gets the total length of the buffer given a base register's encoded length.
434 *
435 * @returns The length of the buffer in bytes.
436 * @param uEncodedLen The length (power-of-2 encoded).
437 */
438DECLINLINE(uint32_t) iommuAmdGetTotalBufLength(uint8_t uEncodedLen)
439{
440 Assert(uEncodedLen > 7);
441 return (2 << (uEncodedLen - 1)) << 4;
442}
443
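/*
 * Worked example (not part of the device code): for the minimum encoding
 * uEncodedLen = 8, the helpers above yield 2 << 7 = 256 entries and
 * 256 << 4 = 4096 bytes, i.e. 256 16-byte entries filling exactly one 4K page.
 */
#if 0
uint32_t const cEntries = iommuAmdGetBufMaxEntries(8);   /* 2 << 7   = 256  */
uint32_t const cbBuf    = iommuAmdGetTotalBufLength(8);  /* 256 << 4 = 4096 */
#endif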
444
445/**
446 * Gets the number of (unconsumed) entries in the event log.
447 *
448 * @returns The number of entries in the event log.
449 * @param pThis The IOMMU device state.
450 */
451static uint32_t iommuAmdGetEvtLogEntryCount(PIOMMU pThis)
452{
453 uint32_t const idxTail = pThis->EvtLogTailPtr.n.off >> IOMMU_EVT_GENERIC_SHIFT;
454 uint32_t const idxHead = pThis->EvtLogHeadPtr.n.off >> IOMMU_EVT_GENERIC_SHIFT;
455 if (idxTail >= idxHead)
456 return idxTail - idxHead;
457
458 uint32_t const cMaxEvts = iommuAmdGetBufMaxEntries(pThis->EvtLogBaseAddr.n.u4Len);
459 return cMaxEvts - idxHead + idxTail;
460}
461
462
463/**
464 * Gets the number of (unconsumed) commands in the command buffer.
465 *
466 * @returns The number of commands in the command buffer.
467 * @param pThis The IOMMU device state.
468 */
469static uint32_t iommuAmdGetCmdBufEntryCount(PIOMMU pThis)
470{
471 uint32_t const idxTail = pThis->CmdBufTailPtr.n.off >> IOMMU_CMD_GENERIC_SHIFT;
472 uint32_t const idxHead = pThis->CmdBufHeadPtr.n.off >> IOMMU_CMD_GENERIC_SHIFT;
473 if (idxTail >= idxHead)
474 return idxTail - idxHead;
475
476 uint32_t const cMaxCmds = iommuAmdGetBufMaxEntries(pThis->CmdBufBaseAddr.n.u4Len);
477 return cMaxCmds - idxHead + idxTail;
478}
479
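/*
 * Worked example (not part of the device code): the two counting helpers above
 * account for circular-buffer wrap-around. With a 256-entry buffer, a head index
 * of 250 and a tail index of 10, the number of unconsumed entries is
 * 256 - 250 + 10 = 16.
 */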
480
481DECL_FORCE_INLINE(IOMMU_STATUS_T) iommuAmdGetStatus(PCIOMMU pThis)
482{
483 IOMMU_STATUS_T Status;
484 Status.u64 = ASMAtomicReadU64((volatile uint64_t *)&pThis->Status.u64);
485 return Status;
486}
487
488
489DECL_FORCE_INLINE(IOMMU_CTRL_T) iommuAmdGetCtrl(PCIOMMU pThis)
490{
491 IOMMU_CTRL_T Ctrl;
492 Ctrl.u64 = ASMAtomicReadU64((volatile uint64_t *)&pThis->Ctrl.u64);
493 return Ctrl;
494}
495
496
497/**
498 * Returns whether MSI is enabled for the IOMMU.
499 *
500 * @returns Whether MSI is enabled.
501 * @param pDevIns The IOMMU device instance.
502 *
503 * @note There should be a PCIDevXxx function for this.
504 */
505static bool iommuAmdIsMsiEnabled(PPDMDEVINS pDevIns)
506{
507 MSI_CAP_HDR_T MsiCapHdr;
508 MsiCapHdr.u32 = PDMPciDevGetDWord(pDevIns->apPciDevs[0], IOMMU_PCI_OFF_MSI_CAP_HDR);
509 return MsiCapHdr.n.u1MsiEnable;
510}
511
512
513/**
514 * Signals a PCI target abort.
515 *
516 * @param pDevIns The IOMMU device instance.
517 */
518static void iommuAmdSetPciTargetAbort(PPDMDEVINS pDevIns)
519{
520 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
521 uint16_t const u16Status = PDMPciDevGetStatus(pPciDev) | VBOX_PCI_STATUS_SIG_TARGET_ABORT;
522 PDMPciDevSetStatus(pPciDev, u16Status);
523}
524
525
526/**
527 * Wakes up the command thread if there are commands to be processed or if
528 * processing is requested to be stopped by software.
529 *
530 * @param pDevIns The IOMMU device instance.
531 */
532static void iommuAmdCmdThreadWakeUpIfNeeded(PPDMDEVINS pDevIns)
533{
534 IOMMU_ASSERT_LOCKED(pDevIns);
535 Log5Func(("\n"));
536
537 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
538 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
539 if (Status.n.u1CmdBufRunning)
540 {
541 Log5Func(("Signaling command thread\n"));
542 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtCmdThread);
543 }
544}
545
546
547/**
548 * Writes to a read-only register.
549 */
550static VBOXSTRICTRC iommuAmdIgnore_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
551{
552 RT_NOREF(pDevIns, pThis, iReg, u64Value);
553 LogFunc(("Write to read-only register (%#x) with value %#RX64 ignored\n", iReg, u64Value));
554 return VINF_SUCCESS;
555}
556
557
558/**
559 * Writes the Device Table Base Address Register.
560 */
561static VBOXSTRICTRC iommuAmdDevTabBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
562{
563 RT_NOREF(pDevIns, iReg);
564
565 /* Mask out all unrecognized bits. */
566 u64Value &= IOMMU_DEV_TAB_BAR_VALID_MASK;
567
568 /* Update the register. */
569 pThis->aDevTabBaseAddrs[0].u64 = u64Value;
570 return VINF_SUCCESS;
571}
572
573
574/**
575 * Writes the Command Buffer Base Address Register.
576 */
577static VBOXSTRICTRC iommuAmdCmdBufBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
578{
579 RT_NOREF(pDevIns, iReg);
580
581 /*
582 * While this is not explicitly specified like the event log base address register,
583 * the AMD spec. does specify "CmdBufRun must be 0b to modify the command buffer registers properly".
584 * Inconsistent specs :/
585 */
586 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
587 if (Status.n.u1CmdBufRunning)
588 {
589 LogFunc(("Setting CmdBufBar (%#RX64) when command buffer is running -> Ignored\n", u64Value));
590 return VINF_SUCCESS;
591 }
592
593 /* Mask out all unrecognized bits. */
594 CMD_BUF_BAR_T CmdBufBaseAddr;
595 CmdBufBaseAddr.u64 = u64Value & IOMMU_CMD_BUF_BAR_VALID_MASK;
596
597 /* Validate the length. */
598 if (CmdBufBaseAddr.n.u4Len >= 8)
599 {
600 /* Update the register. */
601 pThis->CmdBufBaseAddr.u64 = CmdBufBaseAddr.u64;
602
603 /*
 604 * Writing the command buffer base address clears the command buffer head and tail pointers.
605 * See AMD spec. 2.4 "Commands".
606 */
607 pThis->CmdBufHeadPtr.u64 = 0;
608 pThis->CmdBufTailPtr.u64 = 0;
609 }
610 else
611 LogFunc(("Command buffer length (%#x) invalid -> Ignored\n", CmdBufBaseAddr.n.u4Len));
612
613 return VINF_SUCCESS;
614}
615
616
617/**
618 * Writes the Event Log Base Address Register.
619 */
620static VBOXSTRICTRC iommuAmdEvtLogBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
621{
622 RT_NOREF(pDevIns, iReg);
623
624 /*
625 * IOMMU behavior is undefined when software writes this register when event logging is running.
626 * In our emulation, we ignore the write entirely.
627 * See AMD IOMMU spec. "Event Log Base Address Register".
628 */
629 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
630 if (Status.n.u1EvtLogRunning)
631 {
632 LogFunc(("Setting EvtLogBar (%#RX64) when event logging is running -> Ignored\n", u64Value));
633 return VINF_SUCCESS;
634 }
635
636 /* Mask out all unrecognized bits. */
637 u64Value &= IOMMU_EVT_LOG_BAR_VALID_MASK;
638 EVT_LOG_BAR_T EvtLogBaseAddr;
639 EvtLogBaseAddr.u64 = u64Value;
640
641 /* Validate the length. */
642 if (EvtLogBaseAddr.n.u4Len >= 8)
643 {
644 /* Update the register. */
645 pThis->EvtLogBaseAddr.u64 = EvtLogBaseAddr.u64;
646
647 /*
 648 * Writing the event log base address clears the event log head and tail pointers.
649 * See AMD spec. 2.5 "Event Logging".
650 */
651 pThis->EvtLogHeadPtr.u64 = 0;
652 pThis->EvtLogTailPtr.u64 = 0;
653 }
654 else
655 LogFunc(("Event log length (%#x) invalid -> Ignored\n", EvtLogBaseAddr.n.u4Len));
656
657 return VINF_SUCCESS;
658}
659
660
661/**
662 * Writes the Control Register.
663 */
664static VBOXSTRICTRC iommuAmdCtrl_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
665{
666 RT_NOREF(pDevIns, iReg);
667
668 /* Mask out all unrecognized bits. */
669 u64Value &= IOMMU_CTRL_VALID_MASK;
670
671 IOMMU_CTRL_T const OldCtrl = iommuAmdGetCtrl(pThis);
672 IOMMU_CTRL_T NewCtrl;
673 NewCtrl.u64 = u64Value;
674
675 /* Update the register. */
676 ASMAtomicWriteU64(&pThis->Ctrl.u64, NewCtrl.u64);
677
678 bool const fNewIommuEn = NewCtrl.n.u1IommuEn;
679 bool const fOldIommuEn = OldCtrl.n.u1IommuEn;
680
681 /* Enable or disable event logging when the bit transitions. */
682 bool const fOldEvtLogEn = OldCtrl.n.u1EvtLogEn;
683 bool const fNewEvtLogEn = NewCtrl.n.u1EvtLogEn;
684 if ( fOldEvtLogEn != fNewEvtLogEn
685 || fOldIommuEn != fNewIommuEn)
686 {
687 if ( fNewIommuEn
688 && fNewEvtLogEn)
689 {
690 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_EVT_LOG_OVERFLOW);
691 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_EVT_LOG_RUNNING);
692 }
693 else
694 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_EVT_LOG_RUNNING);
695 }
696
697 /* Enable or disable command buffer processing when the bit transitions. */
698 bool const fOldCmdBufEn = OldCtrl.n.u1CmdBufEn;
699 bool const fNewCmdBufEn = NewCtrl.n.u1CmdBufEn;
700 if ( fOldCmdBufEn != fNewCmdBufEn
701 || fOldIommuEn != fNewIommuEn)
702 {
703 if ( fNewCmdBufEn
704 && fNewIommuEn)
705 {
706 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_CMD_BUF_RUNNING);
707 LogFunc(("Command buffer enabled\n"));
708
709 /* Wake up the command thread to start processing commands. */
710 iommuAmdCmdThreadWakeUpIfNeeded(pDevIns);
711 }
712 else
713 {
714 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_CMD_BUF_RUNNING);
715 LogFunc(("Command buffer disabled\n"));
716 }
717 }
718
719 return VINF_SUCCESS;
720}
721
722
723/**
 724 * Writes to the Exclusion Range Base Address Register.
725 */
726static VBOXSTRICTRC iommuAmdExclRangeBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
727{
728 RT_NOREF(pDevIns, iReg);
729 pThis->ExclRangeBaseAddr.u64 = u64Value & IOMMU_EXCL_RANGE_BAR_VALID_MASK;
730 return VINF_SUCCESS;
731}
732
733
734/**
 735 * Writes to the Exclusion Range Limit Register.
736 */
737static VBOXSTRICTRC iommuAmdExclRangeLimit_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
738{
739 RT_NOREF(pDevIns, iReg);
740 u64Value &= IOMMU_EXCL_RANGE_LIMIT_VALID_MASK;
741 u64Value |= UINT64_C(0xfff);
742 pThis->ExclRangeLimit.u64 = u64Value;
743 return VINF_SUCCESS;
744}
745
746
747/**
748 * Writes the Hardware Event Register (Hi).
749 */
750static VBOXSTRICTRC iommuAmdHwEvtHi_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
751{
752 /** @todo IOMMU: Why the heck is this marked read/write by the AMD IOMMU spec? */
753 RT_NOREF(pDevIns, iReg);
754 LogFlowFunc(("Writing %#RX64 to hardware event (Hi) register!\n", u64Value));
755 pThis->HwEvtHi.u64 = u64Value;
756 return VINF_SUCCESS;
757}
758
759
760/**
761 * Writes the Hardware Event Register (Lo).
762 */
763static VBOXSTRICTRC iommuAmdHwEvtLo_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
764{
765 /** @todo IOMMU: Why the heck is this marked read/write by the AMD IOMMU spec? */
766 RT_NOREF(pDevIns, iReg);
767 LogFlowFunc(("Writing %#RX64 to hardware event (Lo) register!\n", u64Value));
768 pThis->HwEvtLo = u64Value;
769 return VINF_SUCCESS;
770}
771
772
773/**
774 * Writes the Hardware Event Status Register.
775 */
776static VBOXSTRICTRC iommuAmdHwEvtStatus_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
777{
778 RT_NOREF(pDevIns, iReg);
779
780 /* Mask out all unrecognized bits. */
781 u64Value &= IOMMU_HW_EVT_STATUS_VALID_MASK;
782
783 /*
784 * The two bits (HEO and HEV) are RW1C (Read/Write 1-to-Clear; writing 0 has no effect).
 785 * If the Valid bit (bit 0) is currently clear, there is nothing to clear and we bail early.
786 * The Overflow bit (bit 1) is only valid when the Valid bit (bit 0) is 1.
787 */
788 uint64_t HwStatus = pThis->HwEvtStatus.u64;
 789 if (!(HwStatus & RT_BIT_64(0)))
790 return VINF_SUCCESS;
791 if (u64Value & HwStatus & RT_BIT_64(0))
792 HwStatus &= ~RT_BIT_64(0);
793 if (u64Value & HwStatus & RT_BIT_64(1))
794 HwStatus &= ~RT_BIT_64(1);
795
796 /* Update the register. */
797 pThis->HwEvtStatus.u64 = HwStatus;
798 return VINF_SUCCESS;
799}
800
801
802/**
803 * Writes the Device Table Segment Base Address Register.
804 */
805static VBOXSTRICTRC iommuAmdDevTabSegBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
806{
807 RT_NOREF(pDevIns);
808
809 /* Figure out which segment is being written. */
810 uint8_t const offSegment = (iReg - IOMMU_MMIO_OFF_DEV_TAB_SEG_FIRST) >> 3;
811 uint8_t const idxSegment = offSegment + 1;
812 Assert(idxSegment < RT_ELEMENTS(pThis->aDevTabBaseAddrs));
813
814 /* Mask out all unrecognized bits. */
815 u64Value &= IOMMU_DEV_TAB_SEG_BAR_VALID_MASK;
816 DEV_TAB_BAR_T DevTabSegBar;
817 DevTabSegBar.u64 = u64Value;
818
819 /* Validate the size. */
820 uint16_t const uSegSize = DevTabSegBar.n.u9Size;
821 uint16_t const uMaxSegSize = g_auDevTabSegMaxSizes[idxSegment];
822 if (uSegSize <= uMaxSegSize)
823 {
824 /* Update the register. */
825 pThis->aDevTabBaseAddrs[idxSegment].u64 = u64Value;
826 }
827 else
828 LogFunc(("Device table segment (%u) size invalid (%#RX32) -> Ignored\n", idxSegment, uSegSize));
829
830 return VINF_SUCCESS;
831}
832
833
834/**
835 * Writes the MSI Capability Header Register.
836 */
837static VBOXSTRICTRC iommuAmdMsiCapHdr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
838{
839 RT_NOREF(pThis, iReg);
840 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
841 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
842 MSI_CAP_HDR_T MsiCapHdr;
843 MsiCapHdr.u32 = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR);
844 MsiCapHdr.n.u1MsiEnable = RT_BOOL(u64Value & IOMMU_MSI_CAP_HDR_MSI_EN_MASK);
845 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR, MsiCapHdr.u32);
846 return VINF_SUCCESS;
847}
848
849
850/**
851 * Writes the MSI Address (Lo) Register (32-bit).
852 */
853static VBOXSTRICTRC iommuAmdMsiAddrLo_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
854{
855 RT_NOREF(pThis, iReg);
856 Assert(!RT_HI_U32(u64Value));
857 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
858 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
859 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO, u64Value & VBOX_MSI_ADDR_VALID_MASK);
860 return VINF_SUCCESS;
861}
862
863
864/**
865 * Writes the MSI Address (Hi) Register (32-bit).
866 */
867static VBOXSTRICTRC iommuAmdMsiAddrHi_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
868{
869 RT_NOREF(pThis, iReg);
870 Assert(!RT_HI_U32(u64Value));
871 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
872 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
873 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI, u64Value);
874 return VINF_SUCCESS;
875}
876
877
878/**
879 * Writes the MSI Data Register (32-bit).
880 */
881static VBOXSTRICTRC iommuAmdMsiData_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
882{
883 RT_NOREF(pThis, iReg);
884 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
885 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
886 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA, u64Value & VBOX_MSI_DATA_VALID_MASK);
887 return VINF_SUCCESS;
888}
889
890
891/**
892 * Writes the Command Buffer Head Pointer Register (32-bit).
893 */
894static VBOXSTRICTRC iommuAmdCmdBufHeadPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
895{
896 RT_NOREF(pDevIns, iReg);
897
898 /*
899 * IOMMU behavior is undefined when software writes this register when the command buffer is running.
900 * In our emulation, we ignore the write entirely.
901 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
902 */
903 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
904 if (Status.n.u1CmdBufRunning)
905 {
906 LogFunc(("Setting CmdBufHeadPtr (%#RX64) when command buffer is running -> Ignored\n", u64Value));
907 return VINF_SUCCESS;
908 }
909
910 /*
911 * IOMMU behavior is undefined when software writes a value outside the buffer length.
912 * In our emulation, we ignore the write entirely.
913 */
914 uint32_t const offBuf = u64Value & IOMMU_CMD_BUF_HEAD_PTR_VALID_MASK;
915 uint32_t const cbBuf = iommuAmdGetTotalBufLength(pThis->CmdBufBaseAddr.n.u4Len);
916 Assert(cbBuf <= _512K);
917 if (offBuf >= cbBuf)
918 {
 919 LogFunc(("Setting CmdBufHeadPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n", offBuf, cbBuf));
920 return VINF_SUCCESS;
921 }
922
923 /* Update the register. */
924 pThis->CmdBufHeadPtr.au32[0] = offBuf;
925
926 iommuAmdCmdThreadWakeUpIfNeeded(pDevIns);
927
928 Log5Func(("Set CmdBufHeadPtr to %#RX32\n", offBuf));
929 return VINF_SUCCESS;
930}
931
932
933/**
934 * Writes the Command Buffer Tail Pointer Register (32-bit).
935 */
936static VBOXSTRICTRC iommuAmdCmdBufTailPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
937{
938 RT_NOREF(pDevIns, iReg);
939
940 /*
941 * IOMMU behavior is undefined when software writes a value outside the buffer length.
942 * In our emulation, we ignore the write entirely.
943 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
944 */
945 uint32_t const offBuf = u64Value & IOMMU_CMD_BUF_TAIL_PTR_VALID_MASK;
946 uint32_t const cbBuf = iommuAmdGetTotalBufLength(pThis->CmdBufBaseAddr.n.u4Len);
947 Assert(cbBuf <= _512K);
948 if (offBuf >= cbBuf)
949 {
950 LogFunc(("Setting CmdBufTailPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n", offBuf, cbBuf));
951 return VINF_SUCCESS;
952 }
953
954 /*
955 * IOMMU behavior is undefined if software advances the tail pointer equal to or beyond the
956 * head pointer after adding one or more commands to the buffer.
957 *
958 * However, we cannot enforce this strictly because it's legal for software to shrink the
959 * command queue (by reducing the offset) as well as wrap around the pointer (when head isn't
960 * at 0). Software might even make the queue empty by making head and tail equal which is
961 * allowed. I don't think we can or should try too hard to prevent software shooting itself
962 * in the foot here. As long as we make sure the offset value is within the circular buffer
963 * bounds (which we do by masking bits above) it should be sufficient.
964 */
965 pThis->CmdBufTailPtr.au32[0] = offBuf;
966
967 iommuAmdCmdThreadWakeUpIfNeeded(pDevIns);
968
969 Log5Func(("Set CmdBufTailPtr to %#RX32\n", offBuf));
970 return VINF_SUCCESS;
971}
972
973
974/**
975 * Writes the Event Log Head Pointer Register (32-bit).
976 */
977static VBOXSTRICTRC iommuAmdEvtLogHeadPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
978{
979 RT_NOREF(pDevIns, iReg);
980
981 /*
982 * IOMMU behavior is undefined when software writes a value outside the buffer length.
983 * In our emulation, we ignore the write entirely.
984 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
985 */
986 uint32_t const offBuf = u64Value & IOMMU_EVT_LOG_HEAD_PTR_VALID_MASK;
987 uint32_t const cbBuf = iommuAmdGetTotalBufLength(pThis->EvtLogBaseAddr.n.u4Len);
988 Assert(cbBuf <= _512K);
989 if (offBuf >= cbBuf)
990 {
991 LogFunc(("Setting EvtLogHeadPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n", offBuf, cbBuf));
992 return VINF_SUCCESS;
993 }
994
995 /* Update the register. */
996 pThis->EvtLogHeadPtr.au32[0] = offBuf;
997
998 LogFlowFunc(("Set EvtLogHeadPtr to %#RX32\n", offBuf));
999 return VINF_SUCCESS;
1000}
1001
1002
1003/**
1004 * Writes the Event Log Tail Pointer Register (32-bit).
1005 */
1006static VBOXSTRICTRC iommuAmdEvtLogTailPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
1007{
1008 RT_NOREF(pDevIns, iReg);
1010
1011 /*
1012 * IOMMU behavior is undefined when software writes this register when the event log is running.
1013 * In our emulation, we ignore the write entirely.
1014 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
1015 */
1016 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
1017 if (Status.n.u1EvtLogRunning)
1018 {
1019 LogFunc(("Setting EvtLogTailPtr (%#RX64) when event log is running -> Ignored\n", u64Value));
1020 return VINF_SUCCESS;
1021 }
1022
1023 /*
1024 * IOMMU behavior is undefined when software writes a value outside the buffer length.
1025 * In our emulation, we ignore the write entirely.
1026 */
1027 uint32_t const offBuf = u64Value & IOMMU_EVT_LOG_TAIL_PTR_VALID_MASK;
1028 uint32_t const cbBuf = iommuAmdGetTotalBufLength(pThis->EvtLogBaseAddr.n.u4Len);
1029 Assert(cbBuf <= _512K);
1030 if (offBuf >= cbBuf)
1031 {
1032 LogFunc(("Setting EvtLogTailPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n", offBuf, cbBuf));
1033 return VINF_SUCCESS;
1034 }
1035
1036 /* Update the register. */
1037 pThis->EvtLogTailPtr.au32[0] = offBuf;
1038
1039 LogFlowFunc(("Set EvtLogTailPtr to %#RX32\n", offBuf));
1040 return VINF_SUCCESS;
1041}
1042
1043
1044/**
1045 * Writes the Status Register (64-bit).
1046 */
1047static VBOXSTRICTRC iommuAmdStatus_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
1048{
1049 RT_NOREF(pDevIns, iReg);
1050
1051 /* Mask out all unrecognized bits. */
1052 u64Value &= IOMMU_STATUS_VALID_MASK;
1053
1054 /*
1055 * Compute RW1C (read-only, write-1-to-clear) bits and preserve the rest (which are read-only).
1056 * Writing 0 to an RW1C bit has no effect. Writing 1 to an RW1C bit, clears the bit if it's already 1.
1057 */
1058 IOMMU_STATUS_T const OldStatus = iommuAmdGetStatus(pThis);
1059 uint64_t const fOldRw1cBits = (OldStatus.u64 & IOMMU_STATUS_RW1C_MASK);
1060 uint64_t const fOldRoBits = (OldStatus.u64 & ~IOMMU_STATUS_RW1C_MASK);
1061 uint64_t const fNewRw1cBits = (u64Value & IOMMU_STATUS_RW1C_MASK);
1062
1063 uint64_t const uNewStatus = (fOldRw1cBits & ~fNewRw1cBits) | fOldRoBits;
1064
1065 /* Update the register. */
1066 ASMAtomicWriteU64(&pThis->Status.u64, uNewStatus);
1067 return VINF_SUCCESS;
1068}
1069
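/*
 * Worked example (not part of the device code): for the RW1C logic in the status
 * write above, if the RW1C bits currently read as 110b and the guest writes 010b,
 * the result is 110b & ~010b = 100b. The written 1 clears its bit; written 0s
 * leave the remaining bits untouched.
 */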
1070
1071#if 0
1072/**
1073 * Table 0: Registers-access table.
1074 */
1075static const IOMMUREGACC g_aTable0Regs[] =
1076{
1077
1078};
1079
1080/**
1081 * Table 1: Registers-access table.
1082 */
1083static const IOMMUREGACC g_aTable1Regs[] =
1084{
1085};
1086#endif
1087
1088
1089/**
1090 * Writes an IOMMU register (32-bit and 64-bit).
1091 *
1092 * @returns Strict VBox status code.
1093 * @param pDevIns The IOMMU device instance.
1094 * @param off MMIO byte offset to the register.
1095 * @param cb The size of the write access.
1096 * @param uValue The value being written.
1097 *
1098 * @thread EMT.
1099 */
1100static VBOXSTRICTRC iommuAmdWriteRegister(PPDMDEVINS pDevIns, uint32_t off, uint8_t cb, uint64_t uValue)
1101{
1102 Assert(off < IOMMU_MMIO_REGION_SIZE);
1103 Assert(cb == 4 || cb == 8);
1104 Assert(!(off & (cb - 1)));
1105
1106 Log5Func(("off=%#x cb=%u uValue=%#RX64\n", off, cb, uValue));
1107
1108 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1109 switch (off)
1110 {
1111 case IOMMU_MMIO_OFF_DEV_TAB_BAR: return iommuAmdDevTabBar_w(pDevIns, pThis, off, uValue);
1112 case IOMMU_MMIO_OFF_CMD_BUF_BAR: return iommuAmdCmdBufBar_w(pDevIns, pThis, off, uValue);
1113 case IOMMU_MMIO_OFF_EVT_LOG_BAR: return iommuAmdEvtLogBar_w(pDevIns, pThis, off, uValue);
1114 case IOMMU_MMIO_OFF_CTRL: return iommuAmdCtrl_w(pDevIns, pThis, off, uValue);
1115 case IOMMU_MMIO_OFF_EXCL_BAR: return iommuAmdExclRangeBar_w(pDevIns, pThis, off, uValue);
1116 case IOMMU_MMIO_OFF_EXCL_RANGE_LIMIT: return iommuAmdExclRangeLimit_w(pDevIns, pThis, off, uValue);
1117 case IOMMU_MMIO_OFF_EXT_FEAT: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1118
1119 case IOMMU_MMIO_OFF_PPR_LOG_BAR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1120 case IOMMU_MMIO_OFF_HW_EVT_HI: return iommuAmdHwEvtHi_w(pDevIns, pThis, off, uValue);
1121 case IOMMU_MMIO_OFF_HW_EVT_LO: return iommuAmdHwEvtLo_w(pDevIns, pThis, off, uValue);
1122 case IOMMU_MMIO_OFF_HW_EVT_STATUS: return iommuAmdHwEvtStatus_w(pDevIns, pThis, off, uValue);
1123
1124 case IOMMU_MMIO_OFF_GALOG_BAR:
1125 case IOMMU_MMIO_OFF_GALOG_TAIL_ADDR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1126
1127 case IOMMU_MMIO_OFF_PPR_LOG_B_BAR:
1128 case IOMMU_MMIO_OFF_PPR_EVT_B_BAR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1129
1130 case IOMMU_MMIO_OFF_DEV_TAB_SEG_1:
1131 case IOMMU_MMIO_OFF_DEV_TAB_SEG_2:
1132 case IOMMU_MMIO_OFF_DEV_TAB_SEG_3:
1133 case IOMMU_MMIO_OFF_DEV_TAB_SEG_4:
1134 case IOMMU_MMIO_OFF_DEV_TAB_SEG_5:
1135 case IOMMU_MMIO_OFF_DEV_TAB_SEG_6:
1136 case IOMMU_MMIO_OFF_DEV_TAB_SEG_7: return iommuAmdDevTabSegBar_w(pDevIns, pThis, off, uValue);
1137
1138 case IOMMU_MMIO_OFF_DEV_SPECIFIC_FEAT:
1139 case IOMMU_MMIO_OFF_DEV_SPECIFIC_CTRL:
1140 case IOMMU_MMIO_OFF_DEV_SPECIFIC_STATUS: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1141
1142 case IOMMU_MMIO_OFF_MSI_VECTOR_0:
1143 case IOMMU_MMIO_OFF_MSI_VECTOR_1: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1144 case IOMMU_MMIO_OFF_MSI_CAP_HDR:
1145 {
1146 VBOXSTRICTRC rcStrict = iommuAmdMsiCapHdr_w(pDevIns, pThis, off, (uint32_t)uValue);
1147 if (cb == 4 || RT_FAILURE(rcStrict))
1148 return rcStrict;
1149 uValue >>= 32;
1150 RT_FALL_THRU();
1151 }
1152 case IOMMU_MMIO_OFF_MSI_ADDR_LO: return iommuAmdMsiAddrLo_w(pDevIns, pThis, off, uValue);
1153 case IOMMU_MMIO_OFF_MSI_ADDR_HI:
1154 {
1155 VBOXSTRICTRC rcStrict = iommuAmdMsiAddrHi_w(pDevIns, pThis, off, (uint32_t)uValue);
1156 if (cb == 4 || RT_FAILURE(rcStrict))
1157 return rcStrict;
1158 uValue >>= 32;
1159 RT_FALL_THRU();
1160 }
1161 case IOMMU_MMIO_OFF_MSI_DATA: return iommuAmdMsiData_w(pDevIns, pThis, off, uValue);
1162 case IOMMU_MMIO_OFF_MSI_MAPPING_CAP_HDR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1163
1164 case IOMMU_MMIO_OFF_PERF_OPT_CTRL: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1165
1166 case IOMMU_MMIO_OFF_XT_GEN_INTR_CTRL:
1167 case IOMMU_MMIO_OFF_XT_PPR_INTR_CTRL:
1168 case IOMMU_MMIO_OFF_XT_GALOG_INT_CTRL: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1169
1170 case IOMMU_MMIO_OFF_MARC_APER_BAR_0:
1171 case IOMMU_MMIO_OFF_MARC_APER_RELOC_0:
1172 case IOMMU_MMIO_OFF_MARC_APER_LEN_0:
1173 case IOMMU_MMIO_OFF_MARC_APER_BAR_1:
1174 case IOMMU_MMIO_OFF_MARC_APER_RELOC_1:
1175 case IOMMU_MMIO_OFF_MARC_APER_LEN_1:
1176 case IOMMU_MMIO_OFF_MARC_APER_BAR_2:
1177 case IOMMU_MMIO_OFF_MARC_APER_RELOC_2:
1178 case IOMMU_MMIO_OFF_MARC_APER_LEN_2:
1179 case IOMMU_MMIO_OFF_MARC_APER_BAR_3:
1180 case IOMMU_MMIO_OFF_MARC_APER_RELOC_3:
1181 case IOMMU_MMIO_OFF_MARC_APER_LEN_3: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1182
1183 case IOMMU_MMIO_OFF_RSVD_REG: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1184
1185 case IOMMU_MMIO_CMD_BUF_HEAD_PTR: return iommuAmdCmdBufHeadPtr_w(pDevIns, pThis, off, uValue);
1186 case IOMMU_MMIO_CMD_BUF_TAIL_PTR: return iommuAmdCmdBufTailPtr_w(pDevIns, pThis, off, uValue);
1187 case IOMMU_MMIO_EVT_LOG_HEAD_PTR: return iommuAmdEvtLogHeadPtr_w(pDevIns, pThis, off, uValue);
1188 case IOMMU_MMIO_EVT_LOG_TAIL_PTR: return iommuAmdEvtLogTailPtr_w(pDevIns, pThis, off, uValue);
1189
1190 case IOMMU_MMIO_OFF_STATUS: return iommuAmdStatus_w(pDevIns, pThis, off, uValue);
1191
1192 case IOMMU_MMIO_OFF_PPR_LOG_HEAD_PTR:
1193 case IOMMU_MMIO_OFF_PPR_LOG_TAIL_PTR:
1194
1195 case IOMMU_MMIO_OFF_GALOG_HEAD_PTR:
1196 case IOMMU_MMIO_OFF_GALOG_TAIL_PTR:
1197
1198 case IOMMU_MMIO_OFF_PPR_LOG_B_HEAD_PTR:
1199 case IOMMU_MMIO_OFF_PPR_LOG_B_TAIL_PTR:
1200
1201 case IOMMU_MMIO_OFF_EVT_LOG_B_HEAD_PTR:
1202 case IOMMU_MMIO_OFF_EVT_LOG_B_TAIL_PTR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1203
1204 case IOMMU_MMIO_OFF_PPR_LOG_AUTO_RESP:
1205 case IOMMU_MMIO_OFF_PPR_LOG_OVERFLOW_EARLY:
1206 case IOMMU_MMIO_OFF_PPR_LOG_B_OVERFLOW_EARLY:
1207
1208 /* Not implemented. */
1209 case IOMMU_MMIO_OFF_SMI_FLT_FIRST:
1210 case IOMMU_MMIO_OFF_SMI_FLT_LAST:
1211 {
1212 LogFunc(("Writing unsupported register: SMI filter %u -> Ignored\n", (off - IOMMU_MMIO_OFF_SMI_FLT_FIRST) >> 3));
1213 return VINF_SUCCESS;
1214 }
1215
1216 /* Unknown. */
1217 default:
1218 {
1219 LogFunc(("Writing unknown register %u (%#x) with %#RX64 -> Ignored\n", off, off, uValue));
1220 return VINF_SUCCESS;
1221 }
1222 }
1223}
1224
1225
1226/**
1227 * Reads an IOMMU register (64-bit) given its MMIO offset.
1228 *
1229 * All reads are 64-bit but reads to 32-bit registers that are aligned on an 8-byte
1230 * boundary include the lower half of the subsequent register.
1231 *
1232 * This is because most registers are 64-bit and aligned on 8-byte boundaries but
1233 * some are really 32-bit registers aligned on an 8-byte boundary. We cannot assume
1234 * software will only perform 32-bit reads on those 32-bit registers that are
1235 * aligned on 8-byte boundaries.
1236 *
1237 * @returns Strict VBox status code.
1238 * @param pDevIns The IOMMU device instance.
1239 * @param off The MMIO offset of the register in bytes.
1240 * @param puResult Where to store the value being read.
1241 *
1242 * @thread EMT.
1243 */
1244static VBOXSTRICTRC iommuAmdReadRegister(PPDMDEVINS pDevIns, uint32_t off, uint64_t *puResult)
1245{
1246 Assert(off < IOMMU_MMIO_REGION_SIZE);
1247 Assert(!(off & 7) || !(off & 3));
1248
1249 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1250 PCPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
1251 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
1252
1253 Log5Func(("off=%#x\n", off));
1254
1255 /** @todo IOMMU: fine-grained locking? */
1256 uint64_t uReg;
1257 switch (off)
1258 {
1259 case IOMMU_MMIO_OFF_DEV_TAB_BAR: uReg = pThis->aDevTabBaseAddrs[0].u64; break;
1260 case IOMMU_MMIO_OFF_CMD_BUF_BAR: uReg = pThis->CmdBufBaseAddr.u64; break;
1261 case IOMMU_MMIO_OFF_EVT_LOG_BAR: uReg = pThis->EvtLogBaseAddr.u64; break;
1262 case IOMMU_MMIO_OFF_CTRL: uReg = pThis->Ctrl.u64; break;
1263 case IOMMU_MMIO_OFF_EXCL_BAR: uReg = pThis->ExclRangeBaseAddr.u64; break;
1264 case IOMMU_MMIO_OFF_EXCL_RANGE_LIMIT: uReg = pThis->ExclRangeLimit.u64; break;
1265 case IOMMU_MMIO_OFF_EXT_FEAT: uReg = pThis->ExtFeat.u64; break;
1266
1267 case IOMMU_MMIO_OFF_PPR_LOG_BAR: uReg = pThis->PprLogBaseAddr.u64; break;
1268 case IOMMU_MMIO_OFF_HW_EVT_HI: uReg = pThis->HwEvtHi.u64; break;
1269 case IOMMU_MMIO_OFF_HW_EVT_LO: uReg = pThis->HwEvtLo; break;
1270 case IOMMU_MMIO_OFF_HW_EVT_STATUS: uReg = pThis->HwEvtStatus.u64; break;
1271
1272 case IOMMU_MMIO_OFF_GALOG_BAR: uReg = pThis->GALogBaseAddr.u64; break;
1273 case IOMMU_MMIO_OFF_GALOG_TAIL_ADDR: uReg = pThis->GALogTailAddr.u64; break;
1274
1275 case IOMMU_MMIO_OFF_PPR_LOG_B_BAR: uReg = pThis->PprLogBBaseAddr.u64; break;
1276 case IOMMU_MMIO_OFF_PPR_EVT_B_BAR: uReg = pThis->EvtLogBBaseAddr.u64; break;
1277
1278 case IOMMU_MMIO_OFF_DEV_TAB_SEG_1:
1279 case IOMMU_MMIO_OFF_DEV_TAB_SEG_2:
1280 case IOMMU_MMIO_OFF_DEV_TAB_SEG_3:
1281 case IOMMU_MMIO_OFF_DEV_TAB_SEG_4:
1282 case IOMMU_MMIO_OFF_DEV_TAB_SEG_5:
1283 case IOMMU_MMIO_OFF_DEV_TAB_SEG_6:
1284 case IOMMU_MMIO_OFF_DEV_TAB_SEG_7:
1285 {
1286 uint8_t const offDevTabSeg = (off - IOMMU_MMIO_OFF_DEV_TAB_SEG_FIRST) >> 3;
1287 uint8_t const idxDevTabSeg = offDevTabSeg + 1;
1288 Assert(idxDevTabSeg < RT_ELEMENTS(pThis->aDevTabBaseAddrs));
1289 uReg = pThis->aDevTabBaseAddrs[idxDevTabSeg].u64;
1290 break;
1291 }
1292
1293 case IOMMU_MMIO_OFF_DEV_SPECIFIC_FEAT: uReg = pThis->DevSpecificFeat.u64; break;
1294 case IOMMU_MMIO_OFF_DEV_SPECIFIC_CTRL: uReg = pThis->DevSpecificCtrl.u64; break;
1295 case IOMMU_MMIO_OFF_DEV_SPECIFIC_STATUS: uReg = pThis->DevSpecificStatus.u64; break;
1296
1297 case IOMMU_MMIO_OFF_MSI_VECTOR_0: uReg = pThis->MiscInfo.u64; break;
1298 case IOMMU_MMIO_OFF_MSI_VECTOR_1: uReg = pThis->MiscInfo.au32[1]; break;
1299 case IOMMU_MMIO_OFF_MSI_CAP_HDR:
1300 {
1301 uint32_t const uMsiCapHdr = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR);
1302 uint32_t const uMsiAddrLo = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO);
1303 uReg = RT_MAKE_U64(uMsiCapHdr, uMsiAddrLo);
1304 break;
1305 }
1306 case IOMMU_MMIO_OFF_MSI_ADDR_LO:
1307 {
1308 uReg = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO);
1309 break;
1310 }
1311 case IOMMU_MMIO_OFF_MSI_ADDR_HI:
1312 {
1313 uint32_t const uMsiAddrHi = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI);
1314 uint32_t const uMsiData = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA);
1315 uReg = RT_MAKE_U64(uMsiAddrHi, uMsiData);
1316 break;
1317 }
1318 case IOMMU_MMIO_OFF_MSI_DATA:
1319 {
1320 uReg = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA);
1321 break;
1322 }
1323 case IOMMU_MMIO_OFF_MSI_MAPPING_CAP_HDR:
1324 {
1325 /*
1326 * The PCI spec. lists MSI Mapping Capability 08H as related to HyperTransport capability.
1327 * The AMD IOMMU spec. fails to mention it explicitly and lists values for this register as
1328 * though HyperTransport is supported. We don't support HyperTransport, we thus just return
1329 * 0 for this register.
1330 */
1331 uReg = RT_MAKE_U64(0, pThis->PerfOptCtrl.u32);
1332 break;
1333 }
1334
1335 case IOMMU_MMIO_OFF_PERF_OPT_CTRL: uReg = pThis->PerfOptCtrl.u32; break;
1336
1337 case IOMMU_MMIO_OFF_XT_GEN_INTR_CTRL: uReg = pThis->XtGenIntrCtrl.u64; break;
1338 case IOMMU_MMIO_OFF_XT_PPR_INTR_CTRL: uReg = pThis->XtPprIntrCtrl.u64; break;
1339 case IOMMU_MMIO_OFF_XT_GALOG_INT_CTRL: uReg = pThis->XtGALogIntrCtrl.u64; break;
1340
1341 case IOMMU_MMIO_OFF_MARC_APER_BAR_0: uReg = pThis->aMarcApers[0].Base.u64; break;
1342 case IOMMU_MMIO_OFF_MARC_APER_RELOC_0: uReg = pThis->aMarcApers[0].Reloc.u64; break;
1343 case IOMMU_MMIO_OFF_MARC_APER_LEN_0: uReg = pThis->aMarcApers[0].Length.u64; break;
1344 case IOMMU_MMIO_OFF_MARC_APER_BAR_1: uReg = pThis->aMarcApers[1].Base.u64; break;
1345 case IOMMU_MMIO_OFF_MARC_APER_RELOC_1: uReg = pThis->aMarcApers[1].Reloc.u64; break;
1346 case IOMMU_MMIO_OFF_MARC_APER_LEN_1: uReg = pThis->aMarcApers[1].Length.u64; break;
1347 case IOMMU_MMIO_OFF_MARC_APER_BAR_2: uReg = pThis->aMarcApers[2].Base.u64; break;
1348 case IOMMU_MMIO_OFF_MARC_APER_RELOC_2: uReg = pThis->aMarcApers[2].Reloc.u64; break;
1349 case IOMMU_MMIO_OFF_MARC_APER_LEN_2: uReg = pThis->aMarcApers[2].Length.u64; break;
1350 case IOMMU_MMIO_OFF_MARC_APER_BAR_3: uReg = pThis->aMarcApers[3].Base.u64; break;
1351 case IOMMU_MMIO_OFF_MARC_APER_RELOC_3: uReg = pThis->aMarcApers[3].Reloc.u64; break;
1352 case IOMMU_MMIO_OFF_MARC_APER_LEN_3: uReg = pThis->aMarcApers[3].Length.u64; break;
1353
1354 case IOMMU_MMIO_OFF_RSVD_REG: uReg = pThis->RsvdReg; break;
1355
1356 case IOMMU_MMIO_CMD_BUF_HEAD_PTR: uReg = pThis->CmdBufHeadPtr.u64; break;
1357 case IOMMU_MMIO_CMD_BUF_TAIL_PTR: uReg = pThis->CmdBufTailPtr.u64; break;
1358 case IOMMU_MMIO_EVT_LOG_HEAD_PTR: uReg = pThis->EvtLogHeadPtr.u64; break;
1359 case IOMMU_MMIO_EVT_LOG_TAIL_PTR: uReg = pThis->EvtLogTailPtr.u64; break;
1360
1361 case IOMMU_MMIO_OFF_STATUS: uReg = pThis->Status.u64; break;
1362
1363 case IOMMU_MMIO_OFF_PPR_LOG_HEAD_PTR: uReg = pThis->PprLogHeadPtr.u64; break;
1364 case IOMMU_MMIO_OFF_PPR_LOG_TAIL_PTR: uReg = pThis->PprLogTailPtr.u64; break;
1365
1366 case IOMMU_MMIO_OFF_GALOG_HEAD_PTR: uReg = pThis->GALogHeadPtr.u64; break;
1367 case IOMMU_MMIO_OFF_GALOG_TAIL_PTR: uReg = pThis->GALogTailPtr.u64; break;
1368
1369 case IOMMU_MMIO_OFF_PPR_LOG_B_HEAD_PTR: uReg = pThis->PprLogBHeadPtr.u64; break;
1370 case IOMMU_MMIO_OFF_PPR_LOG_B_TAIL_PTR: uReg = pThis->PprLogBTailPtr.u64; break;
1371
1372 case IOMMU_MMIO_OFF_EVT_LOG_B_HEAD_PTR: uReg = pThis->EvtLogBHeadPtr.u64; break;
1373 case IOMMU_MMIO_OFF_EVT_LOG_B_TAIL_PTR: uReg = pThis->EvtLogBTailPtr.u64; break;
1374
1375 case IOMMU_MMIO_OFF_PPR_LOG_AUTO_RESP: uReg = pThis->PprLogAutoResp.u64; break;
1376 case IOMMU_MMIO_OFF_PPR_LOG_OVERFLOW_EARLY: uReg = pThis->PprLogOverflowEarly.u64; break;
1377 case IOMMU_MMIO_OFF_PPR_LOG_B_OVERFLOW_EARLY: uReg = pThis->PprLogBOverflowEarly.u64; break;
1378
1379 /* Not implemented. */
1380 case IOMMU_MMIO_OFF_SMI_FLT_FIRST:
1381 case IOMMU_MMIO_OFF_SMI_FLT_LAST:
1382 {
1383 LogFunc(("Reading unsupported register: SMI filter %u\n", (off - IOMMU_MMIO_OFF_SMI_FLT_FIRST) >> 3));
1384 uReg = 0;
1385 break;
1386 }
1387
1388 /* Unknown. */
1389 default:
1390 {
1391 LogFunc(("Reading unknown register %u (%#x) -> 0\n", off, off));
1392 uReg = 0;
1393 return VINF_IOM_MMIO_UNUSED_00;
1394 }
1395 }
1396
1397 *puResult = uReg;
1398 return VINF_SUCCESS;
1399}
1400
1401
1402/**
1403 * Raises the MSI interrupt for the IOMMU device.
1404 *
1405 * @param pDevIns The IOMMU device instance.
1406 *
1407 * @thread Any.
1408 * @remarks The IOMMU lock may or may not be held.
1409 */
1410static void iommuAmdRaiseMsiInterrupt(PPDMDEVINS pDevIns)
1411{
1412 LogFlowFunc(("\n"));
1413 if (iommuAmdIsMsiEnabled(pDevIns))
1414 {
1415 LogFunc(("Raising MSI\n"));
1416 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
1417 }
1418}
1419
1420
1421/**
1422 * Clears the MSI interrupt for the IOMMU device.
1423 *
1424 * @param pDevIns The IOMMU device instance.
1425 *
1426 * @thread Any.
1427 * @remarks The IOMMU lock may or may not be held.
1428 */
1429static void iommuAmdClearMsiInterrupt(PPDMDEVINS pDevIns)
1430{
1431 if (iommuAmdIsMsiEnabled(pDevIns))
1432 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
1433}
1434
1435
1436/**
1437 * Writes an entry to the event log in memory.
1438 *
1439 * @returns VBox status code.
1440 * @param pDevIns The IOMMU device instance.
1441 * @param pEvent The event to log.
1442 *
1443 * @thread Any.
1444 */
1445static int iommuAmdWriteEvtLogEntry(PPDMDEVINS pDevIns, PCEVT_GENERIC_T pEvent)
1446{
1447 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1448
1449 IOMMU_ASSERT_LOCKED(pDevIns);
1450
1451 /* Check if event logging is active and the log has not overflowed. */
1452 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
1453 if ( Status.n.u1EvtLogRunning
1454 && !Status.n.u1EvtOverflow)
1455 {
1456 uint32_t const cbEvt = sizeof(*pEvent);
1457
1458 /* Get the offset we need to write the event to in memory (circular buffer offset). */
1459 uint32_t const offEvt = pThis->EvtLogTailPtr.n.off;
1460 Assert(!(offEvt & ~IOMMU_EVT_LOG_TAIL_PTR_VALID_MASK));
1461
1462 /* Ensure we have space in the event log. */
1463 uint32_t const cMaxEvts = iommuAmdGetBufMaxEntries(pThis->EvtLogBaseAddr.n.u4Len);
1464 uint32_t const cEvts = iommuAmdGetEvtLogEntryCount(pThis);
1465 if (cEvts + 1 < cMaxEvts)
1466 {
1467 /* Write the event log entry to memory. */
1468 RTGCPHYS const GCPhysEvtLog = pThis->EvtLogBaseAddr.n.u40Base << X86_PAGE_4K_SHIFT;
1469 RTGCPHYS const GCPhysEvtLogEntry = GCPhysEvtLog + offEvt;
1470 int rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhysEvtLogEntry, pEvent, cbEvt);
1471 if (RT_FAILURE(rc))
1472 LogFunc(("Failed to write event log entry at %#RGp. rc=%Rrc\n", GCPhysEvtLogEntry, rc));
1473
1474 /* Increment the event log tail pointer. */
1475 uint32_t const cbEvtLog = iommuAmdGetTotalBufLength(pThis->EvtLogBaseAddr.n.u4Len);
1476 pThis->EvtLogTailPtr.n.off = (offEvt + cbEvt) % cbEvtLog;
1477
1478 /* Indicate that an event log entry was written. */
1479 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_EVT_LOG_INTR);
1480
1481 /* Check and signal an interrupt if software wants to receive one when an event log entry is written. */
1482 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
1483 if (Ctrl.n.u1EvtIntrEn)
1484 iommuAmdRaiseMsiInterrupt(pDevIns);
1485 }
1486 else
1487 {
1488 /* Indicate that the event log has overflowed. */
1489 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_EVT_LOG_OVERFLOW);
1490
1491 /* Check and signal an interrupt if software wants to receive one when the event log has overflowed. */
1492 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
1493 if (Ctrl.n.u1EvtIntrEn)
1494 iommuAmdRaiseMsiInterrupt(pDevIns);
1495 }
1496 }
1497
1498 return VINF_SUCCESS;
1499}
1500
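/*
 * Worked example (not part of the device code): the tail update in the function
 * above wraps modulo the log size. With a 4096-byte event log, offEvt = 4080 and
 * cbEvt = 16 give a new tail offset of (4080 + 16) % 4096 = 0.
 */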
1501
1502/**
1503 * Sets an event in the hardware error registers.
1504 *
1505 * @param pDevIns The IOMMU device instance.
1506 * @param pEvent The event.
1507 *
1508 * @thread Any.
1509 */
1510static void iommuAmdSetHwError(PPDMDEVINS pDevIns, PCEVT_GENERIC_T pEvent)
1511{
1512 IOMMU_ASSERT_LOCKED(pDevIns);
1513
1514 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1515 if (pThis->ExtFeat.n.u1HwErrorSup)
1516 {
1517 if (pThis->HwEvtStatus.n.u1Valid)
1518 pThis->HwEvtStatus.n.u1Overflow = 1;
1519 pThis->HwEvtStatus.n.u1Valid = 1;
1520 pThis->HwEvtHi.u64 = RT_MAKE_U64(pEvent->au32[0], pEvent->au32[1]);
1521 pThis->HwEvtLo = RT_MAKE_U64(pEvent->au32[2], pEvent->au32[3]);
1522 Assert( pThis->HwEvtHi.n.u4EvtCode == IOMMU_EVT_DEV_TAB_HW_ERROR
1523 || pThis->HwEvtHi.n.u4EvtCode == IOMMU_EVT_PAGE_TAB_HW_ERROR
1524 || pThis->HwEvtHi.n.u4EvtCode == IOMMU_EVT_COMMAND_HW_ERROR);
1525 }
1526}
1527
1528
1529/**
1530 * Initializes a PAGE_TAB_HARDWARE_ERROR event.
1531 *
1532 * @param uDevId The device ID.
1533 * @param uDomainId The domain ID.
1534 * @param GCPhysPtEntity The system physical address of the page table
1535 * entity.
1536 * @param enmOp The IOMMU operation being performed.
1537 * @param pEvtPageTabHwErr Where to store the initialized event.
1538 */
1539static void iommuAmdInitPageTabHwErrorEvent(uint16_t uDevId, uint16_t uDomainId, RTGCPHYS GCPhysPtEntity, IOMMUOP enmOp,
1540 PEVT_PAGE_TAB_HW_ERR_T pEvtPageTabHwErr)
1541{
1542 memset(pEvtPageTabHwErr, 0, sizeof(*pEvtPageTabHwErr));
1543 pEvtPageTabHwErr->n.u16DevId = uDevId;
1544 pEvtPageTabHwErr->n.u16DomainOrPasidLo = uDomainId;
1545 pEvtPageTabHwErr->n.u1GuestOrNested = 0;
1546 pEvtPageTabHwErr->n.u1Interrupt = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
1547 pEvtPageTabHwErr->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
1548 pEvtPageTabHwErr->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
1549 pEvtPageTabHwErr->n.u2Type = enmOp == IOMMUOP_CMD ? HWEVTTYPE_DATA_ERROR : HWEVTTYPE_TARGET_ABORT;
1550 pEvtPageTabHwErr->n.u4EvtCode = IOMMU_EVT_PAGE_TAB_HW_ERROR;
1551 pEvtPageTabHwErr->n.u64Addr = GCPhysPtEntity;
1552}
1553
1554
1555/**
1556 * Raises a PAGE_TAB_HARDWARE_ERROR event.
1557 *
1558 * @param pDevIns The IOMMU device instance.
1559 * @param enmOp The IOMMU operation being performed.
1560 * @param pEvtPageTabHwErr The page table hardware error event.
1561 *
1562 * @thread Any.
1563 */
1564static void iommuAmdRaisePageTabHwErrorEvent(PPDMDEVINS pDevIns, IOMMUOP enmOp, PEVT_PAGE_TAB_HW_ERR_T pEvtPageTabHwErr)
1565{
1566 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_PAGE_TAB_HW_ERR_T));
1567 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtPageTabHwErr;
1568
1569 IOMMU_LOCK_NORET(pDevIns);
1570
1571    iommuAmdSetHwError(pDevIns, pEvent);
1572    iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1573 if (enmOp != IOMMUOP_CMD)
1574 iommuAmdSetPciTargetAbort(pDevIns);
1575
1576 IOMMU_UNLOCK(pDevIns);
1577
1578 LogFunc(("Raised PAGE_TAB_HARDWARE_ERROR. uDevId=%#x uDomainId=%#x GCPhysPtEntity=%#RGp enmOp=%u u2Type=%u\n",
1579 pEvtPageTabHwErr->n.u16DevId, pEvtPageTabHwErr->n.u16DomainOrPasidLo, pEvtPageTabHwErr->n.u64Addr, enmOp,
1580 pEvtPageTabHwErr->n.u2Type));
1581}
1582
1583
1584/**
1585 * Initializes a COMMAND_HARDWARE_ERROR event.
1586 *
1587 * @param GCPhysAddr The system physical address the IOMMU attempted to access.
1588 * @param pEvtCmdHwErr Where to store the initialized event.
1589 */
1590static void iommuAmdInitCmdHwErrorEvent(RTGCPHYS GCPhysAddr, PEVT_CMD_HW_ERR_T pEvtCmdHwErr)
1591{
1592 memset(pEvtCmdHwErr, 0, sizeof(*pEvtCmdHwErr));
1593 pEvtCmdHwErr->n.u2Type = HWEVTTYPE_DATA_ERROR;
1594 pEvtCmdHwErr->n.u4EvtCode = IOMMU_EVT_COMMAND_HW_ERROR;
1595 pEvtCmdHwErr->n.u64Addr = GCPhysAddr;
1596}
1597
1598
1599/**
1600 * Raises a COMMAND_HARDWARE_ERROR event.
1601 *
1602 * @param pDevIns The IOMMU device instance.
1603 * @param pEvtCmdHwErr The command hardware error event.
1604 *
1605 * @thread Any.
1606 */
1607static void iommuAmdRaiseCmdHwErrorEvent(PPDMDEVINS pDevIns, PCEVT_CMD_HW_ERR_T pEvtCmdHwErr)
1608{
1609 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_CMD_HW_ERR_T));
1610 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtCmdHwErr;
1611 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1612
1613 IOMMU_LOCK_NORET(pDevIns);
1614
1615    iommuAmdSetHwError(pDevIns, pEvent);
1616    iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1617 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_CMD_BUF_RUNNING);
1618
1619 IOMMU_UNLOCK(pDevIns);
1620
1621 LogFunc(("Raised COMMAND_HARDWARE_ERROR. GCPhysCmd=%#RGp u2Type=%u\n", pEvtCmdHwErr->n.u64Addr, pEvtCmdHwErr->n.u2Type));
1622}
1623
1624
1625/**
1626 * Initializes a DEV_TAB_HARDWARE_ERROR event.
1627 *
1628 * @param uDevId The device ID.
1629 * @param GCPhysDte The system physical address of the failed device table
1630 * access.
1631 * @param enmOp The IOMMU operation being performed.
1632 * @param pEvtDevTabHwErr Where to store the initialized event.
1633 */
1634static void iommuAmdInitDevTabHwErrorEvent(uint16_t uDevId, RTGCPHYS GCPhysDte, IOMMUOP enmOp,
1635 PEVT_DEV_TAB_HW_ERROR_T pEvtDevTabHwErr)
1636{
1637 memset(pEvtDevTabHwErr, 0, sizeof(*pEvtDevTabHwErr));
1638 pEvtDevTabHwErr->n.u16DevId = uDevId;
1639 pEvtDevTabHwErr->n.u1Intr = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
1640 /** @todo IOMMU: Any other transaction type that can set read/write bit? */
1641 pEvtDevTabHwErr->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
1642 pEvtDevTabHwErr->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
1643 pEvtDevTabHwErr->n.u2Type = enmOp == IOMMUOP_CMD ? HWEVTTYPE_DATA_ERROR : HWEVTTYPE_TARGET_ABORT;
1644 pEvtDevTabHwErr->n.u4EvtCode = IOMMU_EVT_DEV_TAB_HW_ERROR;
1645 pEvtDevTabHwErr->n.u64Addr = GCPhysDte;
1646}
1647
1648
1649/**
1650 * Raises a DEV_TAB_HARDWARE_ERROR event.
1651 *
1652 * @param pDevIns The IOMMU device instance.
1653 * @param enmOp The IOMMU operation being performed.
1654 * @param pEvtDevTabHwErr The device table hardware error event.
1655 *
1656 * @thread Any.
1657 */
1658static void iommuAmdRaiseDevTabHwErrorEvent(PPDMDEVINS pDevIns, IOMMUOP enmOp, PEVT_DEV_TAB_HW_ERROR_T pEvtDevTabHwErr)
1659{
1660 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_DEV_TAB_HW_ERROR_T));
1661 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtDevTabHwErr;
1662
1663 IOMMU_LOCK_NORET(pDevIns);
1664
1665    iommuAmdSetHwError(pDevIns, pEvent);
1666    iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1667 if (enmOp != IOMMUOP_CMD)
1668 iommuAmdSetPciTargetAbort(pDevIns);
1669
1670 IOMMU_UNLOCK(pDevIns);
1671
1672 LogFunc(("Raised DEV_TAB_HARDWARE_ERROR. uDevId=%#x GCPhysDte=%#RGp enmOp=%u u2Type=%u\n", pEvtDevTabHwErr->n.u16DevId,
1673 pEvtDevTabHwErr->n.u64Addr, enmOp, pEvtDevTabHwErr->n.u2Type));
1674}
1675
1676
1677/**
1678 * Initializes an ILLEGAL_COMMAND_ERROR event.
1679 *
1680 * @param GCPhysCmd The system physical address of the failed command
1681 * access.
1682 * @param pEvtIllegalCmd Where to store the initialized event.
1683 */
1684static void iommuAmdInitIllegalCmdEvent(RTGCPHYS GCPhysCmd, PEVT_ILLEGAL_CMD_ERR_T pEvtIllegalCmd)
1685{
1686 Assert(!(GCPhysCmd & UINT64_C(0xf)));
1687 memset(pEvtIllegalCmd, 0, sizeof(*pEvtIllegalCmd));
1688 pEvtIllegalCmd->n.u4EvtCode = IOMMU_EVT_ILLEGAL_CMD_ERROR;
1689 pEvtIllegalCmd->n.u64Addr = GCPhysCmd;
1690}
1691
1692
1693/**
1694 * Raises an ILLEGAL_COMMAND_ERROR event.
1695 *
1696 * @param pDevIns The IOMMU device instance.
1697 * @param pEvtIllegalCmd The illegal command error event.
1698 */
1699static void iommuAmdRaiseIllegalCmdEvent(PPDMDEVINS pDevIns, PCEVT_ILLEGAL_CMD_ERR_T pEvtIllegalCmd)
1700{
1701    AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_ILLEGAL_CMD_ERR_T));
1702 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIllegalCmd;
1703 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1704
1705 IOMMU_LOCK_NORET(pDevIns);
1706
1707 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1708 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_CMD_BUF_RUNNING);
1709
1710 IOMMU_UNLOCK(pDevIns);
1711
1712 LogFunc(("Raised ILLEGAL_COMMAND_ERROR. Addr=%#RGp\n", pEvtIllegalCmd->n.u64Addr));
1713}
1714
1715
1716/**
1717 * Initializes an ILLEGAL_DEV_TABLE_ENTRY event.
1718 *
1719 * @param uDevId The device ID.
1720 * @param uIova The I/O virtual address.
1721 * @param fRsvdNotZero Whether reserved bits are not zero. Pass @c false if the
1722 * event was caused by an invalid level encoding in the
1723 * DTE.
1724 * @param enmOp The IOMMU operation being performed.
1725 * @param pEvtIllegalDte Where to store the initialized event.
1726 */
1727static void iommuAmdInitIllegalDteEvent(uint16_t uDevId, uint64_t uIova, bool fRsvdNotZero, IOMMUOP enmOp,
1728 PEVT_ILLEGAL_DTE_T pEvtIllegalDte)
1729{
1730 memset(pEvtIllegalDte, 0, sizeof(*pEvtIllegalDte));
1731 pEvtIllegalDte->n.u16DevId = uDevId;
1732 pEvtIllegalDte->n.u1Interrupt = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
1733 pEvtIllegalDte->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
1734 pEvtIllegalDte->n.u1RsvdNotZero = fRsvdNotZero;
1735 pEvtIllegalDte->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
1736 pEvtIllegalDte->n.u4EvtCode = IOMMU_EVT_ILLEGAL_DEV_TAB_ENTRY;
1737 pEvtIllegalDte->n.u64Addr = uIova & ~UINT64_C(0x3);
1738    /** @todo r=ramshankar: Not sure why the last 2 bits are marked as reserved by the
1739     *        IOMMU spec. here but not for the same field in the I/O page fault event. */
1740 Assert(!(uIova & UINT64_C(0x3)));
1741}
1742
1743
1744/**
1745 * Raises an ILLEGAL_DEV_TABLE_ENTRY event.
1746 *
1747 * @param pDevIns The IOMMU instance data.
1748 * @param enmOp The IOMMU operation being performed.
1749 * @param pEvtIllegalDte The illegal device table entry event.
1750 * @param enmEvtType The illegal device table entry event type.
1751 *
1752 * @thread Any.
1753 */
1754static void iommuAmdRaiseIllegalDteEvent(PPDMDEVINS pDevIns, IOMMUOP enmOp, PCEVT_ILLEGAL_DTE_T pEvtIllegalDte,
1755 EVT_ILLEGAL_DTE_TYPE_T enmEvtType)
1756{
1757 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_ILLEGAL_DTE_T));
1758 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIllegalDte;
1759
1760 IOMMU_LOCK_NORET(pDevIns);
1761
1762 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1763 if (enmOp != IOMMUOP_CMD)
1764 iommuAmdSetPciTargetAbort(pDevIns);
1765
1766 IOMMU_UNLOCK(pDevIns);
1767
1768 LogFunc(("Raised ILLEGAL_DTE_EVENT. uDevId=%#x uIova=%#RX64 enmOp=%u enmEvtType=%u\n", pEvtIllegalDte->n.u16DevId,
1769 pEvtIllegalDte->n.u64Addr, enmOp, enmEvtType));
1770 NOREF(enmEvtType);
1771}
1772
1773
1774/**
1775 * Initializes an IO_PAGE_FAULT event.
1776 *
1777 * @param uDevId The device ID.
1778 * @param uDomainId The domain ID.
1779 * @param uIova The I/O virtual address being accessed.
1780 * @param fPresent Transaction to a page marked as present (including
1781 * DTE.V=1) or interrupt marked as remapped
1782 * (IRTE.RemapEn=1).
1783 * @param fRsvdNotZero Whether reserved bits are not zero. Pass @c false if
1784 * the I/O page fault was caused by invalid level
1785 * encoding.
1786 * @param fPermDenied Permission denied for the address being accessed.
1787 * @param enmOp The IOMMU operation being performed.
1788 * @param pEvtIoPageFault Where to store the initialized event.
1789 */
1790static void iommuAmdInitIoPageFaultEvent(uint16_t uDevId, uint16_t uDomainId, uint64_t uIova, bool fPresent, bool fRsvdNotZero,
1791 bool fPermDenied, IOMMUOP enmOp, PEVT_IO_PAGE_FAULT_T pEvtIoPageFault)
1792{
1793 Assert(!fPermDenied || fPresent);
1794 memset(pEvtIoPageFault, 0, sizeof(*pEvtIoPageFault));
1795 pEvtIoPageFault->n.u16DevId = uDevId;
1796 //pEvtIoPageFault->n.u4PasidHi = 0;
1797 pEvtIoPageFault->n.u16DomainOrPasidLo = uDomainId;
1798 //pEvtIoPageFault->n.u1GuestOrNested = 0;
1799 //pEvtIoPageFault->n.u1NoExecute = 0;
1800 //pEvtIoPageFault->n.u1User = 0;
1801 pEvtIoPageFault->n.u1Interrupt = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
1802 pEvtIoPageFault->n.u1Present = fPresent;
1803 pEvtIoPageFault->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
1804 pEvtIoPageFault->n.u1PermDenied = fPermDenied;
1805 pEvtIoPageFault->n.u1RsvdNotZero = fRsvdNotZero;
1806 pEvtIoPageFault->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
1807 pEvtIoPageFault->n.u4EvtCode = IOMMU_EVT_IO_PAGE_FAULT;
1808 pEvtIoPageFault->n.u64Addr = uIova;
1809}
1810
1811
1812/**
1813 * Raises an IO_PAGE_FAULT event.
1814 *
1815 * @param pDevIns The IOMMU instance data.
1816 * @param pDte The device table entry. Optional, can be NULL
1817 * depending on @a enmOp.
1818 * @param pIrte The interrupt remapping table entry. Optional, can
1819 * be NULL depending on @a enmOp.
1820 * @param enmOp The IOMMU operation being performed.
1821 * @param pEvtIoPageFault The I/O page fault event.
1822 * @param enmEvtType The I/O page fault event type.
1823 *
1824 * @thread Any.
1825 */
1826static void iommuAmdRaiseIoPageFaultEvent(PPDMDEVINS pDevIns, PCDTE_T pDte, PCIRTE_T pIrte, IOMMUOP enmOp,
1827 PCEVT_IO_PAGE_FAULT_T pEvtIoPageFault, EVT_IO_PAGE_FAULT_TYPE_T enmEvtType)
1828{
1829 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_IO_PAGE_FAULT_T));
1830 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIoPageFault;
1831
1832 IOMMU_LOCK_NORET(pDevIns);
1833
1834 bool fSuppressEvtLogging = false;
1835 if ( enmOp == IOMMUOP_MEM_READ
1836 || enmOp == IOMMUOP_MEM_WRITE)
1837 {
1838 if ( pDte
1839 && pDte->n.u1Valid)
1840 {
1841 fSuppressEvtLogging = pDte->n.u1SuppressAllPfEvents;
1842 /** @todo IOMMU: Implement DTE.SE bit, i.e. device ID specific I/O page fault
1843 * suppression. Perhaps will be possible when we complete IOTLB/cache
1844 * handling. */
1845 }
1846 }
1847 else if (enmOp == IOMMUOP_INTR_REQ)
1848 {
1849 if ( pDte
1850 && pDte->n.u1IntrMapValid)
1851 fSuppressEvtLogging = !pDte->n.u1IgnoreUnmappedIntrs;
1852
1853 if ( !fSuppressEvtLogging
1854 && pIrte)
1855 fSuppressEvtLogging = pIrte->n.u1SuppressIoPf;
1856 }
1857 /* else: Events are never suppressed for commands. */
1858
1859 switch (enmEvtType)
1860 {
1861 case kIoPageFaultType_PermDenied:
1862 {
1863 /* Cannot be triggered by a command. */
1864 Assert(enmOp != IOMMUOP_CMD);
1865 RT_FALL_THRU();
1866 }
1867 case kIoPageFaultType_DteRsvdPagingMode:
1868 case kIoPageFaultType_PteInvalidPageSize:
1869 case kIoPageFaultType_PteInvalidLvlEncoding:
1870 case kIoPageFaultType_SkippedLevelIovaNotZero:
1871 case kIoPageFaultType_PteRsvdNotZero:
1872 case kIoPageFaultType_PteValidNotSet:
1873 case kIoPageFaultType_DteTranslationDisabled:
1874 case kIoPageFaultType_PasidInvalidRange:
1875 {
1876 /*
1877 * For a translation request, the IOMMU doesn't signal an I/O page fault nor does it
1878 * create an event log entry. See AMD spec. 2.1.3.2 "I/O Page Faults".
1879 */
1880 if (enmOp != IOMMUOP_TRANSLATE_REQ)
1881 {
1882 if (!fSuppressEvtLogging)
1883 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1884 if (enmOp != IOMMUOP_CMD)
1885 iommuAmdSetPciTargetAbort(pDevIns);
1886 }
1887 break;
1888 }
1889
1890 case kIoPageFaultType_UserSupervisor:
1891 {
1892 /* Access is blocked and only creates an event log entry. */
1893 if (!fSuppressEvtLogging)
1894 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1895 break;
1896 }
1897
1898 case kIoPageFaultType_IrteAddrInvalid:
1899 case kIoPageFaultType_IrteRsvdNotZero:
1900 case kIoPageFaultType_IrteRemapEn:
1901 case kIoPageFaultType_IrteRsvdIntType:
1902 case kIoPageFaultType_IntrReqAborted:
1903 case kIoPageFaultType_IntrWithPasid:
1904 {
1905            /* Only triggered by interrupt requests. */
1906 Assert(enmOp == IOMMUOP_INTR_REQ);
1907 if (!fSuppressEvtLogging)
1908 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1909 iommuAmdSetPciTargetAbort(pDevIns);
1910 break;
1911 }
1912
1913 case kIoPageFaultType_SmiFilterMismatch:
1914 {
1915            /* Not supported and probably never will be; assert. */
1916 AssertMsgFailed(("kIoPageFaultType_SmiFilterMismatch - Upstream SMI requests not supported/implemented."));
1917 break;
1918 }
1919
1920 case kIoPageFaultType_DevId_Invalid:
1921 {
1922 /* Cannot be triggered by a command. */
1923 Assert(enmOp != IOMMUOP_CMD);
1924 Assert(enmOp != IOMMUOP_TRANSLATE_REQ); /** @todo IOMMU: We don't support translation requests yet. */
1925 if (!fSuppressEvtLogging)
1926 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1927 if ( enmOp == IOMMUOP_MEM_READ
1928 || enmOp == IOMMUOP_MEM_WRITE)
1929 iommuAmdSetPciTargetAbort(pDevIns);
1930 break;
1931 }
1932 }
1933
1934 IOMMU_UNLOCK(pDevIns);
1935}
1936
1937
1938/**
1939 * Returns whether the I/O virtual address is to be excluded from translation and
1940 * permission checks.
1941 *
1942 * @returns @c true if the DVA is excluded, @c false otherwise.
1943 * @param pThis The IOMMU device state.
1944 * @param pDte The device table entry.
1945 * @param uIova The I/O virtual address.
1946 *
1947 * @remarks Ensure the exclusion range is enabled prior to calling this function.
1948 *
1949 * @thread Any.
1950 */
1951static bool iommuAmdIsDvaInExclRange(PCIOMMU pThis, PCDTE_T pDte, uint64_t uIova)
1952{
1953 /* Ensure the exclusion range is enabled. */
1954 Assert(pThis->ExclRangeBaseAddr.n.u1ExclEnable);
1955
1956 /* Check if the IOVA falls within the exclusion range. */
1957 uint64_t const uIovaExclFirst = pThis->ExclRangeBaseAddr.n.u40ExclRangeBase << X86_PAGE_4K_SHIFT;
1958 uint64_t const uIovaExclLast = pThis->ExclRangeLimit.n.u52ExclLimit;
1959    if (uIova >= uIovaExclFirst && uIova <= uIovaExclLast)
1960 {
1961 /* Check if device access to addresses in the exclusion range can be forwarded untranslated. */
1962 if ( pThis->ExclRangeBaseAddr.n.u1AllowAll
1963 || pDte->n.u1AllowExclusion)
1964 return true;
1965 }
1966 return false;
1967}
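
/*
 * Worked example for the range check above (illustrative values): with
 * ExclRangeBase = 0x100 (in 4K pages) and ExclLimit = 0x1fffff, the exclusion
 * range spans IOVAs [0x100000, 0x1fffff]. An access at 0x100080 is then
 * forwarded untranslated when AllowAll or DTE.EX is set, while an access at
 * 0x200000 is translated normally.
 */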
1968
1969
1970/**
1971 * Reads a device table entry from guest memory given the device ID.
1972 *
1973 * @returns VBox status code.
1974 * @param pDevIns The IOMMU device instance.
1975 * @param uDevId The device ID.
1976 * @param enmOp The IOMMU operation being performed.
1977 * @param pDte Where to store the device table entry.
1978 *
1979 * @thread Any.
1980 */
1981static int iommuAmdReadDte(PPDMDEVINS pDevIns, uint16_t uDevId, IOMMUOP enmOp, PDTE_T pDte)
1982{
1983 PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1984 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
1985
1986 uint8_t const idxSegsEn = Ctrl.n.u3DevTabSegEn;
1987 Assert(idxSegsEn < RT_ELEMENTS(g_auDevTabSegShifts));
1988 Assert(idxSegsEn < RT_ELEMENTS(g_auDevTabSegMasks));
1989
1990 uint8_t const idxSeg = (uDevId & g_auDevTabSegMasks[idxSegsEn]) >> g_auDevTabSegShifts[idxSegsEn];
1991 Assert(idxSeg < RT_ELEMENTS(pThis->aDevTabBaseAddrs));
1992
1993 RTGCPHYS const GCPhysDevTab = pThis->aDevTabBaseAddrs[idxSeg].n.u40Base << X86_PAGE_4K_SHIFT;
1994    uint32_t const offDte       = (uDevId & ~g_auDevTabSegMasks[idxSegsEn]) * sizeof(DTE_T);
1995 RTGCPHYS const GCPhysDte = GCPhysDevTab + offDte;
1996
1997 Assert(!(GCPhysDevTab & X86_PAGE_4K_OFFSET_MASK));
1998 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysDte, pDte, sizeof(*pDte));
1999 if (RT_FAILURE(rc))
2000 {
2001 LogFunc(("Failed to read device table entry at %#RGp. rc=%Rrc -> DevTabHwError\n", GCPhysDte, rc));
2002
2003 EVT_DEV_TAB_HW_ERROR_T EvtDevTabHwErr;
2004 iommuAmdInitDevTabHwErrorEvent(uDevId, GCPhysDte, enmOp, &EvtDevTabHwErr);
2005 iommuAmdRaiseDevTabHwErrorEvent(pDevIns, enmOp, &EvtDevTabHwErr);
2006 return VERR_IOMMU_IPE_1;
2007 }
2008
2009 return rc;
2010}
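
/*
 * Worked example of the DTE address computation above (illustrative, assuming
 * two device table segments where the top bit of the device ID selects the
 * segment): for uDevId=0x8010, idxSeg=1 and the entry is read from offset
 * (0x8010 & 0x7fff) * sizeof(DTE_T) = 0x10 * 32 = 0x200 into segment 1's table.
 */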
2011
2012
2013/**
2014 * Walks the I/O page table to translate the I/O virtual address to a system
2015 * physical address.
2016 *
2017 * @returns VBox status code.
2018 * @param pDevIns The IOMMU device instance.
2019 * @param uIova The I/O virtual address to translate. Must be 4K aligned.
2020 * @param uDevId The device ID.
2021 * @param fAccess The access permissions (IOMMU_IO_PERM_XXX). This is the
2022 * permissions for the access being made.
2023 * @param pDte The device table entry.
2024 * @param enmOp The IOMMU operation being performed.
2025 * @param pWalkResult Where to store the results of the I/O page walk. This is
2026 * only updated when VINF_SUCCESS is returned.
2027 *
2028 * @thread Any.
2029 */
2030static int iommuAmdWalkIoPageTable(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, uint8_t fAccess, PCDTE_T pDte,
2031 IOMMUOP enmOp, PIOWALKRESULT pWalkResult)
2032{
2033 Assert(pDte->n.u1Valid);
2034 Assert(!(uIova & X86_PAGE_4K_OFFSET_MASK));
2035
2036 /* If the translation is not valid, raise an I/O page fault. */
2037 if (pDte->n.u1TranslationValid)
2038 { /* likely */ }
2039 else
2040 {
2041        /** @todo r=ramshankar: The AMD IOMMU spec. says the page walk is terminated but
2042         *        doesn't explicitly say whether an I/O page fault is raised. From other
2043         *        places in the spec. it seems early page walk terminations (starting with
2044         *        the DTE) return the state computed so far and raise an I/O page fault. So
2045         *        we return an invalid translation rather than skip translation. */
2046 LogFunc(("Translation valid bit not set -> IOPF\n"));
2047 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2048 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, false /* fPresent */, false /* fRsvdNotZero */,
2049 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2050 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2051 kIoPageFaultType_DteTranslationDisabled);
2052 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2053 }
2054
2055 /* If the root page table level is 0, translation is skipped and access is controlled by the permission bits. */
2056 uint8_t const uMaxLevel = pDte->n.u3Mode;
2057 if (uMaxLevel != 0)
2058 { /* likely */ }
2059 else
2060 {
2061 uint8_t const fDtePerm = (pDte->au64[0] >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
2062 if ((fAccess & fDtePerm) != fAccess)
2063 {
2064 LogFunc(("Access denied for IOVA (%#RX64). fAccess=%#x fDtePerm=%#x\n", uIova, fAccess, fDtePerm));
2065 return VERR_IOMMU_ADDR_ACCESS_DENIED;
2066 }
2067 pWalkResult->GCPhysSpa = uIova;
2068 pWalkResult->cShift = 0;
2069 pWalkResult->fIoPerm = fDtePerm;
2070 return VINF_SUCCESS;
2071 }
2072
2073 /* If the root page table level exceeds the allowed host-address translation level, page walk is terminated. */
2074 if (uMaxLevel <= IOMMU_MAX_HOST_PT_LEVEL)
2075 { /* likely */ }
2076 else
2077 {
2078 /** @todo r=ramshankar: I cannot make out from the AMD IOMMU spec. if I should be
2079 * raising an ILLEGAL_DEV_TABLE_ENTRY event or an IO_PAGE_FAULT event here.
2080 * I'm just going with I/O page fault. */
2081 LogFunc(("Invalid root page table level %#x -> IOPF\n", uMaxLevel));
2082 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2083 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2084 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2085 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2086 kIoPageFaultType_PteInvalidLvlEncoding);
2087 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2088 }
2089
2090 /* Check permissions bits of the root page table. */
2091 uint8_t const fRootPtePerm = (pDte->au64[0] >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
2092 if ((fAccess & fRootPtePerm) == fAccess)
2093 { /* likely */ }
2094 else
2095 {
2096 LogFunc(("Permission denied (fAccess=%#x fRootPtePerm=%#x) -> IOPF\n", fAccess, fRootPtePerm));
2097 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2098 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2099 true /* fPermDenied */, enmOp, &EvtIoPageFault);
2100 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault, kIoPageFaultType_PermDenied);
2101 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2102 }
2103
2104 /** @todo r=ramshankar: IOMMU: Consider splitting the rest of this into a separate
2105 * function called iommuAmdWalkIoPageDirectory() and call it for multi-page
2106 * accesses from the 2nd page. We can avoid re-checking the DTE root-page
2107 * table entry every time. Not sure if it's worth optimizing that case now
2108 * or if at all. */
2109
2110 /* The virtual address bits indexing table. */
2111 static uint8_t const s_acIovaLevelShifts[] = { 0, 12, 21, 30, 39, 48, 57, 0 };
2112 static uint64_t const s_auIovaLevelMasks[] = { UINT64_C(0x0000000000000000),
2113 UINT64_C(0x00000000001ff000),
2114 UINT64_C(0x000000003fe00000),
2115 UINT64_C(0x0000007fc0000000),
2116 UINT64_C(0x0000ff8000000000),
2117 UINT64_C(0x01ff000000000000),
2118 UINT64_C(0xfe00000000000000),
2119 UINT64_C(0x0000000000000000) };
2120 AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) == RT_ELEMENTS(s_auIovaLevelMasks));
2121 AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) > IOMMU_MAX_HOST_PT_LEVEL);
2122
2123 /* Traverse the I/O page table starting with the page directory in the DTE. */
2124 IOPTENTITY_T PtEntity;
2125 PtEntity.u64 = pDte->au64[0];
2126 for (;;)
2127 {
2128 /* Figure out the system physical address of the page table at the current level. */
2129 uint8_t const uLevel = PtEntity.n.u3NextLevel;
2130
2131 /* Read the page table entity at the current level. */
2132 {
2133 Assert(uLevel > 0 && uLevel < RT_ELEMENTS(s_acIovaLevelShifts));
2134 Assert(uLevel <= IOMMU_MAX_HOST_PT_LEVEL);
2135 uint16_t const idxPte = (uIova >> s_acIovaLevelShifts[uLevel]) & UINT64_C(0x1ff);
2136 uint64_t const offPte = idxPte << 3;
2137 RTGCPHYS const GCPhysPtEntity = (PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK) + offPte;
2138 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysPtEntity, &PtEntity.u64, sizeof(PtEntity));
2139 if (RT_FAILURE(rc))
2140 {
2141 LogFunc(("Failed to read page table entry at %#RGp. rc=%Rrc -> PageTabHwError\n", GCPhysPtEntity, rc));
2142 EVT_PAGE_TAB_HW_ERR_T EvtPageTabHwErr;
2143 iommuAmdInitPageTabHwErrorEvent(uDevId, pDte->n.u16DomainId, GCPhysPtEntity, enmOp, &EvtPageTabHwErr);
2144 iommuAmdRaisePageTabHwErrorEvent(pDevIns, enmOp, &EvtPageTabHwErr);
2145 return VERR_IOMMU_IPE_2;
2146 }
2147 }
2148
2149 /* Check present bit. */
2150 if (PtEntity.n.u1Present)
2151 { /* likely */ }
2152 else
2153 {
2154 LogFunc(("Page table entry not present -> IOPF\n"));
2155 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2156 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, false /* fPresent */, false /* fRsvdNotZero */,
2157 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2158 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault, kIoPageFaultType_PermDenied);
2159 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2160 }
2161
2162 /* Check permission bits. */
2163 uint8_t const fPtePerm = (PtEntity.u64 >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
2164 if ((fAccess & fPtePerm) == fAccess)
2165 { /* likely */ }
2166 else
2167 {
2168 LogFunc(("Page table entry permission denied (fAccess=%#x fPtePerm=%#x) -> IOPF\n", fAccess, fPtePerm));
2169 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2170 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2171 true /* fPermDenied */, enmOp, &EvtIoPageFault);
2172 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault, kIoPageFaultType_PermDenied);
2173 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2174 }
2175
2176 /* If this is a PTE, we're at the final level and we're done. */
2177 uint8_t const uNextLevel = PtEntity.n.u3NextLevel;
2178 if (uNextLevel == 0)
2179 {
2180 /* The page size of the translation is the default (4K). */
2181 pWalkResult->GCPhysSpa = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
2182 pWalkResult->cShift = X86_PAGE_4K_SHIFT;
2183 pWalkResult->fIoPerm = fPtePerm;
2184 return VINF_SUCCESS;
2185 }
2186 if (uNextLevel == 7)
2187 {
2188 /* The default page size of the translation is overridden. */
2189 RTGCPHYS const GCPhysPte = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
2190 uint8_t cShift = X86_PAGE_4K_SHIFT;
2191 while (GCPhysPte & RT_BIT_64(cShift++))
2192 ;
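            /* E.g. (illustrative): a level 1 entry whose address has bits 18:12 all set
               and bit 19 clear encodes a 1MB page; the loop above exits with cShift = 20,
               which satisfies the range check below (12 < 20 < 21). */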
2193
2194 /* The page size must be larger than the default size and lower than the default size of the higher level. */
2195 Assert(uLevel < IOMMU_MAX_HOST_PT_LEVEL); /* PTE at level 6 handled outside the loop, uLevel should be <= 5. */
2196 if ( cShift > s_acIovaLevelShifts[uLevel]
2197 && cShift < s_acIovaLevelShifts[uLevel + 1])
2198 {
2199 pWalkResult->GCPhysSpa = GCPhysPte;
2200 pWalkResult->cShift = cShift;
2201 pWalkResult->fIoPerm = fPtePerm;
2202 return VINF_SUCCESS;
2203 }
2204
2205 LogFunc(("Page size invalid cShift=%#x -> IOPF\n", cShift));
2206 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2207 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2208 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2209 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2210 kIoPageFaultType_PteInvalidPageSize);
2211 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2212 }
2213
2214 /* Validate the next level encoding of the PDE. */
2215#if IOMMU_MAX_HOST_PT_LEVEL < 6
2216 if (uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL)
2217 { /* likely */ }
2218 else
2219 {
2220 LogFunc(("Next level of PDE invalid uNextLevel=%#x -> IOPF\n", uNextLevel));
2221 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2222 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2223 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2224 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2225 kIoPageFaultType_PteInvalidLvlEncoding);
2226 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2227 }
2228#else
2229 Assert(uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL);
2230#endif
2231
2232 /* Validate level transition. */
2233 if (uNextLevel < uLevel)
2234 { /* likely */ }
2235 else
2236 {
2237 LogFunc(("Next level (%#x) must be less than the current level (%#x) -> IOPF\n", uNextLevel, uLevel));
2238 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2239 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2240 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2241 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2242 kIoPageFaultType_PteInvalidLvlEncoding);
2243 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2244 }
2245
2246 /* Ensure IOVA bits of skipped levels are zero. */
2247 Assert(uLevel > 0);
2248 uint64_t uIovaSkipMask = 0;
2249 for (unsigned idxLevel = uLevel - 1; idxLevel > uNextLevel; idxLevel--)
2250 uIovaSkipMask |= s_auIovaLevelMasks[idxLevel];
2251 if (!(uIova & uIovaSkipMask))
2252 { /* likely */ }
2253 else
2254 {
2255 LogFunc(("IOVA of skipped levels are not zero %#RX64 (SkipMask=%#RX64) -> IOPF\n", uIova, uIovaSkipMask));
2256 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2257 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2258 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2259 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2260 kIoPageFaultType_SkippedLevelIovaNotZero);
2261 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2262 }
2263
2264 /* Continue with traversing the page directory at this level. */
2265 }
2266}
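
/*
 * A worked example of the page table walk above (illustrative): translating
 * uIova 0x8086000 with DTE.Mode=3 indexes the level 3 table with IOVA bits
 * 38:30 (0), the level 2 table with bits 29:21 (0x40) and the level 1 table
 * with bits 20:12 (0x86); a final PTE with NextLevel=0 then yields a 4K
 * translation (cShift = 12).
 */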
2267
2268
2269/**
2270 * Looks up an I/O virtual address from the device table.
2271 *
2272 * @returns VBox status code.
2273 * @param pDevIns The IOMMU instance data.
2274 * @param uDevId The device ID.
2275 * @param uIova The I/O virtual address to lookup.
2276 * @param cbAccess The size of the access.
2277 * @param fAccess The access permissions (IOMMU_IO_PERM_XXX). This is the
2278 * permissions for the access being made.
2279 * @param enmOp The IOMMU operation being performed.
2280 * @param pGCPhysSpa Where to store the translated system physical address. Only
2281 * valid when translation succeeds and VINF_SUCCESS is
2282 * returned!
2283 *
2284 * @thread Any.
2285 */
2286static int iommuAmdLookupDeviceTable(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, size_t cbAccess, uint8_t fAccess,
2287 IOMMUOP enmOp, PRTGCPHYS pGCPhysSpa)
2288{
2289 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2290
2291 /* Read the device table entry from memory. */
2292 DTE_T Dte;
2293 int rc = iommuAmdReadDte(pDevIns, uDevId, enmOp, &Dte);
2294 if (RT_SUCCESS(rc))
2295 {
2296        /* If the DTE is not valid, addresses are forwarded without translation. */
2297 if (Dte.n.u1Valid)
2298 { /* likely */ }
2299 else
2300 {
2301 /** @todo IOMMU: Add to IOLTB cache. */
2302 *pGCPhysSpa = uIova;
2303 return VINF_SUCCESS;
2304 }
2305
2306 /* Validate bits 127:0 of the device table entry when DTE.V is 1. */
2307 uint64_t const fRsvd0 = Dte.au64[0] & ~(IOMMU_DTE_QWORD_0_VALID_MASK & ~IOMMU_DTE_QWORD_0_FEAT_MASK);
2308 uint64_t const fRsvd1 = Dte.au64[1] & ~(IOMMU_DTE_QWORD_1_VALID_MASK & ~IOMMU_DTE_QWORD_1_FEAT_MASK);
2309 if (RT_LIKELY( !fRsvd0
2310 && !fRsvd1))
2311 { /* likely */ }
2312 else
2313 {
2314 LogFunc(("Invalid reserved bits in DTE (u64[0]=%#RX64 u64[1]=%#RX64) -> Illegal DTE\n", fRsvd0, fRsvd1));
2315 EVT_ILLEGAL_DTE_T Event;
2316 iommuAmdInitIllegalDteEvent(uDevId, uIova, true /* fRsvdNotZero */, enmOp, &Event);
2317 iommuAmdRaiseIllegalDteEvent(pDevIns, enmOp, &Event, kIllegalDteType_RsvdNotZero);
2318 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2319 }
2320
2321 /* If the IOVA is subject to address exclusion, addresses are forwarded without translation. */
2322 if ( !pThis->ExclRangeBaseAddr.n.u1ExclEnable
2323 || !iommuAmdIsDvaInExclRange(pThis, &Dte, uIova))
2324 { /* likely */ }
2325 else
2326 {
2327 /** @todo IOMMU: Add to IOLTB cache. */
2328 *pGCPhysSpa = uIova;
2329 return VINF_SUCCESS;
2330 }
2331
2332 /** @todo IOMMU: Perhaps do the <= 4K access case first, if the generic loop
2333 * below gets too expensive and when we have iommuAmdWalkIoPageDirectory. */
2334
2335 uint64_t uBaseIova = uIova & X86_PAGE_4K_BASE_MASK;
2336 uint64_t offIova = uIova & X86_PAGE_4K_OFFSET_MASK;
2337 uint64_t cbRemaining = cbAccess;
2338 for (;;)
2339 {
2340 /* Walk the I/O page tables to translate the IOVA and check permission for the access. */
2341 IOWALKRESULT WalkResult;
2342 rc = iommuAmdWalkIoPageTable(pDevIns, uDevId, uBaseIova, fAccess, &Dte, enmOp, &WalkResult);
2343 if (RT_SUCCESS(rc))
2344 {
2345 /** @todo IOMMU: Split large pages into 4K IOTLB entries and add to IOTLB cache. */
2346
2347 /* Store the translated base address before continuing to check permissions for any more pages. */
2348 if (cbRemaining == cbAccess)
2349 {
2350                    RTGCPHYS const offSpa = uIova & ~(UINT64_C(0xffffffffffffffff) << WalkResult.cShift);
2351 *pGCPhysSpa = WalkResult.GCPhysSpa | offSpa;
2352 }
2353
2354 uint64_t const cbPhysPage = UINT64_C(1) << WalkResult.cShift;
2355 if (cbRemaining > cbPhysPage - offIova)
2356 {
2357 cbRemaining -= (cbPhysPage - offIova);
2358 uBaseIova += cbPhysPage;
2359 offIova = 0;
2360 }
2361 else
2362 break;
2363 }
2364 else
2365 {
2366 LogFunc(("I/O page table walk failed. uIova=%#RX64 uBaseIova=%#RX64 fAccess=%u rc=%Rrc\n", uIova,
2367 uBaseIova, fAccess, rc));
2368 *pGCPhysSpa = NIL_RTGCPHYS;
2369 return rc;
2370 }
2371 }
2372
2373 return rc;
2374 }
2375
2376 LogFunc(("Failed to read device table entry. uDevId=%#x rc=%Rrc\n", uDevId, rc));
2377 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2378}
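
/*
 * Worked example for the access loop above (illustrative): a 32-byte read at
 * uIova 0x10ff0 spans two 4K pages. The first walk translates page 0x10000 and
 * stores the result (page SPA | 0xff0); the second walk only validates the
 * permissions of page 0x11000, since the caller receives just the starting
 * address of the access.
 */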
2379
2380
2381/**
2382 * Memory read request from a device.
2383 *
2384 * @returns VBox status code.
2385 * @param pDevIns The IOMMU device instance.
2386 * @param uDevId The device ID (bus, device, function).
2387 * @param uIova The I/O virtual address being read.
2388 * @param cbRead The number of bytes being read.
2389 * @param pGCPhysSpa Where to store the translated system physical address.
2390 *
2391 * @thread Any.
2392 */
2393static DECLCALLBACK(int) iommuAmdDeviceMemRead(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, size_t cbRead,
2394 PRTGCPHYS pGCPhysSpa)
2395{
2396 /* Validate. */
2397 Assert(pDevIns);
2398 Assert(pGCPhysSpa);
2399 Assert(cbRead > 0);
2400
2401 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2402 LogFlowFunc(("uDevId=%#x uIova=%#RX64 cbRead=%u\n", uDevId, uIova, cbRead));
2403
2404 /* Addresses are forwarded without translation when the IOMMU is disabled. */
2405 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
2406 if (Ctrl.n.u1IommuEn)
2407 {
2408 /** @todo IOMMU: IOTLB cache lookup. */
2409
2410 /* Lookup the IOVA from the device table. */
2411 return iommuAmdLookupDeviceTable(pDevIns, uDevId, uIova, cbRead, IOMMU_IO_PERM_READ, IOMMUOP_MEM_READ, pGCPhysSpa);
2412 }
2413
2414 *pGCPhysSpa = uIova;
2415 return VINF_SUCCESS;
2416}
2417
2418
2419/**
2420 * Memory write request from a device.
2421 *
2422 * @returns VBox status code.
2423 * @param pDevIns The IOMMU device instance.
2424 * @param uDevId The device ID (bus, device, function).
2425 * @param uIova The I/O virtual address being written.
2426 * @param cbWrite The number of bytes being written.
2427 * @param pGCPhysSpa Where to store the translated physical address.
2428 *
2429 * @thread Any.
2430 */
2431static DECLCALLBACK(int) iommuAmdDeviceMemWrite(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, size_t cbWrite,
2432 PRTGCPHYS pGCPhysSpa)
2433{
2434 /* Validate. */
2435 Assert(pDevIns);
2436 Assert(pGCPhysSpa);
2437 Assert(cbWrite > 0);
2438
2439 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2440 LogFlowFunc(("uDevId=%#x uIova=%#RX64 cbWrite=%u\n", uDevId, uIova, cbWrite));
2441
2442 /* Addresses are forwarded without translation when the IOMMU is disabled. */
2443 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
2444 if (Ctrl.n.u1IommuEn)
2445 {
2446 /** @todo IOMMU: IOTLB cache lookup. */
2447
2448 /* Lookup the IOVA from the device table. */
2449 return iommuAmdLookupDeviceTable(pDevIns, uDevId, uIova, cbWrite, IOMMU_IO_PERM_WRITE, IOMMUOP_MEM_WRITE, pGCPhysSpa);
2450 }
2451
2452 *pGCPhysSpa = uIova;
2453 return VINF_SUCCESS;
2454}
2455
2456
2457/**
2458 * Reads an interrupt remapping table entry from guest memory given its DTE.
2459 *
2460 * @returns VBox status code.
2461 * @param pDevIns The IOMMU device instance.
2462 * @param uDevId The device ID.
2463 * @param pDte The device table entry.
2464 * @param GCPhysIn The source MSI address.
2465 * @param uDataIn The source MSI data.
2466 * @param enmOp The IOMMU operation being performed.
2467 * @param pIrte Where to store the interrupt remapping table entry.
2468 *
2469 * @thread Any.
2470 */
2471static int iommuAmdReadIrte(PPDMDEVINS pDevIns, uint16_t uDevId, PCDTE_T pDte, RTGCPHYS GCPhysIn, uint32_t uDataIn,
2472 IOMMUOP enmOp, PIRTE_T pIrte)
2473{
2474 /* Ensure the IRTE length is valid. */
2475 Assert(pDte->n.u4IntrTableLength < IOMMU_DTE_INTR_TAB_LEN_MAX);
2476
2477 RTGCPHYS const GCPhysIntrTable = pDte->au64[2] & IOMMU_DTE_IRTE_ROOT_PTR_MASK;
2478 uint16_t const cbIntrTable = IOMMU_GET_INTR_TAB_LEN(pDte);
2479 uint16_t const offIrte = (uDataIn & IOMMU_MSI_DATA_IRTE_OFFSET_MASK) * sizeof(IRTE_T);
2480 RTGCPHYS const GCPhysIrte = GCPhysIntrTable + offIrte;
2481
2482 /* Ensure the IRTE falls completely within the interrupt table. */
2483 if (offIrte + sizeof(IRTE_T) <= cbIntrTable)
2484 { /* likely */ }
2485 else
2486 {
2487 LogFunc(("IRTE exceeds table length (GCPhysIntrTable=%#RGp cbIntrTable=%u offIrte=%#x uDataIn=%#x) -> IOPF\n",
2488 GCPhysIntrTable, cbIntrTable, offIrte, uDataIn));
2489
2490 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2491 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, GCPhysIn, false /* fPresent */, false /* fRsvdNotZero */,
2492 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2493 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2494 kIoPageFaultType_IrteAddrInvalid);
2495 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2496 }
2497
2498 /* Read the IRTE from memory. */
2499 Assert(!(GCPhysIrte & 3));
2500 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysIrte, pIrte, sizeof(*pIrte));
2501 if (RT_SUCCESS(rc))
2502 return VINF_SUCCESS;
2503
2504    /** @todo The IOMMU spec. does not say what kind of error is reported in this
2505 * situation. Is it an I/O page fault or a device table hardware error?
2506 * There's no interrupt table hardware error event, but it's unclear what
2507 * we should do here. */
2508 LogFunc(("Failed to read interrupt table entry at %#RGp. rc=%Rrc -> ???\n", GCPhysIrte, rc));
2509 return VERR_IOMMU_IPE_4;
2510}
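
/*
 * Worked example for the IRTE address computation above (illustrative, assuming
 * a 32-entry interrupt table, i.e. 128 bytes): MSI data 0x0010 selects IRTE
 * index 0x10, so offIrte = 0x10 * sizeof(IRTE_T) = 0x40, which passes the
 * bounds check since 0x40 + 4 <= 128.
 */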
2511
2512
2513/**
2514 * Remaps the interrupt using the interrupt remapping table.
2515 *
2516 * @returns VBox status code.
2517 * @param pDevIns The IOMMU instance data.
2518 * @param uDevId The device ID.
2519 * @param pDte The device table entry.
2520 * @param enmOp The IOMMU operation being performed.
2521 * @param pMsiIn The source MSI.
2522 * @param pMsiOut Where to store the remapped MSI.
2523 *
2524 * @thread Any.
2525 */
2526static int iommuAmdRemapIntr(PPDMDEVINS pDevIns, uint16_t uDevId, PCDTE_T pDte, IOMMUOP enmOp, PCMSIMSG pMsiIn,
2527 PMSIMSG pMsiOut)
2528{
2529 Assert(pDte->n.u2IntrCtrl == IOMMU_INTR_CTRL_REMAP);
2530
2531 IRTE_T Irte;
2532 int rc = iommuAmdReadIrte(pDevIns, uDevId, pDte, pMsiIn->Addr.u64, pMsiIn->Data.u32, enmOp, &Irte);
2533 if (RT_SUCCESS(rc))
2534 {
2535 if (Irte.n.u1RemapEnable)
2536 {
2537 if (!Irte.n.u1GuestMode)
2538 {
2539 if (Irte.n.u3IntrType <= VBOX_MSI_DELIVERY_MODE_LOWEST_PRIO)
2540 {
2541 /* Preserve all bits from the source MSI address that don't map 1:1 from the IRTE. */
2542 pMsiOut->Addr.u64 = pMsiIn->Addr.u64;
2543 pMsiOut->Addr.n.u1DestMode = Irte.n.u1DestMode;
2544 pMsiOut->Addr.n.u8DestId = Irte.n.u8Dest;
2545
2546 /* Preserve all bits from the source MSI data that don't map 1:1 from the IRTE. */
2547 pMsiOut->Data.u32 = pMsiIn->Data.u32;
2548 pMsiOut->Data.n.u8Vector = Irte.n.u8Vector;
2549 pMsiOut->Data.n.u3DeliveryMode = Irte.n.u3IntrType;
2550
2551 return VINF_SUCCESS;
2552 }
2553
2554 LogFunc(("Interrupt type (%#x) invalid -> IOPF\n", Irte.n.u3IntrType));
2555 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2556 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, pMsiIn->Addr.u64, Irte.n.u1RemapEnable,
2557 true /* fRsvdNotZero */, false /* fPermDenied */, enmOp, &EvtIoPageFault);
2558 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, &Irte, enmOp, &EvtIoPageFault, kIoPageFaultType_IrteRsvdIntType);
2559 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2560 }
2561
2562 LogFunc(("Guest mode not supported -> IOPF\n"));
2563 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2564 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, pMsiIn->Addr.u64, Irte.n.u1RemapEnable,
2565 true /* fRsvdNotZero */, false /* fPermDenied */, enmOp, &EvtIoPageFault);
2566 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, &Irte, enmOp, &EvtIoPageFault, kIoPageFaultType_IrteRsvdNotZero);
2567 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2568 }
2569
2570 LogFunc(("Remapping disabled -> IOPF\n"));
2571 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2572 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, pMsiIn->Addr.u64, Irte.n.u1RemapEnable,
2573 false /* fRsvdNotZero */, false /* fPermDenied */, enmOp, &EvtIoPageFault);
2574 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, &Irte, enmOp, &EvtIoPageFault, kIoPageFaultType_IrteRemapEn);
2575 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2576 }
2577
2578 return rc;
2579}
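
/*
 * A worked remapping example (illustrative): given a source MSI with vector
 * 0x30 and an IRTE with RemapEn=1, IntType=0 (fixed), Vector=0x50, Dest=1, the
 * output MSI keeps all source address/data bits except DestMode, DestId, Vector
 * and DeliveryMode, which are taken from the IRTE; the device's vector 0x30 is
 * thus delivered to the guest as vector 0x50 on APIC ID 1.
 */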
2580
2581
2582/**
2583 * Looks up an MSI interrupt from the interrupt remapping table.
2584 *
2585 * @returns VBox status code.
2586 * @param pDevIns The IOMMU instance data.
2587 * @param uDevId The device ID.
2588 * @param enmOp The IOMMU operation being performed.
2589 * @param pMsiIn The source MSI.
2590 * @param pMsiOut Where to store the remapped MSI.
2591 *
2592 * @thread Any.
2593 */
2594static int iommuAmdLookupIntrTable(PPDMDEVINS pDevIns, uint16_t uDevId, IOMMUOP enmOp, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
2595{
2596 /* Read the device table entry from memory. */
2597 LogFlowFunc(("uDevId=%#x enmOp=%u\n", uDevId, enmOp));
2598
2599 DTE_T Dte;
2600 int rc = iommuAmdReadDte(pDevIns, uDevId, enmOp, &Dte);
2601 if (RT_SUCCESS(rc))
2602 {
2603 /* If the DTE is not valid, all interrupts are forwarded without remapping. */
2604 if (Dte.n.u1IntrMapValid)
2605 {
2606 /* Validate bits 255:128 of the device table entry when DTE.IV is 1. */
2607 uint64_t const fRsvd0 = Dte.au64[2] & ~IOMMU_DTE_QWORD_2_VALID_MASK;
2608 uint64_t const fRsvd1 = Dte.au64[3] & ~IOMMU_DTE_QWORD_3_VALID_MASK;
2609 if (RT_LIKELY( !fRsvd0
2610 && !fRsvd1))
2611 { /* likely */ }
2612 else
2613 {
2614 LogFunc(("Invalid reserved bits in DTE (u64[2]=%#RX64 u64[3]=%#RX64) -> Illegal DTE\n", fRsvd0,
2615 fRsvd1));
2616 EVT_ILLEGAL_DTE_T Event;
2617 iommuAmdInitIllegalDteEvent(uDevId, pMsiIn->Addr.u64, true /* fRsvdNotZero */, enmOp, &Event);
2618 iommuAmdRaiseIllegalDteEvent(pDevIns, enmOp, &Event, kIllegalDteType_RsvdNotZero);
2619 return VERR_IOMMU_INTR_REMAP_FAILED;
2620 }
2621
2622 /*
2623             * LINT0/LINT1 pins cannot be driven by PCI(e) devices. It might be possible
2624             * for a Southbridge connected through HyperTransport, but we don't seem to
2625             * need any special handling for these pins.
2626 */
2627
2628 /*
2629 * Validate the MSI source address.
2630 *
2631 * 64-bit MSIs are supported by the PCI and AMD IOMMU spec. However as far as the
2632 * CPU is concerned, the MSI region is fixed and we must ensure no other device
2633 * claims the region as I/O space.
2634 *
2635 * See PCI spec. 6.1.4. "Message Signaled Interrupt (MSI) Support".
2636 * See AMD IOMMU spec. 2.8 "IOMMU Interrupt Support".
2637 * See Intel spec. 10.11.1 "Message Address Register Format".
2638 */
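            /* E.g. an MSI write to 0xfee01004 (illustrative) passes the check below,
               as its masked address matches the fixed 0xfee00000 MSI region base. */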
2639 if ((pMsiIn->Addr.u64 & VBOX_MSI_ADDR_ADDR_MASK) == VBOX_MSI_ADDR_BASE)
2640 {
2641 /*
2642 * The IOMMU remaps fixed and arbitrated interrupts using the IRTE.
2643 * See AMD IOMMU spec. "2.2.5.1 Interrupt Remapping Tables, Guest Virtual APIC Not Enabled".
2644 */
2645 uint8_t const u8DeliveryMode = pMsiIn->Data.n.u3DeliveryMode;
2646 bool fPassThru = false;
2647 switch (u8DeliveryMode)
2648 {
2649 case VBOX_MSI_DELIVERY_MODE_FIXED:
2650 case VBOX_MSI_DELIVERY_MODE_LOWEST_PRIO:
2651 {
2652 uint8_t const uIntrCtrl = Dte.n.u2IntrCtrl;
2653 if (uIntrCtrl == IOMMU_INTR_CTRL_TARGET_ABORT)
2654 {
2655 LogFunc(("IntCtl=0: Target aborting fixed/arbitrated interrupt -> Target abort\n"));
2656 iommuAmdSetPciTargetAbort(pDevIns);
2657 return VERR_IOMMU_INTR_REMAP_DENIED;
2658 }
2659
2660 if (uIntrCtrl == IOMMU_INTR_CTRL_FWD_UNMAPPED)
2661 {
2662 fPassThru = true;
2663 break;
2664 }
2665
2666 if (uIntrCtrl == IOMMU_INTR_CTRL_REMAP)
2667 {
2668 /* Validate the encoded interrupt table length when IntCtl specifies remapping. */
2669 uint8_t const uIntrTabLen = Dte.n.u4IntrTableLength;
2670 if (uIntrTabLen < IOMMU_DTE_INTR_TAB_LEN_MAX)
2671 {
2672 /*
2673 * We don't support guest interrupt remapping yet. When we do, we'll need to
2674 * check Ctrl.u1GstVirtApicEn and use the guest Virtual APIC Table Root Pointer
2675 * in the DTE rather than the Interrupt Root Table Pointer. Since the caller
2676 * already reads the control register, add that as a parameter when we eventually
2677 * support guest interrupt remapping. For now, just assert.
2678 */
2679 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2680 Assert(!pThis->ExtFeat.n.u1GstVirtApicSup);
2681 NOREF(pThis);
2682
2683 return iommuAmdRemapIntr(pDevIns, uDevId, &Dte, enmOp, pMsiIn, pMsiOut);
2684 }
2685
2686 LogFunc(("Invalid interrupt table length %#x -> Illegal DTE\n", uIntrTabLen));
2687 EVT_ILLEGAL_DTE_T Event;
2688 iommuAmdInitIllegalDteEvent(uDevId, pMsiIn->Addr.u64, false /* fRsvdNotZero */, enmOp, &Event);
2689 iommuAmdRaiseIllegalDteEvent(pDevIns, enmOp, &Event, kIllegalDteType_RsvdIntTabLen);
2690 return VERR_IOMMU_INTR_REMAP_FAILED;
2691 }
2692
2693 /* Paranoia. */
2694 Assert(uIntrCtrl == IOMMU_INTR_CTRL_RSVD);
2695
2696 LogFunc(("IntCtl mode invalid %#x -> Illegal DTE\n", uIntrCtrl));
2697
2698 EVT_ILLEGAL_DTE_T Event;
2699 iommuAmdInitIllegalDteEvent(uDevId, pMsiIn->Addr.u64, true /* fRsvdNotZero */, enmOp, &Event);
2700 iommuAmdRaiseIllegalDteEvent(pDevIns, enmOp, &Event, kIllegalDteType_RsvdIntCtl);
2701 return VERR_IOMMU_INTR_REMAP_FAILED;
2702 }
2703
2704 /* SMIs are passed through unmapped. We don't implement SMI filters. */
2705 case VBOX_MSI_DELIVERY_MODE_SMI: fPassThru = true; break;
2706 case VBOX_MSI_DELIVERY_MODE_NMI: fPassThru = Dte.n.u1NmiPassthru; break;
2707 case VBOX_MSI_DELIVERY_MODE_INIT: fPassThru = Dte.n.u1InitPassthru; break;
2708 case VBOX_MSI_DELIVERY_MODE_EXT_INT: fPassThru = Dte.n.u1ExtIntPassthru; break;
2709 default:
2710 {
2711 LogFunc(("MSI data delivery mode invalid %#x -> Target abort\n", u8DeliveryMode));
2712 iommuAmdSetPciTargetAbort(pDevIns);
2713 return VERR_IOMMU_INTR_REMAP_FAILED;
2714 }
2715 }
2716
2717 if (fPassThru)
2718 {
2719 *pMsiOut = *pMsiIn;
2720 return VINF_SUCCESS;
2721 }
2722
2723 iommuAmdSetPciTargetAbort(pDevIns);
2724 return VERR_IOMMU_INTR_REMAP_DENIED;
2725 }
2726 else
2727 {
2728 LogFunc(("MSI address region invalid %#RX64\n", pMsiIn->Addr.u64));
2729 return VERR_IOMMU_INTR_REMAP_FAILED;
2730 }
2731 }
2732 else
2733 {
2734 /** @todo IOMMU: Add to interrupt remapping cache. */
2735 LogFlowFunc(("DTE interrupt map not valid\n"));
2736 *pMsiOut = *pMsiIn;
2737 return VINF_SUCCESS;
2738 }
2739 }
2740
2741 LogFunc(("Failed to read device table entry. uDevId=%#x rc=%Rrc\n", uDevId, rc));
2742 return VERR_IOMMU_INTR_REMAP_FAILED;
2743}
2744
2745
2746/**
2747 * Interrupt remap request from a device.
2748 *
2749 * @returns VBox status code.
2750 * @param pDevIns The IOMMU device instance.
2751 * @param uDevId The device ID (bus, device, function).
2752 * @param pMsiIn The source MSI.
2753 * @param pMsiOut Where to store the remapped MSI.
2754 */
2755static DECLCALLBACK(int) iommuAmdDeviceMsiRemap(PPDMDEVINS pDevIns, uint16_t uDevId, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
2756{
2757 /* Validate. */
2758 Assert(pDevIns);
2759 Assert(pMsiIn);
2760 Assert(pMsiOut);
2761
2762 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2763 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMsiRemap));
2764
2765 LogFlowFunc(("uDevId=%#x\n", uDevId));
2766
2767    /* Interrupts are forwarded without remapping when the IOMMU is disabled. */
2768 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
2769 if (Ctrl.n.u1IommuEn)
2770 {
2771 /** @todo Cache? */
2772
2773 return iommuAmdLookupIntrTable(pDevIns, uDevId, IOMMUOP_INTR_REQ, pMsiIn, pMsiOut);
2774 }
2775
2776 *pMsiOut = *pMsiIn;
2777 return VINF_SUCCESS;
2778}
2779
2780
2781/**
2782 * @callback_method_impl{FNIOMMMIONEWWRITE}
2783 */
2784static DECLCALLBACK(VBOXSTRICTRC) iommuAmdMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
2785{
2786 NOREF(pvUser);
2787 Assert(cb == 4 || cb == 8);
2788 Assert(!(off & (cb - 1)));
2789
2790 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2791 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioWrite)); NOREF(pThis);
2792
2793 uint64_t const uValue = cb == 8 ? *(uint64_t const *)pv : *(uint32_t const *)pv;
2794 return iommuAmdWriteRegister(pDevIns, off, cb, uValue);
2795}
2796
2797
2798/**
2799 * @callback_method_impl{FNIOMMMIONEWREAD}
2800 */
2801static DECLCALLBACK(VBOXSTRICTRC) iommuAmdMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
2802{
2803 NOREF(pvUser);
2804 Assert(cb == 4 || cb == 8);
2805 Assert(!(off & (cb - 1)));
2806
2807 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2808 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioRead)); NOREF(pThis);
2809
2810 uint64_t uResult;
2811 VBOXSTRICTRC rcStrict = iommuAmdReadRegister(pDevIns, off, &uResult);
2812 if (cb == 8)
2813 *(uint64_t *)pv = uResult;
2814 else
2815 *(uint32_t *)pv = (uint32_t)uResult;
2816
2817 return rcStrict;
2818}
2819
2820# ifdef IN_RING3
2821
2822/**
2823 * Processes an IOMMU command.
2824 *
2825 * @returns VBox status code.
2826 * @param pDevIns The IOMMU device instance.
2827 * @param pCmd The command to process.
2828 * @param GCPhysCmd The system physical address of the command.
2829 * @param pEvtError Where to store the error event in case of failures.
2830 *
2831 * @thread Command thread.
2832 */
2833static int iommuAmdR3ProcessCmd(PPDMDEVINS pDevIns, PCCMD_GENERIC_T pCmd, RTGCPHYS GCPhysCmd, PEVT_GENERIC_T pEvtError)
2834{
2835 IOMMU_ASSERT_NOT_LOCKED(pDevIns);
2836
2837 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2838 STAM_COUNTER_INC(&pThis->StatCmd);
2839
2840 uint8_t const bCmd = pCmd->n.u4Opcode;
2841 switch (bCmd)
2842 {
2843 case IOMMU_CMD_COMPLETION_WAIT:
2844 {
2845 STAM_COUNTER_INC(&pThis->StatCmdCompWait);
2846
2847 PCCMD_COMWAIT_T pCmdComWait = (PCCMD_COMWAIT_T)pCmd;
2848 AssertCompile(sizeof(*pCmdComWait) == sizeof(*pCmd));
2849
2850 /* Validate reserved bits in the command. */
2851 if (!(pCmdComWait->au64[0] & ~IOMMU_CMD_COM_WAIT_QWORD_0_VALID_MASK))
2852 {
2853 /* If Completion Store is requested, write the StoreData to the specified address. */
2854 if (pCmdComWait->n.u1Store)
2855 {
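                    /* The 64-bit store address is assembled from the command: the low
                       part holds address bits 31:3 (8-byte aligned), the high part
                       holds bits 51:32. */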
2856 RTGCPHYS const GCPhysStore = RT_MAKE_U64(pCmdComWait->n.u29StoreAddrLo << 3, pCmdComWait->n.u20StoreAddrHi);
2857 uint64_t const u64Data = pCmdComWait->n.u64StoreData;
2858 int rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhysStore, &u64Data, sizeof(u64Data));
2859 if (RT_FAILURE(rc))
2860 {
2861 LogFunc(("Cmd(%#x): Failed to write StoreData (%#RX64) to %#RGp, rc=%Rrc\n", bCmd, u64Data,
2862 GCPhysStore, rc));
2863 iommuAmdInitCmdHwErrorEvent(GCPhysStore, (PEVT_CMD_HW_ERR_T)pEvtError);
2864 return VERR_IOMMU_CMD_HW_ERROR;
2865 }
2866 }
2867
2868 /* If the command requests an interrupt and completion wait interrupts are enabled, raise it. */
2869 if (pCmdComWait->n.u1Interrupt)
2870 {
2871 IOMMU_LOCK(pDevIns);
2872 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_COMPLETION_WAIT_INTR);
2873 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
2874 bool const fRaiseInt = Ctrl.n.u1CompWaitIntrEn;
2875 IOMMU_UNLOCK(pDevIns);
2876
2877 if (fRaiseInt)
2878 iommuAmdRaiseMsiInterrupt(pDevIns);
2879 }
2880 return VINF_SUCCESS;
2881 }
2882 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2883 return VERR_IOMMU_CMD_INVALID_FORMAT;
2884 }
2885
2886 case IOMMU_CMD_INV_DEV_TAB_ENTRY:
2887 {
2888 /** @todo IOMMU: Implement this once we implement IOTLB. Pretend success until
2889 * then. */
2890 STAM_COUNTER_INC(&pThis->StatCmdInvDte);
2891 return VINF_SUCCESS;
2892 }
2893
2894 case IOMMU_CMD_INV_IOMMU_PAGES:
2895 {
2896 /** @todo IOMMU: Implement this once we implement IOTLB. Pretend success until
2897 * then. */
2898 STAM_COUNTER_INC(&pThis->StatCmdInvIommuPages);
2899 return VINF_SUCCESS;
2900 }
2901
2902 case IOMMU_CMD_INV_IOTLB_PAGES:
2903 {
2904 STAM_COUNTER_INC(&pThis->StatCmdInvIotlbPages);
2905
2906 uint32_t const uCapHdr = PDMPciDevGetDWord(pDevIns->apPciDevs[0], IOMMU_PCI_OFF_CAP_HDR);
2907 if (RT_BF_GET(uCapHdr, IOMMU_BF_CAPHDR_IOTLB_SUP))
2908 {
2909 /** @todo IOMMU: Implement remote IOTLB invalidation. */
2910 return VERR_NOT_IMPLEMENTED;
2911 }
2912 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2913 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2914 }
2915
2916 case IOMMU_CMD_INV_INTR_TABLE:
2917 {
2918 /** @todo IOMMU: Implement this once we implement IOTLB. Pretend success until
2919 * then. */
2920 STAM_COUNTER_INC(&pThis->StatCmdInvIntrTable);
2921 return VINF_SUCCESS;
2922 }
2923
2924 case IOMMU_CMD_PREFETCH_IOMMU_PAGES:
2925 {
2926 STAM_COUNTER_INC(&pThis->StatCmdPrefIommuPages);
2927 if (pThis->ExtFeat.n.u1PrefetchSup)
2928 {
2929 /** @todo IOMMU: Implement prefetch. Pretend success until then. */
2930 return VINF_SUCCESS;
2931 }
2932 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2933 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2934 }
2935
2936 case IOMMU_CMD_COMPLETE_PPR_REQ:
2937 {
2938 STAM_COUNTER_INC(&pThis->StatCmdCompletePprReq);
2939
2940 /* We don't support PPR requests yet. */
2941 Assert(!pThis->ExtFeat.n.u1PprSup);
2942 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2943 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2944 }
2945
2946 case IOMMU_CMD_INV_IOMMU_ALL:
2947 {
2948 STAM_COUNTER_INC(&pThis->StatCmdInvIommuAll);
2949
2950 if (pThis->ExtFeat.n.u1InvAllSup)
2951 {
2952 /** @todo IOMMU: Invalidate all. Pretend success until then. */
2953 return VINF_SUCCESS;
2954 }
2955 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2956 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2957 }
2958 }
2959
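    /* Undo the stats increment from above since the command wasn't recognized. */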
2960 STAM_COUNTER_DEC(&pThis->StatCmd);
2961 LogFunc(("Cmd(%#x): Unrecognized\n", bCmd));
2962 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2963 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2964}
2965
2966
2967/**
2968 * The IOMMU command thread.
2969 *
2970 * @returns VBox status code.
2971 * @param pDevIns The IOMMU device instance.
2972 * @param pThread The command thread.
2973 */
2974static DECLCALLBACK(int) iommuAmdR3CmdThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
2975{
2976 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2977
2978 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
2979 return VINF_SUCCESS;
2980
2981 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
2982 {
2983 /*
2984 * Sleep perpetually until we are woken up to process commands.
2985 */
2986 {
2987 ASMAtomicWriteBool(&pThis->fCmdThreadSleeping, true);
2988 bool fSignaled = ASMAtomicXchgBool(&pThis->fCmdThreadSignaled, false);
2989 if (!fSignaled)
2990 {
2991 Assert(ASMAtomicReadBool(&pThis->fCmdThreadSleeping));
2992 int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEvtCmdThread, RT_INDEFINITE_WAIT);
2993 AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc), rc);
2994 if (RT_UNLIKELY(pThread->enmState != PDMTHREADSTATE_RUNNING))
2995 break;
2996 Log5Func(("Woken up with rc=%Rrc\n", rc));
2997 ASMAtomicWriteBool(&pThis->fCmdThreadSignaled, false);
2998 }
2999 ASMAtomicWriteBool(&pThis->fCmdThreadSleeping, false);
3000 }
3001
3002 /*
3003 * Fetch and process IOMMU commands.
3004 */
3005        /** @todo r=ramshankar: This employs a simplistic method of fetching commands (one
3006         *        at a time) and is expensive due to calls to PGM for fetching guest memory.
3007         *        We could optimize by fetching a bunch of commands at a time, reducing the
3008         *        number of calls to PGM. In the longer run we could lock the memory and
3009         *        mappings and access them directly. */
3010 IOMMU_LOCK(pDevIns);
3011
3012 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
3013 if (Status.n.u1CmdBufRunning)
3014 {
3015 /* Get the offset we need to read the command from memory (circular buffer offset). */
3016 uint32_t const cbCmdBuf = iommuAmdGetTotalBufLength(pThis->CmdBufBaseAddr.n.u4Len);
3017 uint32_t offHead = pThis->CmdBufHeadPtr.n.off;
3018 Assert(!(offHead & ~IOMMU_CMD_BUF_HEAD_PTR_VALID_MASK));
3019 Assert(offHead < cbCmdBuf);
3020 while (offHead != pThis->CmdBufTailPtr.n.off)
3021 {
3022 /* Read the command from memory. */
3023 CMD_GENERIC_T Cmd;
3024 RTGCPHYS const GCPhysCmd = (pThis->CmdBufBaseAddr.n.u40Base << X86_PAGE_4K_SHIFT) + offHead;
3025 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysCmd, &Cmd, sizeof(Cmd));
3026 if (RT_SUCCESS(rc))
3027 {
3028 /* Increment the command buffer head pointer. */
3029 offHead = (offHead + sizeof(CMD_GENERIC_T)) % cbCmdBuf;
3030 pThis->CmdBufHeadPtr.n.off = offHead;
3031
3032 /* Process the fetched command. */
3033 EVT_GENERIC_T EvtError;
3034 IOMMU_UNLOCK(pDevIns);
3035 rc = iommuAmdR3ProcessCmd(pDevIns, &Cmd, GCPhysCmd, &EvtError);
3036 IOMMU_LOCK(pDevIns);
3037 if (RT_FAILURE(rc))
3038 {
3039 if ( rc == VERR_IOMMU_CMD_NOT_SUPPORTED
3040 || rc == VERR_IOMMU_CMD_INVALID_FORMAT)
3041 {
3042 Assert(EvtError.n.u4EvtCode == IOMMU_EVT_ILLEGAL_CMD_ERROR);
3043 iommuAmdRaiseIllegalCmdEvent(pDevIns, (PCEVT_ILLEGAL_CMD_ERR_T)&EvtError);
3044 }
3045 else if (rc == VERR_IOMMU_CMD_HW_ERROR)
3046 {
3047 Assert(EvtError.n.u4EvtCode == IOMMU_EVT_COMMAND_HW_ERROR);
3048 LogFunc(("Raising command hardware error. Cmd=%#x -> COMMAND_HW_ERROR\n", Cmd.n.u4Opcode));
3049 iommuAmdRaiseCmdHwErrorEvent(pDevIns, (PCEVT_CMD_HW_ERR_T)&EvtError);
3050 }
3051 break;
3052 }
3053 }
3054 else
3055 {
3056 LogFunc(("Failed to read command at %#RGp. rc=%Rrc -> COMMAND_HW_ERROR\n", GCPhysCmd, rc));
3057 EVT_CMD_HW_ERR_T EvtCmdHwErr;
3058 iommuAmdInitCmdHwErrorEvent(GCPhysCmd, &EvtCmdHwErr);
3059 iommuAmdRaiseCmdHwErrorEvent(pDevIns, &EvtCmdHwErr);
3060 break;
3061 }
3062 }
3063 }
3064
3065 IOMMU_UNLOCK(pDevIns);
3066 }
3067
3068 LogFlowFunc(("Command thread terminating\n"));
3069 return VINF_SUCCESS;
3070}
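
/*
 * A minimal standalone sketch (not part of the device) of the circular
 * command-buffer arithmetic used by the thread above, assuming the AMD spec
 * encoding where an encoded length N corresponds to 2^N entries of 16 bytes
 * each. The names below are illustrative, not the device's API.
 */
#if 0
/* Total command buffer size in bytes for an encoded length (2^N entries). */
static uint32_t iommuAmdSketchCmdBufLength(uint8_t uEncodedLen)
{
    return (UINT32_C(1) << uEncodedLen) * 16 /* bytes per CMD_GENERIC_T */;
}

/* Advance the head offset by one entry, wrapping at the end of the buffer. */
static uint32_t iommuAmdSketchAdvanceHead(uint32_t offHead, uint32_t cbCmdBuf)
{
    return (offHead + 16 /* bytes per CMD_GENERIC_T */) % cbCmdBuf;
}
#endif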
3071
3072
3073/**
3074 * Wakes up the command thread so it can respond to a state change.
3075 *
3076 * @returns VBox status code.
3077 * @param pDevIns The IOMMU device instance.
3078 * @param pThread The command thread.
3079 */
3080static DECLCALLBACK(int) iommuAmdR3CmdThreadWakeUp(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
3081{
3082 RT_NOREF(pThread);
3083 LogFlowFunc(("\n"));
3084 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3085 return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtCmdThread);
3086}
3087
3088
3089/**
3090 * @callback_method_impl{FNPCICONFIGREAD}
3091 */
3092static DECLCALLBACK(VBOXSTRICTRC) iommuAmdR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t uAddress,
3093 unsigned cb, uint32_t *pu32Value)
3094{
3095 /** @todo IOMMU: PCI config read stat counter. */
3096 VBOXSTRICTRC rcStrict = PDMDevHlpPCIConfigRead(pDevIns, pPciDev, uAddress, cb, pu32Value);
3097 Log3Func(("uAddress=%#x (cb=%u) -> %#x. rc=%Rrc\n", uAddress, cb, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
3098 return rcStrict;
3099}
3100
3101
3102/**
3103 * @callback_method_impl{FNPCICONFIGWRITE}
3104 */
3105static DECLCALLBACK(VBOXSTRICTRC) iommuAmdR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t uAddress,
3106 unsigned cb, uint32_t u32Value)
3107{
3108 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3109
3110 /*
3111 * Discard writes to read-only registers that are specific to the IOMMU.
3112 * Other common PCI registers are handled by the generic code, see devpciR3IsConfigByteWritable().
3113 * See PCI spec. 6.1. "Configuration Space Organization".
3114 */
3115 switch (uAddress)
3116 {
3117 case IOMMU_PCI_OFF_CAP_HDR: /* All bits are read-only. */
3118 case IOMMU_PCI_OFF_RANGE_REG: /* We don't have any devices integrated with the IOMMU. */
3119 case IOMMU_PCI_OFF_MISCINFO_REG_0: /* We don't support MSI-X. */
3120 case IOMMU_PCI_OFF_MISCINFO_REG_1: /* We don't support guest-address translation. */
3121 {
3122 LogFunc(("PCI config write (%#RX32) to read-only register %#x -> Ignored\n", u32Value, uAddress));
3123 return VINF_SUCCESS;
3124 }
3125 }
3126
3127 IOMMU_LOCK(pDevIns);
3128
3129 VBOXSTRICTRC rcStrict = VERR_IOMMU_IPE_3;
3130 switch (uAddress)
3131 {
3132 case IOMMU_PCI_OFF_BASE_ADDR_REG_LO:
3133 {
3134 if (pThis->IommuBar.n.u1Enable)
3135 {
3136 rcStrict = VINF_SUCCESS;
3137 LogFunc(("Writing Base Address (Lo) when it's already enabled -> Ignored\n"));
3138 break;
3139 }
3140
3141 pThis->IommuBar.au32[0] = u32Value & IOMMU_BAR_VALID_MASK;
3142 if (pThis->IommuBar.n.u1Enable)
3143 {
3144 Assert(pThis->hMmio != NIL_IOMMMIOHANDLE); /* Paranoia. Ensure we have a valid IOM MMIO handle. */
3145 Assert(!pThis->ExtFeat.n.u1PerfCounterSup); /* Base is 16K aligned when performance counters aren't supported. */
3146 RTGCPHYS const GCPhysMmioBase = RT_MAKE_U64(pThis->IommuBar.au32[0] & 0xffffc000, pThis->IommuBar.au32[1]);
3147 RTGCPHYS const GCPhysMmioBasePrev = PDMDevHlpMmioGetMappingAddress(pDevIns, pThis->hMmio);
3148
3149 /* If the MMIO region is already mapped at the specified address, we're done. */
3150 Assert(GCPhysMmioBase != NIL_RTGCPHYS);
3151 if (GCPhysMmioBasePrev == GCPhysMmioBase)
3152 {
3153 rcStrict = VINF_SUCCESS;
3154 break;
3155 }
3156
3157 /* Unmap the previous MMIO region (which is at a different address). */
3158 if (GCPhysMmioBasePrev != NIL_RTGCPHYS)
3159 {
3160 LogFlowFunc(("Unmapping previous MMIO region at %#RGp\n", GCPhysMmioBasePrev));
3161 rcStrict = PDMDevHlpMmioUnmap(pDevIns, pThis->hMmio);
3162 if (RT_FAILURE(rcStrict))
3163 {
3164 LogFunc(("Failed to unmap MMIO region at %#RGp. rc=%Rrc\n", GCPhysMmioBasePrev, VBOXSTRICTRC_VAL(rcStrict)));
3165 break;
3166 }
3167 }
3168
3169 /* Map the newly specified MMIO region. */
3170 LogFlowFunc(("Mapping MMIO region at %#RGp\n", GCPhysMmioBase));
3171 rcStrict = PDMDevHlpMmioMap(pDevIns, pThis->hMmio, GCPhysMmioBase);
3172 if (RT_FAILURE(rcStrict))
3173 {
3174 LogFunc(("Failed to map MMIO region at %#RGp. rc=%Rrc\n", GCPhysMmioBase, VBOXSTRICTRC_VAL(rcStrict)));
3175 break;
3176 }
3177 }
3178 else
3179 rcStrict = VINF_SUCCESS;
3180 break;
3181 }
3182
3183 case IOMMU_PCI_OFF_BASE_ADDR_REG_HI:
3184 {
3185 if (!pThis->IommuBar.n.u1Enable)
3186 pThis->IommuBar.au32[1] = u32Value;
3187 else
3188 {
3189 rcStrict = VINF_SUCCESS;
3190 LogFunc(("Writing Base Address (Hi) when it's already enabled -> Ignored\n"));
3191 }
3192 break;
3193 }
3194
3195 case IOMMU_PCI_OFF_MSI_CAP_HDR:
3196 {
3197 u32Value |= RT_BIT(23); /* 64-bit MSI addresses must always be enabled for IOMMU. */
3198 RT_FALL_THRU();
3199 }
3200 default:
3201 {
3202 rcStrict = PDMDevHlpPCIConfigWrite(pDevIns, pPciDev, uAddress, cb, u32Value);
3203 break;
3204 }
3205 }
3206
3207 IOMMU_UNLOCK(pDevIns);
3208
3209 Log3Func(("uAddress=%#x (cb=%u) with %#x. rc=%Rrc\n", uAddress, cb, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
3210 return rcStrict;
3211}
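
/*
 * Illustrative only: the BAR programming order the write handler above
 * expects from guest software. The high dword is written first, then the low
 * dword with the enable bit (bit 0) set; once enabled, further BAR writes
 * are ignored until reset. The config-write helper below is hypothetical.
 */
#if 0
static void iommuAmdSketchGuestProgramBar(uint64_t GCPhysMmioBase)
{
    /* High 32 bits of the 16K-aligned MMIO base address. */
    sketchPciConfigWrite32(IOMMU_PCI_OFF_BASE_ADDR_REG_HI, (uint32_t)(GCPhysMmioBase >> 32));
    /* Low 32 bits (bits 13:1 are reserved) with the IOMMU enable bit set. */
    sketchPciConfigWrite32(IOMMU_PCI_OFF_BASE_ADDR_REG_LO,
                           ((uint32_t)GCPhysMmioBase & UINT32_C(0xffffc000)) | 0x1);
}
#endif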
3212
3213
3214/**
3215 * @callback_method_impl{FNDBGFHANDLERDEV}
3216 */
3217static DECLCALLBACK(void) iommuAmdR3DbgInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
3218{
3219 PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3220 PCPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
3221 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
3222
3223 bool fVerbose;
3224 if ( pszArgs
3225 && !strncmp(pszArgs, RT_STR_TUPLE("verbose")))
3226 fVerbose = true;
3227 else
3228 fVerbose = false;
3229
3230 pHlp->pfnPrintf(pHlp, "AMD-IOMMU:\n");
3231 /* Device Table Base Addresses (all segments). */
3232 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aDevTabBaseAddrs); i++)
3233 {
3234 DEV_TAB_BAR_T const DevTabBar = pThis->aDevTabBaseAddrs[i];
3235 pHlp->pfnPrintf(pHlp, " Device Table BAR %u = %#RX64\n", i, DevTabBar.u64);
3236 if (fVerbose)
3237 {
3238 pHlp->pfnPrintf(pHlp, " Size = %#x (%u bytes)\n", DevTabBar.n.u9Size,
3239 IOMMU_GET_DEV_TAB_LEN(&DevTabBar));
3240 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", DevTabBar.n.u40Base << X86_PAGE_4K_SHIFT);
3241 }
3242 }
3243 /* Command Buffer Base Address Register. */
3244 {
3245 CMD_BUF_BAR_T const CmdBufBar = pThis->CmdBufBaseAddr;
3246 uint8_t const uEncodedLen = CmdBufBar.n.u4Len;
3247 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3248 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3249 pHlp->pfnPrintf(pHlp, " Command Buffer BAR = %#RX64\n", CmdBufBar.u64);
3250 if (fVerbose)
3251 {
3252 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", CmdBufBar.n.u40Base << X86_PAGE_4K_SHIFT);
3253 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3254 cEntries, cbBuffer);
3255 }
3256 }
3257 /* Event Log Base Address Register. */
3258 {
3259 EVT_LOG_BAR_T const EvtLogBar = pThis->EvtLogBaseAddr;
3260 uint8_t const uEncodedLen = EvtLogBar.n.u4Len;
3261 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3262 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3263 pHlp->pfnPrintf(pHlp, " Event Log BAR = %#RX64\n", EvtLogBar.u64);
3264 if (fVerbose)
3265 {
3266 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", EvtLogBar.n.u40Base << X86_PAGE_4K_SHIFT);
3267 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3268 cEntries, cbBuffer);
3269 }
3270 }
3271 /* IOMMU Control Register. */
3272 {
3273 IOMMU_CTRL_T const Ctrl = pThis->Ctrl;
3274 pHlp->pfnPrintf(pHlp, " Control = %#RX64\n", Ctrl.u64);
3275 if (fVerbose)
3276 {
3277 pHlp->pfnPrintf(pHlp, " IOMMU enable = %RTbool\n", Ctrl.n.u1IommuEn);
3278 pHlp->pfnPrintf(pHlp, " HT Tunnel translation enable = %RTbool\n", Ctrl.n.u1HtTunEn);
3279 pHlp->pfnPrintf(pHlp, " Event log enable = %RTbool\n", Ctrl.n.u1EvtLogEn);
3280 pHlp->pfnPrintf(pHlp, " Event log interrupt enable = %RTbool\n", Ctrl.n.u1EvtIntrEn);
3281 pHlp->pfnPrintf(pHlp, " Completion wait interrupt enable = %RTbool\n", Ctrl.n.u1CompWaitIntrEn);
3282 pHlp->pfnPrintf(pHlp, " Invalidation timeout = %u\n", Ctrl.n.u3InvTimeOut);
3283 pHlp->pfnPrintf(pHlp, " Pass posted write = %RTbool\n", Ctrl.n.u1PassPW);
3284 pHlp->pfnPrintf(pHlp, " Response Pass posted write = %RTbool\n", Ctrl.n.u1ResPassPW);
3285 pHlp->pfnPrintf(pHlp, " Coherent = %RTbool\n", Ctrl.n.u1Coherent);
3286 pHlp->pfnPrintf(pHlp, " Isochronous = %RTbool\n", Ctrl.n.u1Isoc);
3287 pHlp->pfnPrintf(pHlp, " Command buffer enable = %RTbool\n", Ctrl.n.u1CmdBufEn);
3288 pHlp->pfnPrintf(pHlp, " PPR log enable = %RTbool\n", Ctrl.n.u1PprLogEn);
3289 pHlp->pfnPrintf(pHlp, " PPR interrupt enable = %RTbool\n", Ctrl.n.u1PprIntrEn);
3290 pHlp->pfnPrintf(pHlp, " PPR enable = %RTbool\n", Ctrl.n.u1PprEn);
3291 pHlp->pfnPrintf(pHlp, " Guest translation enable = %RTbool\n", Ctrl.n.u1GstTranslateEn);
3292 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC enable = %RTbool\n", Ctrl.n.u1GstVirtApicEn);
3293 pHlp->pfnPrintf(pHlp, " CRW = %#x\n", Ctrl.n.u4Crw);
3294 pHlp->pfnPrintf(pHlp, " SMI filter enable = %RTbool\n", Ctrl.n.u1SmiFilterEn);
3295 pHlp->pfnPrintf(pHlp, " Self-writeback disable = %RTbool\n", Ctrl.n.u1SelfWriteBackDis);
3296 pHlp->pfnPrintf(pHlp, " SMI filter log enable = %RTbool\n", Ctrl.n.u1SmiFilterLogEn);
3297 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC mode enable = %#x\n", Ctrl.n.u3GstVirtApicModeEn);
3298 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC GA log enable = %RTbool\n", Ctrl.n.u1GstLogEn);
3299 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC interrupt enable = %RTbool\n", Ctrl.n.u1GstIntrEn);
3300 pHlp->pfnPrintf(pHlp, " Dual PPR log enable = %#x\n", Ctrl.n.u2DualPprLogEn);
3301 pHlp->pfnPrintf(pHlp, " Dual event log enable = %#x\n", Ctrl.n.u2DualEvtLogEn);
3302 pHlp->pfnPrintf(pHlp, " Device table segmentation enable = %#x\n", Ctrl.n.u3DevTabSegEn);
3303 pHlp->pfnPrintf(pHlp, " Privilege abort enable = %#x\n", Ctrl.n.u2PrivAbortEn);
3304 pHlp->pfnPrintf(pHlp, " PPR auto response enable = %RTbool\n", Ctrl.n.u1PprAutoRespEn);
3305 pHlp->pfnPrintf(pHlp, " MARC enable = %RTbool\n", Ctrl.n.u1MarcEn);
3306 pHlp->pfnPrintf(pHlp, " Block StopMark enable = %RTbool\n", Ctrl.n.u1BlockStopMarkEn);
3307 pHlp->pfnPrintf(pHlp, " PPR auto response always-on enable = %RTbool\n", Ctrl.n.u1PprAutoRespAlwaysOnEn);
3308 pHlp->pfnPrintf(pHlp, " Domain IDPNE = %RTbool\n", Ctrl.n.u1DomainIDPNE);
3309 pHlp->pfnPrintf(pHlp, " Enhanced PPR handling = %RTbool\n", Ctrl.n.u1EnhancedPpr);
3310 pHlp->pfnPrintf(pHlp, " Host page table access/dirty bit update = %#x\n", Ctrl.n.u2HstAccDirtyBitUpdate);
3311 pHlp->pfnPrintf(pHlp, " Guest page table dirty bit disable = %RTbool\n", Ctrl.n.u1GstDirtyUpdateDis);
3312 pHlp->pfnPrintf(pHlp, " x2APIC enable = %RTbool\n", Ctrl.n.u1X2ApicEn);
3313 pHlp->pfnPrintf(pHlp, " x2APIC interrupt enable = %RTbool\n", Ctrl.n.u1X2ApicIntrGenEn);
3314 pHlp->pfnPrintf(pHlp, " Guest page table access bit update = %RTbool\n", Ctrl.n.u1GstAccessUpdateDis);
3315 }
3316 }
3317 /* Exclusion Base Address Register. */
3318 {
3319 IOMMU_EXCL_RANGE_BAR_T const ExclRangeBar = pThis->ExclRangeBaseAddr;
3320 pHlp->pfnPrintf(pHlp, " Exclusion BAR = %#RX64\n", ExclRangeBar.u64);
3321 if (fVerbose)
3322 {
3323 pHlp->pfnPrintf(pHlp, " Exclusion enable = %RTbool\n", ExclRangeBar.n.u1ExclEnable);
3324 pHlp->pfnPrintf(pHlp, " Allow all devices = %RTbool\n", ExclRangeBar.n.u1AllowAll);
3325 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n",
3326 ExclRangeBar.n.u40ExclRangeBase << X86_PAGE_4K_SHIFT);
3327 }
3328 }
3329 /* Exclusion Range Limit Register. */
3330 {
3331 IOMMU_EXCL_RANGE_LIMIT_T const ExclRangeLimit = pThis->ExclRangeLimit;
3332 pHlp->pfnPrintf(pHlp, " Exclusion Range Limit = %#RX64\n", ExclRangeLimit.u64);
3333 if (fVerbose)
3334 pHlp->pfnPrintf(pHlp, " Range limit = %#RX64\n", ExclRangeLimit.n.u52ExclLimit);
3335 }
3336 /* Extended Feature Register. */
3337 {
3338 IOMMU_EXT_FEAT_T ExtFeat = pThis->ExtFeat;
3339 pHlp->pfnPrintf(pHlp, " Extended Feature Register = %#RX64\n", ExtFeat.u64);
3340 if (fVerbose)
3341 {
3342 pHlp->pfnPrintf(pHlp, " Prefetch support = %RTbool\n", ExtFeat.n.u1PrefetchSup);
3343 pHlp->pfnPrintf(pHlp, " PPR support = %RTbool\n", ExtFeat.n.u1PprSup);
3344 pHlp->pfnPrintf(pHlp, " x2APIC support = %RTbool\n", ExtFeat.n.u1X2ApicSup);
3345 pHlp->pfnPrintf(pHlp, " NX and privilege level support = %RTbool\n", ExtFeat.n.u1NoExecuteSup);
3346 pHlp->pfnPrintf(pHlp, " Guest translation support = %RTbool\n", ExtFeat.n.u1GstTranslateSup);
3347 pHlp->pfnPrintf(pHlp, " Invalidate-All command support = %RTbool\n", ExtFeat.n.u1InvAllSup);
3348 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC support = %RTbool\n", ExtFeat.n.u1GstVirtApicSup);
3349 pHlp->pfnPrintf(pHlp, " Hardware error register support = %RTbool\n", ExtFeat.n.u1HwErrorSup);
3350 pHlp->pfnPrintf(pHlp, " Performance counters support = %RTbool\n", ExtFeat.n.u1PerfCounterSup);
3351 pHlp->pfnPrintf(pHlp, " Host address translation size = %#x\n", ExtFeat.n.u2HostAddrTranslateSize);
3352 pHlp->pfnPrintf(pHlp, " Guest address translation size = %#x\n", ExtFeat.n.u2GstAddrTranslateSize);
3353 pHlp->pfnPrintf(pHlp, " Guest CR3 root table level support = %#x\n", ExtFeat.n.u2GstCr3RootTblLevel);
3354 pHlp->pfnPrintf(pHlp, " SMI filter register support = %#x\n", ExtFeat.n.u2SmiFilterSup);
3355 pHlp->pfnPrintf(pHlp, " SMI filter register count = %#x\n", ExtFeat.n.u3SmiFilterCount);
3356 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC modes support = %#x\n", ExtFeat.n.u3GstVirtApicModeSup);
3357 pHlp->pfnPrintf(pHlp, " Dual PPR log support = %#x\n", ExtFeat.n.u2DualPprLogSup);
3358 pHlp->pfnPrintf(pHlp, " Dual event log support = %#x\n", ExtFeat.n.u2DualEvtLogSup);
3359 pHlp->pfnPrintf(pHlp, " Maximum PASID = %#x\n", ExtFeat.n.u5MaxPasidSup);
3360 pHlp->pfnPrintf(pHlp, " User/supervisor page protection support = %RTbool\n", ExtFeat.n.u1UserSupervisorSup);
3361 pHlp->pfnPrintf(pHlp, " Device table segments supported = %#x (%u)\n", ExtFeat.n.u2DevTabSegSup,
3362 g_acDevTabSegs[ExtFeat.n.u2DevTabSegSup]);
3363 pHlp->pfnPrintf(pHlp, " PPR log overflow early warning support = %RTbool\n", ExtFeat.n.u1PprLogOverflowWarn);
3364 pHlp->pfnPrintf(pHlp, " PPR auto response support = %RTbool\n", ExtFeat.n.u1PprAutoRespSup);
3365 pHlp->pfnPrintf(pHlp, " MARC support = %#x\n", ExtFeat.n.u2MarcSup);
3366 pHlp->pfnPrintf(pHlp, " Block StopMark message support = %RTbool\n", ExtFeat.n.u1BlockStopMarkSup);
3367 pHlp->pfnPrintf(pHlp, " Performance optimization support = %RTbool\n", ExtFeat.n.u1PerfOptSup);
3368 pHlp->pfnPrintf(pHlp, " MSI capability MMIO access support = %RTbool\n", ExtFeat.n.u1MsiCapMmioSup);
3369 pHlp->pfnPrintf(pHlp, " Guest I/O protection support = %RTbool\n", ExtFeat.n.u1GstIoSup);
3370 pHlp->pfnPrintf(pHlp, " Host access support = %RTbool\n", ExtFeat.n.u1HostAccessSup);
3371 pHlp->pfnPrintf(pHlp, " Enhanced PPR handling support = %RTbool\n", ExtFeat.n.u1EnhancedPprSup);
3372 pHlp->pfnPrintf(pHlp, " Attribute forward supported = %RTbool\n", ExtFeat.n.u1AttrForwardSup);
3373 pHlp->pfnPrintf(pHlp, " Host dirty support = %RTbool\n", ExtFeat.n.u1HostDirtySup);
3374 pHlp->pfnPrintf(pHlp, " Invalidate IOTLB type support = %RTbool\n", ExtFeat.n.u1InvIoTlbTypeSup);
3375 pHlp->pfnPrintf(pHlp, " Guest page table access bit hw disable = %RTbool\n", ExtFeat.n.u1GstUpdateDisSup);
3376 pHlp->pfnPrintf(pHlp, " Force physical dest for remapped intr. = %RTbool\n", ExtFeat.n.u1ForcePhysDstSup);
3377 }
3378 }
3379 /* PPR Log Base Address Register. */
3380 {
3381 PPR_LOG_BAR_T PprLogBar = pThis->PprLogBaseAddr;
3382 uint8_t const uEncodedLen = PprLogBar.n.u4Len;
3383 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3384 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3385 pHlp->pfnPrintf(pHlp, " PPR Log BAR = %#RX64\n", PprLogBar.u64);
3386 if (fVerbose)
3387 {
3388 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", PprLogBar.n.u40Base << X86_PAGE_4K_SHIFT);
3389 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3390 cEntries, cbBuffer);
3391 }
3392 }
3393 /* Hardware Event (Hi) Register. */
3394 {
3395 IOMMU_HW_EVT_HI_T HwEvtHi = pThis->HwEvtHi;
3396 pHlp->pfnPrintf(pHlp, " Hardware Event (Hi) = %#RX64\n", HwEvtHi.u64);
3397 if (fVerbose)
3398 {
3399 pHlp->pfnPrintf(pHlp, " First operand = %#RX64\n", HwEvtHi.n.u60FirstOperand);
3400 pHlp->pfnPrintf(pHlp, " Event code = %#RX8\n", HwEvtHi.n.u4EvtCode);
3401 }
3402 }
3403 /* Hardware Event (Lo) Register. */
3404 pHlp->pfnPrintf(pHlp, " Hardware Event (Lo) = %#RX64\n", pThis->HwEvtLo);
3405 /* Hardware Event Status. */
3406 {
3407 IOMMU_HW_EVT_STATUS_T HwEvtStatus = pThis->HwEvtStatus;
3408 pHlp->pfnPrintf(pHlp, " Hardware Event Status = %#RX64\n", HwEvtStatus.u64);
3409 if (fVerbose)
3410 {
3411 pHlp->pfnPrintf(pHlp, " Valid = %RTbool\n", HwEvtStatus.n.u1Valid);
3412 pHlp->pfnPrintf(pHlp, " Overflow = %RTbool\n", HwEvtStatus.n.u1Overflow);
3413 }
3414 }
3415 /* Guest Virtual-APIC Log Base Address Register. */
3416 {
3417 GALOG_BAR_T const GALogBar = pThis->GALogBaseAddr;
3418 uint8_t const uEncodedLen = GALogBar.n.u4Len;
3419 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3420 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3421 pHlp->pfnPrintf(pHlp, " Guest Log BAR = %#RX64\n", GALogBar.u64);
3422 if (fVerbose)
3423 {
3424 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", GALogBar.n.u40Base << X86_PAGE_4K_SHIFT);
3425 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3426 cEntries, cbBuffer);
3427 }
3428 }
3429 /* Guest Virtual-APIC Log Tail Address Register. */
3430 {
3431 GALOG_TAIL_ADDR_T GALogTail = pThis->GALogTailAddr;
3432 pHlp->pfnPrintf(pHlp, " Guest Log Tail Address = %#RX64\n", GALogTail.u64);
3433 if (fVerbose)
3434 pHlp->pfnPrintf(pHlp, " Tail address = %#RX64\n", GALogTail.n.u40GALogTailAddr);
3435 }
3436 /* PPR Log B Base Address Register. */
3437 {
3438 PPR_LOG_B_BAR_T PprLogBBar = pThis->PprLogBBaseAddr;
3439 uint8_t const uEncodedLen = PprLogBBar.n.u4Len;
3440 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3441 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3442 pHlp->pfnPrintf(pHlp, " PPR Log B BAR = %#RX64\n", PprLogBBar.u64);
3443 if (fVerbose)
3444 {
3445 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", PprLogBBar.n.u40Base << X86_PAGE_4K_SHIFT);
3446 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3447 cEntries, cbBuffer);
3448 }
3449 }
3450 /* Event Log B Base Address Register. */
3451 {
3452 EVT_LOG_B_BAR_T EvtLogBBar = pThis->EvtLogBBaseAddr;
3453 uint8_t const uEncodedLen = EvtLogBBar.n.u4Len;
3454 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3455 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3456 pHlp->pfnPrintf(pHlp, " Event Log B BAR = %#RX64\n", EvtLogBBar.u64);
3457 if (fVerbose)
3458 {
3459 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", EvtLogBBar.n.u40Base << X86_PAGE_4K_SHIFT);
3460 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3461 cEntries, cbBuffer);
3462 }
3463 }
3464 /* Device-Specific Feature Extension Register. */
3465 {
3466 DEV_SPECIFIC_FEAT_T const DevSpecificFeat = pThis->DevSpecificFeat;
3467 pHlp->pfnPrintf(pHlp, " Device-specific Feature = %#RX64\n", DevSpecificFeat.u64);
3468 if (fVerbose)
3469 {
3470 pHlp->pfnPrintf(pHlp, " Feature = %#RX32\n", DevSpecificFeat.n.u24DevSpecFeat);
3471 pHlp->pfnPrintf(pHlp, " Minor revision ID = %#x\n", DevSpecificFeat.n.u4RevMinor);
3472 pHlp->pfnPrintf(pHlp, " Major revision ID = %#x\n", DevSpecificFeat.n.u4RevMajor);
3473 }
3474 }
3475 /* Device-Specific Control Extension Register. */
3476 {
3477 DEV_SPECIFIC_CTRL_T const DevSpecificCtrl = pThis->DevSpecificCtrl;
3478 pHlp->pfnPrintf(pHlp, " Device-specific Control = %#RX64\n", DevSpecificCtrl.u64);
3479 if (fVerbose)
3480 {
3481 pHlp->pfnPrintf(pHlp, " Control = %#RX32\n", DevSpecificCtrl.n.u24DevSpecCtrl);
3482 pHlp->pfnPrintf(pHlp, " Minor revision ID = %#x\n", DevSpecificCtrl.n.u4RevMinor);
3483 pHlp->pfnPrintf(pHlp, " Major revision ID = %#x\n", DevSpecificCtrl.n.u4RevMajor);
3484 }
3485 }
3486 /* Device-Specific Status Extension Register. */
3487 {
3488 DEV_SPECIFIC_STATUS_T const DevSpecificStatus = pThis->DevSpecificStatus;
3489 pHlp->pfnPrintf(pHlp, " Device-specific Status = %#RX64\n", DevSpecificStatus.u64);
3490 if (fVerbose)
3491 {
3492 pHlp->pfnPrintf(pHlp, " Status = %#RX32\n", DevSpecificStatus.n.u24DevSpecStatus);
3493 pHlp->pfnPrintf(pHlp, " Minor revision ID = %#x\n", DevSpecificStatus.n.u4RevMinor);
3494 pHlp->pfnPrintf(pHlp, " Major revision ID = %#x\n", DevSpecificStatus.n.u4RevMajor);
3495 }
3496 }
3497 /* Miscellaneous Information Register (Lo and Hi). */
3498 {
3499 MSI_MISC_INFO_T const MiscInfo = pThis->MiscInfo;
3500 pHlp->pfnPrintf(pHlp, " Misc. Info. Register = %#RX64\n", MiscInfo.u64);
3501 if (fVerbose)
3502 {
3503 pHlp->pfnPrintf(pHlp, " Event Log MSI number = %#x\n", MiscInfo.n.u5MsiNumEvtLog);
3504 pHlp->pfnPrintf(pHlp, " Guest Virtual-Address Size = %#x\n", MiscInfo.n.u3GstVirtAddrSize);
3505 pHlp->pfnPrintf(pHlp, " Physical Address Size = %#x\n", MiscInfo.n.u7PhysAddrSize);
3506 pHlp->pfnPrintf(pHlp, " Virtual-Address Size = %#x\n", MiscInfo.n.u7VirtAddrSize);
3507 pHlp->pfnPrintf(pHlp, " HT Transport ATS Range Reserved = %RTbool\n", MiscInfo.n.u1HtAtsResv);
3508 pHlp->pfnPrintf(pHlp, " PPR MSI number = %#x\n", MiscInfo.n.u5MsiNumPpr);
3509 pHlp->pfnPrintf(pHlp, " GA Log MSI number = %#x\n", MiscInfo.n.u5MsiNumGa);
3510 }
3511 }
3512 /* MSI Capability Header. */
3513 {
3514 MSI_CAP_HDR_T MsiCapHdr;
3515 MsiCapHdr.u32 = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR);
3516 pHlp->pfnPrintf(pHlp, " MSI Capability Header = %#RX32\n", MsiCapHdr.u32);
3517 if (fVerbose)
3518 {
3519 pHlp->pfnPrintf(pHlp, " Capability ID = %#x\n", MsiCapHdr.n.u8MsiCapId);
3520 pHlp->pfnPrintf(pHlp, " Capability Ptr (PCI config offset) = %#x\n", MsiCapHdr.n.u8MsiCapPtr);
3521 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", MsiCapHdr.n.u1MsiEnable);
3522 pHlp->pfnPrintf(pHlp, " Multi-message capability = %#x\n", MsiCapHdr.n.u3MsiMultiMessCap);
3523 pHlp->pfnPrintf(pHlp, " Multi-message enable = %#x\n", MsiCapHdr.n.u3MsiMultiMessEn);
3524 }
3525 }
3526 /* MSI Address Register (Lo and Hi). */
3527 {
3528 uint32_t const uMsiAddrLo = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO);
3529 uint32_t const uMsiAddrHi = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI);
3530 MSIADDR MsiAddr;
3531 MsiAddr.u64 = RT_MAKE_U64(uMsiAddrLo, uMsiAddrHi);
3532 pHlp->pfnPrintf(pHlp, " MSI Address = %#RX64\n", MsiAddr.u64);
3533 if (fVerbose)
3534 {
3535 pHlp->pfnPrintf(pHlp, " Destination mode = %#x\n", MsiAddr.n.u1DestMode);
3536 pHlp->pfnPrintf(pHlp, " Redirection hint = %#x\n", MsiAddr.n.u1RedirHint);
3537 pHlp->pfnPrintf(pHlp, " Destination Id = %#x\n", MsiAddr.n.u8DestId);
3538 pHlp->pfnPrintf(pHlp, " Address = %#RX32\n", MsiAddr.n.u12Addr);
3539 pHlp->pfnPrintf(pHlp, " Address (Hi) / Rsvd? = %#RX32\n", MsiAddr.n.u32Rsvd0);
3540 }
3541 }
3542 /* MSI Data. */
3543 {
3544 MSIDATA MsiData;
3545 MsiData.u32 = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA);
3546 pHlp->pfnPrintf(pHlp, " MSI Data = %#RX32\n", MsiData.u32);
3547 if (fVerbose)
3548 {
3549 pHlp->pfnPrintf(pHlp, " Vector = %#x (%u)\n", MsiData.n.u8Vector,
3550 MsiData.n.u8Vector);
3551 pHlp->pfnPrintf(pHlp, " Delivery mode = %#x\n", MsiData.n.u3DeliveryMode);
3552 pHlp->pfnPrintf(pHlp, " Level = %#x\n", MsiData.n.u1Level);
3553 pHlp->pfnPrintf(pHlp, " Trigger mode = %s\n", MsiData.n.u1TriggerMode ?
3554 "level" : "edge");
3555 }
3556 }
3557 /* MSI Mapping Capability Header (HyperTransport, reporting all 0s currently). */
3558 {
3559 MSI_MAP_CAP_HDR_T MsiMapCapHdr;
3560 MsiMapCapHdr.u32 = 0;
3561 pHlp->pfnPrintf(pHlp, " MSI Mapping Capability Header = %#RX32\n", MsiMapCapHdr.u32);
3562 if (fVerbose)
3563 {
3564 pHlp->pfnPrintf(pHlp, " Capability ID = %#x\n", MsiMapCapHdr.n.u8MsiMapCapId);
3565 pHlp->pfnPrintf(pHlp, " Map enable = %RTbool\n", MsiMapCapHdr.n.u1MsiMapEn);
3566 pHlp->pfnPrintf(pHlp, " Map fixed = %RTbool\n", MsiMapCapHdr.n.u1MsiMapFixed);
3567 pHlp->pfnPrintf(pHlp, " Map capability type = %#x\n", MsiMapCapHdr.n.u5MapCapType);
3568 }
3569 }
3570 /* Performance Optimization Control Register. */
3571 {
3572 IOMMU_PERF_OPT_CTRL_T const PerfOptCtrl = pThis->PerfOptCtrl;
3573 pHlp->pfnPrintf(pHlp, " Performance Optimization Control = %#RX32\n", PerfOptCtrl.u32);
3574 if (fVerbose)
3575 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", PerfOptCtrl.n.u1PerfOptEn);
3576 }
3577 /* XT (x2APIC) General Interrupt Control Register. */
3578 {
3579 IOMMU_XT_GEN_INTR_CTRL_T const XtGenIntrCtrl = pThis->XtGenIntrCtrl;
3580 pHlp->pfnPrintf(pHlp, " XT General Interrupt Control = %#RX64\n", XtGenIntrCtrl.u64);
3581 if (fVerbose)
3582 {
3583 pHlp->pfnPrintf(pHlp, " Interrupt destination mode = %s\n",
3584 !XtGenIntrCtrl.n.u1X2ApicIntrDstMode ? "physical" : "logical");
3585 pHlp->pfnPrintf(pHlp, " Interrupt destination = %#RX64\n",
3586 RT_MAKE_U64(XtGenIntrCtrl.n.u24X2ApicIntrDstLo, XtGenIntrCtrl.n.u7X2ApicIntrDstHi));
3587 pHlp->pfnPrintf(pHlp, " Interrupt vector = %#x\n", XtGenIntrCtrl.n.u8X2ApicIntrVector);
3588 pHlp->pfnPrintf(pHlp, " Interrupt delivery mode = %s\n",
3589 !XtGenIntrCtrl.n.u1X2ApicIntrDeliveryMode ? "fixed" : "arbitrated");
3590 }
3591 }
3592 /* XT (x2APIC) PPR Interrupt Control Register. */
3593 {
3594 IOMMU_XT_PPR_INTR_CTRL_T const XtPprIntrCtrl = pThis->XtPprIntrCtrl;
3595 pHlp->pfnPrintf(pHlp, " XT PPR Interrupt Control = %#RX64\n", XtPprIntrCtrl.u64);
3596 if (fVerbose)
3597 {
3598 pHlp->pfnPrintf(pHlp, " Interrupt destination mode = %s\n",
3599 !XtPprIntrCtrl.n.u1X2ApicIntrDstMode ? "physical" : "logical");
3600 pHlp->pfnPrintf(pHlp, " Interrupt destination = %#RX64\n",
3601 RT_MAKE_U64(XtPprIntrCtrl.n.u24X2ApicIntrDstLo, XtPprIntrCtrl.n.u7X2ApicIntrDstHi));
3602 pHlp->pfnPrintf(pHlp, " Interrupt vector = %#x\n", XtPprIntrCtrl.n.u8X2ApicIntrVector);
3603 pHlp->pfnPrintf(pHlp, " Interrupt delivery mode = %s\n",
3604 !XtPprIntrCtrl.n.u1X2ApicIntrDeliveryMode ? "fixed" : "arbitrated");
3605 }
3606 }
3607 /* XT (X2APIC) GA Log Interrupt Control Register. */
3608 {
3609 IOMMU_XT_GALOG_INTR_CTRL_T const XtGALogIntrCtrl = pThis->XtGALogIntrCtrl;
3610 pHlp->pfnPrintf(pHlp, " XT GA Log Interrupt Control = %#RX64\n", XtGALogIntrCtrl.u64);
3611 if (fVerbose)
3612 {
3613 pHlp->pfnPrintf(pHlp, " Interrupt destination mode = %s\n",
3614 !XtGALogIntrCtrl.n.u1X2ApicIntrDstMode ? "physical" : "logical");
3615 pHlp->pfnPrintf(pHlp, " Interrupt destination = %#RX64\n",
3616 RT_MAKE_U64(XtGALogIntrCtrl.n.u24X2ApicIntrDstLo, XtGALogIntrCtrl.n.u7X2ApicIntrDstHi));
3617 pHlp->pfnPrintf(pHlp, " Interrupt vector = %#x\n", XtGALogIntrCtrl.n.u8X2ApicIntrVector);
3618 pHlp->pfnPrintf(pHlp, " Interrupt delivery mode = %s\n",
3619 !XtGALogIntrCtrl.n.u1X2ApicIntrDeliveryMode ? "fixed" : "arbitrated");
3620 }
3621 }
3622 /* MARC Registers. */
3623 {
3624 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aMarcApers); i++)
3625 {
3626 pHlp->pfnPrintf(pHlp, " MARC Aperture %u:\n", i);
3627 MARC_APER_BAR_T const MarcAperBar = pThis->aMarcApers[i].Base;
3628 pHlp->pfnPrintf(pHlp, " Base = %#RX64\n", MarcAperBar.n.u40MarcBaseAddr << X86_PAGE_4K_SHIFT);
3629
3630 MARC_APER_RELOC_T const MarcAperReloc = pThis->aMarcApers[i].Reloc;
3631 pHlp->pfnPrintf(pHlp, " Reloc = %#RX64 (addr: %#RX64, read-only: %RTbool, enable: %RTbool)\n",
3632 MarcAperReloc.u64, MarcAperReloc.n.u40MarcRelocAddr << X86_PAGE_4K_SHIFT,
3633 MarcAperReloc.n.u1ReadOnly, MarcAperReloc.n.u1RelocEn);
3634
3635 MARC_APER_LEN_T const MarcAperLen = pThis->aMarcApers[i].Length;
3636 pHlp->pfnPrintf(pHlp, " Length = %RU64 pages\n", MarcAperLen.n.u40MarcLength);
3637 }
3638 }
3639 /* Reserved Register. */
3640 pHlp->pfnPrintf(pHlp, " Reserved Register = %#RX64\n", pThis->RsvdReg);
3641 /* Command Buffer Head Pointer Register. */
3642 {
3643 CMD_BUF_HEAD_PTR_T const CmdBufHeadPtr = pThis->CmdBufHeadPtr;
3644 pHlp->pfnPrintf(pHlp, " Command Buffer Head Pointer = %#RX64 (off: %#x)\n", CmdBufHeadPtr.u64,
3645 CmdBufHeadPtr.n.off);
3646 }
3647 /* Command Buffer Tail Pointer Register. */
3648 {
3649 CMD_BUF_HEAD_PTR_T const CmdBufTailPtr = pThis->CmdBufTailPtr;
3650 pHlp->pfnPrintf(pHlp, " Command Buffer Tail Pointer = %#RX64 (off: %#x)\n", CmdBufTailPtr.u64,
3651 CmdBufTailPtr.n.off);
3652 }
3653 /* Event Log Head Pointer Register. */
3654 {
3655 EVT_LOG_HEAD_PTR_T const EvtLogHeadPtr = pThis->EvtLogHeadPtr;
3656 pHlp->pfnPrintf(pHlp, " Event Log Head Pointer = %#RX64 (off: %#x)\n", EvtLogHeadPtr.u64,
3657 EvtLogHeadPtr.n.off);
3658 }
3659 /* Event Log Tail Pointer Register. */
3660 {
3661 EVT_LOG_TAIL_PTR_T const EvtLogTailPtr = pThis->EvtLogTailPtr;
3662 pHlp->pfnPrintf(pHlp, " Event Log Tail Pointer = %#RX64 (off: %#x)\n", EvtLogTailPtr.u64,
3663 EvtLogTailPtr.n.off);
3664 }
3665 /* Status Register. */
3666 {
3667 IOMMU_STATUS_T const Status = pThis->Status;
3668 pHlp->pfnPrintf(pHlp, " Status Register = %#RX64\n", Status.u64);
3669 if (fVerbose)
3670 {
3671 pHlp->pfnPrintf(pHlp, " Event log overflow = %RTbool\n", Status.n.u1EvtOverflow);
3672 pHlp->pfnPrintf(pHlp, " Event log interrupt = %RTbool\n", Status.n.u1EvtLogIntr);
3673 pHlp->pfnPrintf(pHlp, " Completion wait interrupt = %RTbool\n", Status.n.u1CompWaitIntr);
3674 pHlp->pfnPrintf(pHlp, " Event log running = %RTbool\n", Status.n.u1EvtLogRunning);
3675 pHlp->pfnPrintf(pHlp, " Command buffer running = %RTbool\n", Status.n.u1CmdBufRunning);
3676 pHlp->pfnPrintf(pHlp, " PPR overflow = %RTbool\n", Status.n.u1PprOverflow);
3677 pHlp->pfnPrintf(pHlp, " PPR interrupt = %RTbool\n", Status.n.u1PprIntr);
3678 pHlp->pfnPrintf(pHlp, " PPR log running = %RTbool\n", Status.n.u1PprLogRunning);
3679 pHlp->pfnPrintf(pHlp, " Guest log running = %RTbool\n", Status.n.u1GstLogRunning);
3680 pHlp->pfnPrintf(pHlp, " Guest log interrupt = %RTbool\n", Status.n.u1GstLogIntr);
3681 pHlp->pfnPrintf(pHlp, " PPR log B overflow = %RTbool\n", Status.n.u1PprOverflowB);
3682 pHlp->pfnPrintf(pHlp, " PPR log active = %RTbool\n", Status.n.u1PprLogActive);
3683 pHlp->pfnPrintf(pHlp, " Event log B overflow = %RTbool\n", Status.n.u1EvtOverflowB);
3684 pHlp->pfnPrintf(pHlp, " Event log active = %RTbool\n", Status.n.u1EvtLogActive);
3685 pHlp->pfnPrintf(pHlp, " PPR log B overflow early warning = %RTbool\n", Status.n.u1PprOverflowEarlyB);
3686 pHlp->pfnPrintf(pHlp, " PPR log overflow early warning = %RTbool\n", Status.n.u1PprOverflowEarly);
3687 }
3688 }
3689 /* PPR Log Head Pointer. */
3690 {
3691 PPR_LOG_HEAD_PTR_T const PprLogHeadPtr = pThis->PprLogHeadPtr;
3692 pHlp->pfnPrintf(pHlp, " PPR Log Head Pointer = %#RX64 (off: %#x)\n", PprLogHeadPtr.u64,
3693 PprLogHeadPtr.n.off);
3694 }
3695 /* PPR Log Tail Pointer. */
3696 {
3697 PPR_LOG_TAIL_PTR_T const PprLogTailPtr = pThis->PprLogTailPtr;
3698 pHlp->pfnPrintf(pHlp, " PPR Log Tail Pointer = %#RX64 (off: %#x)\n", PprLogTailPtr.u64,
3699 PprLogTailPtr.n.off);
3700 }
3701 /* Guest Virtual-APIC Log Head Pointer. */
3702 {
3703 GALOG_HEAD_PTR_T const GALogHeadPtr = pThis->GALogHeadPtr;
3704 pHlp->pfnPrintf(pHlp, " Guest Virtual-APIC Log Head Pointer = %#RX64 (off: %#x)\n", GALogHeadPtr.u64,
3705 GALogHeadPtr.n.u12GALogPtr);
3706 }
3707 /* Guest Virtual-APIC Log Tail Pointer. */
3708 {
3709 GALOG_HEAD_PTR_T const GALogTailPtr = pThis->GALogTailPtr;
3710 pHlp->pfnPrintf(pHlp, " Guest Virtual-APIC Log Tail Pointer = %#RX64 (off: %#x)\n", GALogTailPtr.u64,
3711 GALogTailPtr.n.u12GALogPtr);
3712 }
3713 /* PPR Log B Head Pointer. */
3714 {
3715 PPR_LOG_B_HEAD_PTR_T const PprLogBHeadPtr = pThis->PprLogBHeadPtr;
3716 pHlp->pfnPrintf(pHlp, " PPR Log B Head Pointer = %#RX64 (off: %#x)\n", PprLogBHeadPtr.u64,
3717 PprLogBHeadPtr.n.off);
3718 }
3719 /* PPR Log B Tail Pointer. */
3720 {
3721 PPR_LOG_B_TAIL_PTR_T const PprLogBTailPtr = pThis->PprLogBTailPtr;
3722 pHlp->pfnPrintf(pHlp, " PPR Log B Tail Pointer = %#RX64 (off: %#x)\n", PprLogBTailPtr.u64,
3723 PprLogBTailPtr.n.off);
3724 }
3725 /* Event Log B Head Pointer. */
3726 {
3727 EVT_LOG_B_HEAD_PTR_T const EvtLogBHeadPtr = pThis->EvtLogBHeadPtr;
3728 pHlp->pfnPrintf(pHlp, " Event Log B Head Pointer = %#RX64 (off: %#x)\n", EvtLogBHeadPtr.u64,
3729 EvtLogBHeadPtr.n.off);
3730 }
3731 /* Event Log B Tail Pointer. */
3732 {
3733 EVT_LOG_B_TAIL_PTR_T const EvtLogBTailPtr = pThis->EvtLogBTailPtr;
3734 pHlp->pfnPrintf(pHlp, " Event Log B Tail Pointer = %#RX64 (off: %#x)\n", EvtLogBTailPtr.u64,
3735 EvtLogBTailPtr.n.off);
3736 }
3737 /* PPR Log Auto Response Register. */
3738 {
3739 PPR_LOG_AUTO_RESP_T const PprLogAutoResp = pThis->PprLogAutoResp;
3740 pHlp->pfnPrintf(pHlp, " PPR Log Auto Response Register = %#RX64\n", PprLogAutoResp.u64);
3741 if (fVerbose)
3742 {
3743 pHlp->pfnPrintf(pHlp, " Code = %#x\n", PprLogAutoResp.n.u4AutoRespCode);
3744 pHlp->pfnPrintf(pHlp, " Mask Gen. = %RTbool\n", PprLogAutoResp.n.u1AutoRespMaskGen);
3745 }
3746 }
3747 /* PPR Log Overflow Early Warning Indicator Register. */
3748 {
3749 PPR_LOG_OVERFLOW_EARLY_T const PprLogOverflowEarly = pThis->PprLogOverflowEarly;
3750 pHlp->pfnPrintf(pHlp, " PPR Log overflow early warning = %#RX64\n", PprLogOverflowEarly.u64);
3751 if (fVerbose)
3752 {
3753 pHlp->pfnPrintf(pHlp, " Threshold = %#x\n", PprLogOverflowEarly.n.u15Threshold);
3754 pHlp->pfnPrintf(pHlp, " Interrupt enable = %RTbool\n", PprLogOverflowEarly.n.u1IntrEn);
3755 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", PprLogOverflowEarly.n.u1Enable);
3756 }
3757 }
3758 /* PPR Log B Overflow Early Warning Indicator Register. */
3759 {
3760 PPR_LOG_OVERFLOW_EARLY_T const PprLogBOverflowEarly = pThis->PprLogBOverflowEarly;
3761 pHlp->pfnPrintf(pHlp, " PPR Log B overflow early warning = %#RX64\n", PprLogBOverflowEarly.u64);
3762 if (fVerbose)
3763 {
3764 pHlp->pfnPrintf(pHlp, " Threshold = %#x\n", PprLogBOverflowEarly.n.u15Threshold);
3765 pHlp->pfnPrintf(pHlp, " Interrupt enable = %RTbool\n", PprLogBOverflowEarly.n.u1IntrEn);
3766 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", PprLogBOverflowEarly.n.u1Enable);
3767 }
3768 }
3769}
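
/*
 * Usage note: the handler above backs the "iommu" info item registered in
 * iommuAmdR3Construct. From the VM debugger, "info iommu" prints the summary
 * view, while "info iommu verbose" additionally decodes each register into
 * its fields.
 */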
3770
3771
3772/**
3773 * Dumps the DTE via the info callback helper.
3774 *
3775 * @param pHlp The info helper.
3776 * @param pDte The device table entry.
3777 * @param pszPrefix The string prefix.
3778 */
3779static void iommuAmdR3DbgInfoDteWorker(PCDBGFINFOHLP pHlp, PCDTE_T pDte, const char *pszPrefix)
3780{
3781 AssertReturnVoid(pHlp);
3782 AssertReturnVoid(pDte);
3783 AssertReturnVoid(pszPrefix);
3784
3785 pHlp->pfnPrintf(pHlp, "%sValid = %RTbool\n", pszPrefix, pDte->n.u1Valid);
3786 pHlp->pfnPrintf(pHlp, "%sTranslation Valid = %RTbool\n", pszPrefix, pDte->n.u1TranslationValid);
3787 pHlp->pfnPrintf(pHlp, "%sHost Access Dirty = %#x\n", pszPrefix, pDte->n.u2Had);
3788 pHlp->pfnPrintf(pHlp, "%sPaging Mode = %u\n", pszPrefix, pDte->n.u3Mode);
3789 pHlp->pfnPrintf(pHlp, "%sPage Table Root Ptr = %#RX64 (addr=%#RGp)\n", pszPrefix, pDte->n.u40PageTableRootPtrLo,
3790 pDte->n.u40PageTableRootPtrLo << 12);
3791 pHlp->pfnPrintf(pHlp, "%sPPR enable = %RTbool\n", pszPrefix, pDte->n.u1Ppr);
3792 pHlp->pfnPrintf(pHlp, "%sGuest PPR Resp w/ PASID = %RTbool\n", pszPrefix, pDte->n.u1GstPprRespPasid);
3793 pHlp->pfnPrintf(pHlp, "%sGuest I/O Prot Valid = %RTbool\n", pszPrefix, pDte->n.u1GstIoValid);
3794 pHlp->pfnPrintf(pHlp, "%sGuest Translation Valid = %RTbool\n", pszPrefix, pDte->n.u1GstTranslateValid);
3795 pHlp->pfnPrintf(pHlp, "%sGuest Levels Translated = %#x\n", pszPrefix, pDte->n.u2GstMode);
3796 pHlp->pfnPrintf(pHlp, "%sGuest Root Page Table Ptr = %#x %#x %#x (addr=%#RGp)\n", pszPrefix,
3797 pDte->n.u3GstCr3TableRootPtrLo, pDte->n.u16GstCr3TableRootPtrMid, pDte->n.u21GstCr3TableRootPtrHi,
3798 (pDte->n.u21GstCr3TableRootPtrHi << 31)
3799 | (pDte->n.u16GstCr3TableRootPtrMid << 15)
3800 | (pDte->n.u3GstCr3TableRootPtrLo << 12));
3801 pHlp->pfnPrintf(pHlp, "%sI/O Read = %s\n", pszPrefix, pDte->n.u1IoRead ? "allowed" : "denied");
3802 pHlp->pfnPrintf(pHlp, "%sI/O Write = %s\n", pszPrefix, pDte->n.u1IoWrite ? "allowed" : "denied");
3803 pHlp->pfnPrintf(pHlp, "%sReserved (MBZ) = %#x\n", pszPrefix, pDte->n.u1Rsvd0);
3804 pHlp->pfnPrintf(pHlp, "%sDomain ID = %u (%#x)\n", pszPrefix, pDte->n.u16DomainId, pDte->n.u16DomainId);
3805 pHlp->pfnPrintf(pHlp, "%sIOTLB Enable = %RTbool\n", pszPrefix, pDte->n.u1IoTlbEnable);
3806 pHlp->pfnPrintf(pHlp, "%sSuppress I/O PFs = %RTbool\n", pszPrefix, pDte->n.u1SuppressPfEvents);
3807 pHlp->pfnPrintf(pHlp, "%sSuppress all I/O PFs = %RTbool\n", pszPrefix, pDte->n.u1SuppressAllPfEvents);
3808 pHlp->pfnPrintf(pHlp, "%sPort I/O Control = %#x\n", pszPrefix, pDte->n.u2IoCtl);
3809 pHlp->pfnPrintf(pHlp, "%sIOTLB Cache Hint = %s\n", pszPrefix, pDte->n.u1Cache ? "no caching" : "cache");
3810 pHlp->pfnPrintf(pHlp, "%sSnoop Disable = %RTbool\n", pszPrefix, pDte->n.u1SnoopDisable);
3811 pHlp->pfnPrintf(pHlp, "%sAllow Exclusion = %RTbool\n", pszPrefix, pDte->n.u1AllowExclusion);
3812 pHlp->pfnPrintf(pHlp, "%sSysMgt Message Enable = %#x\n", pszPrefix, pDte->n.u2SysMgt);
3813 pHlp->pfnPrintf(pHlp, "\n");
3814
3815 pHlp->pfnPrintf(pHlp, "%sInterrupt Map Valid = %RTbool\n", pszPrefix, pDte->n.u1IntrMapValid);
3816 uint8_t const uIntrTabLen = pDte->n.u4IntrTableLength;
3817 if (uIntrTabLen < IOMMU_DTE_INTR_TAB_LEN_MAX)
3818 {
3819 uint16_t const cEntries = IOMMU_GET_INTR_TAB_ENTRIES(pDte);
3820 uint16_t const cbIntrTable = IOMMU_GET_INTR_TAB_LEN(pDte);
3821 pHlp->pfnPrintf(pHlp, "%sInterrupt Table Length = %#x (%u entries, %u bytes)\n", pszPrefix, uIntrTabLen, cEntries,
3822 cbIntrTable);
3823 }
3824 else
3825 pHlp->pfnPrintf(pHlp, "%sInterrupt Table Length = %#x (invalid!)\n", pszPrefix, uIntrTabLen);
3826 pHlp->pfnPrintf(pHlp, "%sIgnore Unmapped Interrupts = %RTbool\n", pszPrefix, pDte->n.u1IgnoreUnmappedIntrs);
3827 pHlp->pfnPrintf(pHlp, "%sInterrupt Table Root Ptr = %#RX64 (addr=%#RGp)\n", pszPrefix,
3828 pDte->n.u46IntrTableRootPtr, pDte->au64[2] & IOMMU_DTE_IRTE_ROOT_PTR_MASK);
3829 pHlp->pfnPrintf(pHlp, "%sReserved (MBZ) = %#x\n", pszPrefix, pDte->n.u4Rsvd0);
3830 pHlp->pfnPrintf(pHlp, "%sINIT passthru = %RTbool\n", pszPrefix, pDte->n.u1InitPassthru);
3831 pHlp->pfnPrintf(pHlp, "%sExtInt passthru = %RTbool\n", pszPrefix, pDte->n.u1ExtIntPassthru);
3832 pHlp->pfnPrintf(pHlp, "%sNMI passthru = %RTbool\n", pszPrefix, pDte->n.u1NmiPassthru);
3833 pHlp->pfnPrintf(pHlp, "%sReserved (MBZ) = %#x\n", pszPrefix, pDte->n.u1Rsvd2);
3834 pHlp->pfnPrintf(pHlp, "%sInterrupt Control = %#x\n", pszPrefix, pDte->n.u2IntrCtrl);
3835 pHlp->pfnPrintf(pHlp, "%sLINT0 passthru = %RTbool\n", pszPrefix, pDte->n.u1Lint0Passthru);
3836 pHlp->pfnPrintf(pHlp, "%sLINT1 passthru = %RTbool\n", pszPrefix, pDte->n.u1Lint1Passthru);
3837 pHlp->pfnPrintf(pHlp, "%sReserved (MBZ) = %#x\n", pszPrefix, pDte->n.u32Rsvd0);
3838 pHlp->pfnPrintf(pHlp, "%sReserved (MBZ) = %#x\n", pszPrefix, pDte->n.u22Rsvd0);
3839 pHlp->pfnPrintf(pHlp, "%sAttribute Override Valid = %RTbool\n", pszPrefix, pDte->n.u1AttrOverride);
3840 pHlp->pfnPrintf(pHlp, "%sMode0FC = %#x\n", pszPrefix, pDte->n.u1Mode0FC);
3841 pHlp->pfnPrintf(pHlp, "%sSnoop Attribute = %#x\n", pszPrefix, pDte->n.u8SnoopAttr);
3842}
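
/*
 * A standalone sketch of how the guest CR3 root-table pointer printed above
 * is recomposed from its three DTE fields: the low 3 bits land at address
 * bits 14:12, the middle 16 bits at bits 30:15 and the high 21 bits at bits
 * 51:31. Field widths follow the dump code above; the name is illustrative.
 */
#if 0
static uint64_t iommuAmdSketchGstCr3RootPtr(uint64_t uLo3, uint64_t uMid16, uint64_t uHi21)
{
    return (uHi21 << 31) | (uMid16 << 15) | (uLo3 << 12);
}
#endif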
3843
3844
3845/**
3846 * @callback_method_impl{FNDBGFHANDLERDEV}
3847 */
3848static DECLCALLBACK(void) iommuAmdR3DbgInfoDte(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
3849{
3850 if (pszArgs)
3851 {
3852 uint16_t uDevId = 0;
3853 int rc = RTStrToUInt16Full(pszArgs, 0 /* uBase */, &uDevId);
3854 if (RT_SUCCESS(rc))
3855 {
3856 DTE_T Dte;
3857 rc = iommuAmdReadDte(pDevIns, uDevId, IOMMUOP_TRANSLATE_REQ, &Dte);
3858 if (RT_SUCCESS(rc))
3859 {
3860 iommuAmdR3DbgInfoDteWorker(pHlp, &Dte, " ");
3861 return;
3862 }
3863
3864 pHlp->pfnPrintf(pHlp, "Failed to read DTE for device ID %u (%#x). rc=%Rrc\n", uDevId, uDevId, rc);
3865 }
3866 else
3867 pHlp->pfnPrintf(pHlp, "Failed to parse a valid 16-bit device ID. rc=%Rrc\n", rc);
3868 }
3869 else
3870 pHlp->pfnPrintf(pHlp, "Missing device ID.\n");
3871}
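
/*
 * Usage note: this handler backs the "iommudte" info item registered in
 * iommuAmdR3Construct, e.g. "info iommudte 0x10" (the device ID is purely
 * illustrative). RTStrToUInt16Full is called with base 0, so the argument
 * may be 0x-prefixed hex or plain decimal.
 */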
3872
3873
3874#if 0
3875/**
3876 * @callback_method_impl{FNDBGFHANDLERDEV}
3877 */
3878static DECLCALLBACK(void) iommuAmdR3DbgInfoDevTabs(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
3879{
3880 RT_NOREF(pszArgs);
3881
3882 PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3883 PCPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
3884 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
3885
3886 uint8_t cTables = 0;
3887 for (uint8_t i = 0; i < RT_ELEMENTS(pThis->aDevTabBaseAddrs); i++)
3888 {
3889 DEV_TAB_BAR_T DevTabBar = pThis->aDevTabBaseAddrs[i];
3890 RTGCPHYS const GCPhysDevTab = DevTabBar.n.u40Base << X86_PAGE_4K_SHIFT;
3891 if (GCPhysDevTab)
3892 ++cTables;
3893 }
3894
3895 pHlp->pfnPrintf(pHlp, "AMD-IOMMU Device Tables:\n");
3896 pHlp->pfnPrintf(pHlp, " Tables active: %u\n", cTables);
3897 if (!cTables)
3898 return;
3899
3900 for (uint8_t i = 0; i < RT_ELEMENTS(pThis->aDevTabBaseAddrs); i++)
3901 {
3902 DEV_TAB_BAR_T DevTabBar = pThis->aDevTabBaseAddrs[i];
3903 RTGCPHYS const GCPhysDevTab = DevTabBar.n.u40Base << X86_PAGE_4K_SHIFT;
3904 if (GCPhysDevTab)
3905 {
3906 uint32_t const cbDevTab = IOMMU_GET_DEV_TAB_LEN(&DevTabBar);
3907 uint32_t const cDtes = cbDevTab / sizeof(DTE_T);
3908 pHlp->pfnPrintf(pHlp, " Table %u (base=%#RGp size=%u bytes entries=%u):\n", i, GCPhysDevTab, cbDevTab, cDtes);
3909
3910 void *pvDevTab = RTMemAllocZ(cbDevTab);
3911 if (RT_LIKELY(pvDevTab))
3912 {
3913 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysDevTab, pvDevTab, cbDevTab);
3914 if (RT_SUCCESS(rc))
3915 {
3916 for (uint32_t idxDte = 0; idxDte < cDtes; idxDte++)
3917 {
3918 PCDTE_T pDte = (PCDTE_T)((char *)pvDevTab + idxDte * sizeof(DTE_T));
3919 if ( pDte->n.u1Valid
3920 || pDte->n.u1IntrMapValid)
3921 {
3922 pHlp->pfnPrintf(pHlp, " DTE %u:\n", idxDte);
3923 iommuAmdR3DbgInfoDteWorker(pHlp, pDte, " ");
3924 }
3925 }
3926 pHlp->pfnPrintf(pHlp, "\n");
3927 }
3928 else
3929 {
3930 pHlp->pfnPrintf(pHlp, " Failed to read table at %#RGp of size %u bytes. rc=%Rrc!\n", GCPhysDevTab,
3931 cbDevTab, rc);
3932 }
3933
3934 RTMemFree(pvDevTab);
3935 }
3936 else
3937 {
3938 pHlp->pfnPrintf(pHlp, " Allocating %u bytes for reading the device table failed!\n", cbDevTab);
3939 return;
3940 }
3941 }
3942 }
3943}
3944#endif
3945
3946/**
3947 * @callback_method_impl{FNSSMDEVSAVEEXEC}
3948 */
3949static DECLCALLBACK(int) iommuAmdR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
3950{
3951 /** @todo IOMMU: Save state. */
3952 RT_NOREF2(pDevIns, pSSM);
3953 LogFlowFunc(("\n"));
3954 return VERR_NOT_IMPLEMENTED;
3955}
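
/*
 * A minimal sketch of what the save path might eventually stream once
 * implemented: the mutable registers that iommuAmdR3Reset clears, written
 * via the SSM helpers assumed to be available in the R3 device helper table.
 * The layout below is an assumption, not a final saved-state format.
 */
#if 0
static int iommuAmdR3SaveExecSketch(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
{
    PCIOMMU       pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
    PCPDMDEVHLPR3 pHlp  = pDevIns->pHlpR3;
    pHlp->pfnSSMPutU64(pSSM, pThis->Ctrl.u64);
    pHlp->pfnSSMPutU64(pSSM, pThis->Status.u64);
    pHlp->pfnSSMPutU64(pSSM, pThis->CmdBufBaseAddr.u64);
    return pHlp->pfnSSMPutU64(pSSM, pThis->EvtLogBaseAddr.u64);
}
#endif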
3956
3957
3958/**
3959 * @callback_method_impl{FNSSMDEVLOADEXEC}
3960 */
3961static DECLCALLBACK(int) iommuAmdR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3962{
3963 /** @todo IOMMU: Load state. */
3964 RT_NOREF4(pDevIns, pSSM, uVersion, uPass);
3965 LogFlowFunc(("\n"));
3966 return VERR_NOT_IMPLEMENTED;
3967}
3968
3969
3970/**
3971 * @interface_method_impl{PDMDEVREG,pfnReset}
3972 */
3973static DECLCALLBACK(void) iommuAmdR3Reset(PPDMDEVINS pDevIns)
3974{
3975 /*
3976 * Resets read-write portion of the IOMMU state.
3977 *
3978 * State data not initialized here is expected to be initialized during
3979 * device construction and remain read-only through the lifetime of the VM.
3980 */
3981 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3982 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
3983 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
3984
3985 IOMMU_LOCK_NORET(pDevIns);
3986
3987 LogFlowFunc(("\n"));
3988
3989 memset(&pThis->aDevTabBaseAddrs[0], 0, sizeof(pThis->aDevTabBaseAddrs));
3990
3991 pThis->CmdBufBaseAddr.u64 = 0;
3992 pThis->CmdBufBaseAddr.n.u4Len = 8;
3993
3994 pThis->EvtLogBaseAddr.u64 = 0;
3995 pThis->EvtLogBaseAddr.n.u4Len = 8;
3996
3997 pThis->Ctrl.u64 = 0;
3998 pThis->Ctrl.n.u1Coherent = 1;
3999 Assert(!pThis->ExtFeat.n.u1BlockStopMarkSup);
4000
4001 pThis->ExclRangeBaseAddr.u64 = 0;
4002 pThis->ExclRangeLimit.u64 = 0;
4003
4004 pThis->PprLogBaseAddr.u64 = 0;
4005 pThis->PprLogBaseAddr.n.u4Len = 8;
4006
4007 pThis->HwEvtHi.u64 = 0;
4008 pThis->HwEvtLo = 0;
4009 pThis->HwEvtStatus.u64 = 0;
4010
4011 pThis->GALogBaseAddr.u64 = 0;
4012 pThis->GALogBaseAddr.n.u4Len = 8;
4013 pThis->GALogTailAddr.u64 = 0;
4014
4015 pThis->PprLogBBaseAddr.u64 = 0;
4016 pThis->PprLogBBaseAddr.n.u4Len = 8;
4017
4018 pThis->EvtLogBBaseAddr.u64 = 0;
4019 pThis->EvtLogBBaseAddr.n.u4Len = 8;
4020
4021 pThis->PerfOptCtrl.u32 = 0;
4022
4023 pThis->XtGenIntrCtrl.u64 = 0;
4024 pThis->XtPprIntrCtrl.u64 = 0;
4025 pThis->XtGALogIntrCtrl.u64 = 0;
4026
4027 memset(&pThis->aMarcApers[0], 0, sizeof(pThis->aMarcApers));
4028
4029 pThis->CmdBufHeadPtr.u64 = 0;
4030 pThis->CmdBufTailPtr.u64 = 0;
4031 pThis->EvtLogHeadPtr.u64 = 0;
4032 pThis->EvtLogTailPtr.u64 = 0;
4033
4034 pThis->Status.u64 = 0;
4035
4036 pThis->PprLogHeadPtr.u64 = 0;
4037 pThis->PprLogTailPtr.u64 = 0;
4038
4039 pThis->GALogHeadPtr.u64 = 0;
4040 pThis->GALogTailPtr.u64 = 0;
4041
4042 pThis->PprLogBHeadPtr.u64 = 0;
4043 pThis->PprLogBTailPtr.u64 = 0;
4044
4045 pThis->EvtLogBHeadPtr.u64 = 0;
4046 pThis->EvtLogBTailPtr.u64 = 0;
4047
4048 pThis->PprLogAutoResp.u64 = 0;
4049 pThis->PprLogOverflowEarly.u64 = 0;
4050 pThis->PprLogBOverflowEarly.u64 = 0;
4051
4052 pThis->IommuBar.u64 = 0;
4053 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_LO, 0);
4054 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_HI, 0);
4055
4056 PDMPciDevSetCommand(pPciDev, VBOX_PCI_COMMAND_MASTER);
4057
4058 IOMMU_UNLOCK(pDevIns);
4059}
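
/*
 * Sanity sketch for the reset defaults above: assuming the AMD spec encoding
 * of 2^N entries of 16 bytes per buffer entry, the encoded length 8 written
 * to the buffer base address registers corresponds to 256 entries (4096
 * bytes).
 */
#if 0
AssertCompile((1U << 8) * 16 == 4096);
#endif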
4060
4061
4062/**
4063 * @interface_method_impl{PDMDEVREG,pfnDestruct}
4064 */
4065static DECLCALLBACK(int) iommuAmdR3Destruct(PPDMDEVINS pDevIns)
4066{
4067 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
4068 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
4069 LogFlowFunc(("\n"));
4070
4071 /* Close the command thread semaphore. */
4072 if (pThis->hEvtCmdThread != NIL_SUPSEMEVENT)
4073 {
4074 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEvtCmdThread);
4075 pThis->hEvtCmdThread = NIL_SUPSEMEVENT;
4076 }
4077 return VINF_SUCCESS;
4078}
4079
4080
4081/**
4082 * @interface_method_impl{PDMDEVREG,pfnConstruct}
4083 */
4084static DECLCALLBACK(int) iommuAmdR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
4085{
4086 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
4087 RT_NOREF(pCfg);
4088
4089 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
4090 PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
4091 pThisCC->pDevInsR3 = pDevIns;
4092
4093 LogFlowFunc(("iInstance=%d\n", iInstance));
4094
4095 /*
4096 * Register the IOMMU with PDM.
4097 */
4098 PDMIOMMUREGR3 IommuReg;
4099 RT_ZERO(IommuReg);
4100 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
4101 IommuReg.pfnMemRead = iommuAmdDeviceMemRead;
4102 IommuReg.pfnMemWrite = iommuAmdDeviceMemWrite;
4103 IommuReg.pfnMsiRemap = iommuAmdDeviceMsiRemap;
4104 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
4105 int rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
4106 if (RT_FAILURE(rc))
4107 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to register ourselves as an IOMMU device"));
4108 if (pThisCC->CTX_SUFF(pIommuHlp)->u32Version != PDM_IOMMUHLPR3_VERSION)
4109 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
4110 N_("IOMMU helper version mismatch; got %#x expected %#x"),
4111 pThisCC->CTX_SUFF(pIommuHlp)->u32Version, PDM_IOMMUHLPR3_VERSION);
4112 if (pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd != PDM_IOMMUHLPR3_VERSION)
4113 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
4114 N_("IOMMU helper end-version mismatch; got %#x expected %#x"),
4115 pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd, PDM_IOMMUHLPR3_VERSION);
4116
4117 /*
4118 * Initialize read-only PCI configuration space.
4119 */
4120 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
4121 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
4122
4123 /* Header. */
4124 PDMPciDevSetVendorId(pPciDev, IOMMU_PCI_VENDOR_ID); /* AMD */
4125 PDMPciDevSetDeviceId(pPciDev, IOMMU_PCI_DEVICE_ID); /* VirtualBox IOMMU device */
4126 PDMPciDevSetCommand(pPciDev, VBOX_PCI_COMMAND_MASTER); /* Enable bus master (as we directly access main memory) */
4127 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST); /* Capability list supported */
4128 PDMPciDevSetRevisionId(pPciDev, IOMMU_PCI_REVISION_ID); /* VirtualBox specific device implementation revision */
4129 PDMPciDevSetClassBase(pPciDev, VBOX_PCI_CLASS_SYSTEM); /* System Base Peripheral */
4130 PDMPciDevSetClassSub(pPciDev, VBOX_PCI_SUB_SYSTEM_IOMMU); /* IOMMU */
4131 PDMPciDevSetClassProg(pPciDev, 0x0); /* IOMMU Programming interface */
4132 PDMPciDevSetHeaderType(pPciDev, 0x0); /* Single function, type 0 */
4133 PDMPciDevSetSubSystemId(pPciDev, IOMMU_PCI_DEVICE_ID); /* VirtualBox IOMMU device */
4134 PDMPciDevSetSubSystemVendorId(pPciDev, IOMMU_PCI_VENDOR_ID); /* AMD */
4135 PDMPciDevSetCapabilityList(pPciDev, IOMMU_PCI_OFF_CAP_HDR); /* Offset into capability registers */
4136 PDMPciDevSetInterruptPin(pPciDev, 0x1); /* INTA#. */
4137 PDMPciDevSetInterruptLine(pPciDev, 0x0); /* For software compatibility; no effect on hardware */
4138
4139 /* Capability Header. */
4140 /* NOTE! Fields (e.g., EFR) must match what we expose in the ACPI tables. */
4141 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_CAP_HDR,
4142 RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_ID, 0xf) /* RO - Secure Device capability block */
4143 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_PTR, IOMMU_PCI_OFF_MSI_CAP_HDR) /* RO - Offset to next capability */
4144 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_TYPE, 0x3) /* RO - IOMMU capability block */
4145 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_REV, 0x1) /* RO - IOMMU interface revision */
4146 | RT_BF_MAKE(IOMMU_BF_CAPHDR_IOTLB_SUP, 0x0) /* RO - Remote IOTLB support */
4147 | RT_BF_MAKE(IOMMU_BF_CAPHDR_HT_TUNNEL, 0x0) /* RO - HyperTransport Tunnel support */
4148 | RT_BF_MAKE(IOMMU_BF_CAPHDR_NP_CACHE, 0x0) /* RO - Cache NP page table entries */
4149 | RT_BF_MAKE(IOMMU_BF_CAPHDR_EFR_SUP, 0x1) /* RO - Extended Feature Register support */
4150 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_EXT, 0x1)); /* RO - Misc. Information Register support */
4151
4152 /* Base Address Register. */
4153 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_LO, 0x0); /* RW - Base address (Lo) and enable bit */
4154 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_HI, 0x0); /* RW - Base address (Hi) */
4155
4156 /* IOMMU Range Register. */
4157 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_RANGE_REG, 0x0); /* RW - Range register (implemented as RO by us) */
4158
4159 /* Misc. Information Register. */
4160 /* NOTE! Fields (e.g., GVA size) must match what we expose in the ACPI tables. */
4161 uint32_t const uMiscInfoReg0 = RT_BF_MAKE(IOMMU_BF_MISCINFO_0_MSI_NUM, 0) /* RO - MSI number */
4162 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_GVA_SIZE, 2) /* RO - Guest Virt. Addr size (2=48 bits) */
4163 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_PA_SIZE, 48) /* RO - Physical Addr size (48 bits) */
4164 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_VA_SIZE, 64) /* RO - Virt. Addr size (64 bits) */
4165 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_HT_ATS_RESV, 0) /* RW - HT ATS reserved */
4166 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_MSI_NUM_PPR, 0); /* RW - PPR interrupt number */
4167 uint32_t const uMiscInfoReg1 = 0;
4168 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MISCINFO_REG_0, uMiscInfoReg0);
4169 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MISCINFO_REG_1, uMiscInfoReg1);
4170
4171 /* MSI Capability Header register. */
4172 PDMMSIREG MsiReg;
4173 RT_ZERO(MsiReg);
4174 MsiReg.cMsiVectors = 1;
4175 MsiReg.iMsiCapOffset = IOMMU_PCI_OFF_MSI_CAP_HDR;
4176 MsiReg.iMsiNextOffset = 0; /* IOMMU_PCI_OFF_MSI_MAP_CAP_HDR */
4177 MsiReg.fMsi64bit = 1; /* 64-bit addressing support is mandatory; See AMD spec. 2.8 "IOMMU Interrupt Support". */
4178
4179 /* MSI Address (Lo, Hi) and MSI data are read-write PCI config registers handled by our generic PCI config space code. */
4180#if 0
4181 /* MSI Address Lo. */
4182 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO, 0); /* RW - MSI message address (Lo) */
4183 /* MSI Address Hi. */
4184 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI, 0); /* RW - MSI message address (Hi) */
4185 /* MSI Data. */
4186 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA, 0); /* RW - MSI data */
4187#endif
4188
4189#if 0
4190 /** @todo IOMMU: I don't know if we need to support this, enable later if
4191 * required. */
4192 /* MSI Mapping Capability Header register. */
4193 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_MAP_CAP_HDR,
4194 RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_CAP_ID, 0x8) /* RO - Capability ID */
4195 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_CAP_PTR, 0x0) /* RO - Offset to next capability (NULL) */
4196 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_EN, 0x1) /* RO - MSI mapping capability enable */
4197 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_FIXED, 0x1) /* RO - MSI mapping range is fixed */
4198 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_CAP_TYPE, 0x15)); /* RO - MSI mapping capability */
4199 /* When implementing don't forget to copy this to its MMIO shadow register (MsiMapCapHdr) in iommuAmdR3Init. */
4200#endif
4201
4202 /*
4203 * Register the PCI function with PDM.
4204 */
4205 rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
4206 AssertLogRelRCReturn(rc, rc);
4207
4208 /*
4209 * Register MSI support for the PCI device.
4210 * This must be done -after- registering it as a PCI device!
4211 */
4212 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
4213 AssertRCReturn(rc, rc);
4214
4215 /*
4216 * Intercept PCI config. space accesses.
4217 */
4218 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, iommuAmdR3PciConfigRead, iommuAmdR3PciConfigWrite);
4219 AssertLogRelRCReturn(rc, rc);
4220
4221 /*
4222 * Create the MMIO region.
4223 * Mapping of the region is done when software configures it via PCI config space.
4224 */
4225 rc = PDMDevHlpMmioCreate(pDevIns, IOMMU_MMIO_REGION_SIZE, pPciDev, 0 /* iPciRegion */, iommuAmdMmioWrite, iommuAmdMmioRead,
4226 NULL /* pvUser */, IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_DWORD_QWORD_ZEROED,
4227 "AMD-IOMMU", &pThis->hMmio);
4228 AssertLogRelRCReturn(rc, rc);
4229
4230 /*
4231 * Register saved state.
4232 */
4233 rc = PDMDevHlpSSMRegisterEx(pDevIns, IOMMU_SAVED_STATE_VERSION, sizeof(IOMMU), NULL,
4234 NULL, NULL, NULL,
4235 NULL, iommuAmdR3SaveExec, NULL,
4236 NULL, iommuAmdR3LoadExec, NULL);
4237 AssertLogRelRCReturn(rc, rc);
4238
4239 /*
4240 * Register debugger info items.
4241 */
4242 PDMDevHlpDBGFInfoRegister(pDevIns, "iommu", "Display IOMMU state.", iommuAmdR3DbgInfo);
4243 PDMDevHlpDBGFInfoRegister(pDevIns, "iommudte", "Display the DTE for a device. Arguments: DeviceID.", iommuAmdR3DbgInfoDte);
4244#if 0
4245 PDMDevHlpDBGFInfoRegister(pDevIns, "iommudevtabs", "Display IOMMU device tables.", iommuAmdR3DbgInfoDevTabs);
4246#endif
4247
4248# ifdef VBOX_WITH_STATISTICS
4249 /*
4250 * Statistics.
4251 */
4252 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadR3, STAMTYPE_COUNTER, "R3/MmioReadR3", STAMUNIT_OCCURENCES, "Number of MMIO reads in R3.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadRZ, STAMTYPE_COUNTER, "RZ/MmioReadRZ", STAMUNIT_OCCURENCES, "Number of MMIO reads in RZ.");

    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteR3, STAMTYPE_COUNTER, "R3/MmioWriteR3", STAMUNIT_OCCURENCES, "Number of MMIO writes in R3.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteRZ, STAMTYPE_COUNTER, "RZ/MmioWriteRZ", STAMUNIT_OCCURENCES, "Number of MMIO writes in RZ.");

    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapR3, STAMTYPE_COUNTER, "R3/MsiRemapR3", STAMUNIT_OCCURENCES, "Number of interrupt remap requests in R3.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapRZ, STAMTYPE_COUNTER, "RZ/MsiRemapRZ", STAMUNIT_OCCURENCES, "Number of interrupt remap requests in RZ.");

    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmd, STAMTYPE_COUNTER, "R3/Commands", STAMUNIT_OCCURENCES, "Number of commands processed (total).");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdCompWait, STAMTYPE_COUNTER, "R3/Commands/CompWait", STAMUNIT_OCCURENCES, "Number of Completion Wait commands processed.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvDte, STAMTYPE_COUNTER, "R3/Commands/InvDte", STAMUNIT_OCCURENCES, "Number of Invalidate DTE commands processed.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvIommuPages, STAMTYPE_COUNTER, "R3/Commands/InvIommuPages", STAMUNIT_OCCURENCES, "Number of Invalidate IOMMU Pages commands processed.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvIotlbPages, STAMTYPE_COUNTER, "R3/Commands/InvIotlbPages", STAMUNIT_OCCURENCES, "Number of Invalidate IOTLB Pages commands processed.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvIntrTable, STAMTYPE_COUNTER, "R3/Commands/InvIntrTable", STAMUNIT_OCCURENCES, "Number of Invalidate Interrupt Table commands processed.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdPrefIommuPages, STAMTYPE_COUNTER, "R3/Commands/PrefIommuPages", STAMUNIT_OCCURENCES, "Number of Prefetch IOMMU Pages commands processed.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdCompletePprReq, STAMTYPE_COUNTER, "R3/Commands/CompletePprReq", STAMUNIT_OCCURENCES, "Number of Complete PPR Requests commands processed.");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvIommuAll, STAMTYPE_COUNTER, "R3/Commands/InvIommuAll", STAMUNIT_OCCURENCES, "Number of Invalidate IOMMU All commands processed.");
# endif

    /*
     * Create the command thread and its event semaphore.
     */
    char szDevIommu[64];
    RT_ZERO(szDevIommu);
    RTStrPrintf(szDevIommu, sizeof(szDevIommu), "IOMMU-%u", iInstance);
    rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->pCmdThread, pThis, iommuAmdR3CmdThread, iommuAmdR3CmdThreadWakeUp,
                               0 /* cbStack */, RTTHREADTYPE_IO, szDevIommu);
    AssertLogRelRCReturn(rc, rc);

    rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEvtCmdThread);
    AssertLogRelRCReturn(rc, rc);
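    /* Note: The command thread (iommuAmdR3CmdThread) sleeps on hEvtCmdThread until
       iommuAmdR3CmdThreadWakeUp signals it, presumably when the guest posts new commands
       to the command buffer. */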

    /*
     * Initialize read-only registers.
     * NOTE! Fields here must match their corresponding fields in the ACPI tables.
     */
    /** @todo Keep the '= 0' assignments for now; they make it easier to see which existing
     *        features we might want to implement later. Remove them once that's done. */
    pThis->ExtFeat.u64 = 0;
    pThis->ExtFeat.n.u1PrefetchSup = 0;
    pThis->ExtFeat.n.u1PprSup = 0;
    pThis->ExtFeat.n.u1X2ApicSup = 0;
    pThis->ExtFeat.n.u1NoExecuteSup = 0;
    pThis->ExtFeat.n.u1GstTranslateSup = 0;
    pThis->ExtFeat.n.u1InvAllSup = 1;
    pThis->ExtFeat.n.u1GstVirtApicSup = 0;
    pThis->ExtFeat.n.u1HwErrorSup = 1;
    pThis->ExtFeat.n.u1PerfCounterSup = 0;
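    /* Note: Per the AMD IOMMU spec. the HATS field encodes the maximum host translation
       level as 0 = 4-level, 1 = 5-level and 2 = 6-level page tables (3 is reserved), which
       is why masking the level count with 0x3 below works: 4 & 3 = 0, 5 & 3 = 1, 6 & 3 = 2. */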
    AssertCompile((IOMMU_MAX_HOST_PT_LEVEL & 0x3) < 3);
    pThis->ExtFeat.n.u2HostAddrTranslateSize = (IOMMU_MAX_HOST_PT_LEVEL & 0x3);
    pThis->ExtFeat.n.u2GstAddrTranslateSize = 0;   /* Requires GstTranslateSup */
    pThis->ExtFeat.n.u2GstCr3RootTblLevel = 0;     /* Requires GstTranslateSup */
    pThis->ExtFeat.n.u2SmiFilterSup = 0;
    pThis->ExtFeat.n.u3SmiFilterCount = 0;
    pThis->ExtFeat.n.u3GstVirtApicModeSup = 0;     /* Requires GstVirtApicSup */
    pThis->ExtFeat.n.u2DualPprLogSup = 0;
    pThis->ExtFeat.n.u2DualEvtLogSup = 0;
    pThis->ExtFeat.n.u5MaxPasidSup = 0;            /* Requires GstTranslateSup */
    pThis->ExtFeat.n.u1UserSupervisorSup = 0;
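    /* Note: The 2-bit DevTabSegSup field encodes the supported number of device table
       segments as a power of two (presumably 0 = unsupported, 1 = 2, 2 = 4, 3 = 8 segments),
       hence the assertion below that the value fits in two bits. */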
    AssertCompile(IOMMU_MAX_DEV_TAB_SEGMENTS <= 3);
    pThis->ExtFeat.n.u2DevTabSegSup = IOMMU_MAX_DEV_TAB_SEGMENTS;
    pThis->ExtFeat.n.u1PprLogOverflowWarn = 0;
    pThis->ExtFeat.n.u1PprAutoRespSup = 0;
    pThis->ExtFeat.n.u2MarcSup = 0;
    pThis->ExtFeat.n.u1BlockStopMarkSup = 0;
    pThis->ExtFeat.n.u1PerfOptSup = 0;
    pThis->ExtFeat.n.u1MsiCapMmioSup = 1;
    pThis->ExtFeat.n.u1GstIoSup = 0;
    pThis->ExtFeat.n.u1HostAccessSup = 0;
    pThis->ExtFeat.n.u1EnhancedPprSup = 0;
    pThis->ExtFeat.n.u1AttrForwardSup = 0;
    pThis->ExtFeat.n.u1HostDirtySup = 0;
    pThis->ExtFeat.n.u1InvIoTlbTypeSup = 0;
    pThis->ExtFeat.n.u1GstUpdateDisSup = 0;
    pThis->ExtFeat.n.u1ForcePhysDstSup = 0;

    pThis->RsvdReg = 0;

    pThis->DevSpecificFeat.u64 = 0;
    pThis->DevSpecificFeat.n.u4RevMajor = IOMMU_DEVSPEC_FEAT_MAJOR_VERSION;
    pThis->DevSpecificFeat.n.u4RevMinor = IOMMU_DEVSPEC_FEAT_MINOR_VERSION;

    pThis->DevSpecificCtrl.u64 = 0;
    pThis->DevSpecificCtrl.n.u4RevMajor = IOMMU_DEVSPEC_CTRL_MAJOR_VERSION;
    pThis->DevSpecificCtrl.n.u4RevMinor = IOMMU_DEVSPEC_CTRL_MINOR_VERSION;

    pThis->DevSpecificStatus.u64 = 0;
    pThis->DevSpecificStatus.n.u4RevMajor = IOMMU_DEVSPEC_STATUS_MAJOR_VERSION;
    pThis->DevSpecificStatus.n.u4RevMinor = IOMMU_DEVSPEC_STATUS_MINOR_VERSION;

    pThis->MiscInfo.u64 = RT_MAKE_U64(uMiscInfoReg0, uMiscInfoReg1);

    /*
     * Initialize parts of the IOMMU state as they would be during reset.
     * Must be called -after- initializing PCI config. space registers.
     */
    iommuAmdR3Reset(pDevIns);

    return VINF_SUCCESS;
}

# else /* !IN_RING3 */

/**
 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
 */
static DECLCALLBACK(int) iommuAmdRZConstruct(PPDMDEVINS pDevIns)
{
    PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);

    pThisCC->CTX_SUFF(pDevIns) = pDevIns;

    /* Set up the MMIO RZ handlers. */
    int rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, iommuAmdMmioWrite, iommuAmdMmioRead, NULL /* pvUser */);
    AssertRCReturn(rc, rc);
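    /* Note: pThis->hMmio is the shared handle created by the ring-3 constructor; this call
       only attaches the ring-0/raw-mode handlers for the already-created region. */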

    /* Set up the IOMMU RZ callbacks. */
    PDMIOMMUREGCC IommuReg;
    RT_ZERO(IommuReg);
    IommuReg.u32Version  = PDM_IOMMUREGCC_VERSION;
    IommuReg.idxIommu    = pThis->idxIommu;
    IommuReg.pfnMemRead  = iommuAmdDeviceMemRead;
    IommuReg.pfnMemWrite = iommuAmdDeviceMemWrite;
    IommuReg.pfnMsiRemap = iommuAmdDeviceMsiRemap;
    IommuReg.u32TheEnd   = PDM_IOMMUREGCC_VERSION;
    rc = PDMDevHlpIommuSetUpContext(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}

# endif /* !IN_RING3 */

/**
 * The device registration structure.
 */
const PDMDEVREG g_DeviceIommuAmd =
{
    /* .u32Version = */ PDM_DEVREG_VERSION,
    /* .uReserved0 = */ 0,
    /* .szName = */ "iommu-amd",
    /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
    /* .fClass = */ PDM_DEVREG_CLASS_PCI_BUILTIN,
    /* .cMaxInstances = */ ~0U,
    /* .uSharedVersion = */ 42,
    /* .cbInstanceShared = */ sizeof(IOMMU),
    /* .cbInstanceCC = */ sizeof(IOMMUCC),
    /* .cbInstanceRC = */ sizeof(IOMMURC),
    /* .cMaxPciDevices = */ 1,
    /* .cMaxMsixVectors = */ 0,
    /* .pszDescription = */ "IOMMU (AMD)",
#if defined(IN_RING3)
    /* .pszRCMod = */ "VBoxDDRC.rc",
    /* .pszR0Mod = */ "VBoxDDR0.r0",
    /* .pfnConstruct = */ iommuAmdR3Construct,
    /* .pfnDestruct = */ iommuAmdR3Destruct,
    /* .pfnRelocate = */ NULL,
    /* .pfnMemSetup = */ NULL,
    /* .pfnPowerOn = */ NULL,
    /* .pfnReset = */ iommuAmdR3Reset,
    /* .pfnSuspend = */ NULL,
    /* .pfnResume = */ NULL,
    /* .pfnAttach = */ NULL,
    /* .pfnDetach = */ NULL,
    /* .pfnQueryInterface = */ NULL,
    /* .pfnInitComplete = */ NULL,
    /* .pfnPowerOff = */ NULL,
    /* .pfnSoftReset = */ NULL,
    /* .pfnReserved0 = */ NULL,
    /* .pfnReserved1 = */ NULL,
    /* .pfnReserved2 = */ NULL,
    /* .pfnReserved3 = */ NULL,
    /* .pfnReserved4 = */ NULL,
    /* .pfnReserved5 = */ NULL,
    /* .pfnReserved6 = */ NULL,
    /* .pfnReserved7 = */ NULL,
#elif defined(IN_RING0)
    /* .pfnEarlyConstruct = */ NULL,
    /* .pfnConstruct = */ iommuAmdRZConstruct,
    /* .pfnDestruct = */ NULL,
    /* .pfnFinalDestruct = */ NULL,
    /* .pfnRequest = */ NULL,
    /* .pfnReserved0 = */ NULL,
    /* .pfnReserved1 = */ NULL,
    /* .pfnReserved2 = */ NULL,
    /* .pfnReserved3 = */ NULL,
    /* .pfnReserved4 = */ NULL,
    /* .pfnReserved5 = */ NULL,
    /* .pfnReserved6 = */ NULL,
    /* .pfnReserved7 = */ NULL,
#elif defined(IN_RC)
    /* .pfnConstruct = */ iommuAmdRZConstruct,
    /* .pfnReserved0 = */ NULL,
    /* .pfnReserved1 = */ NULL,
    /* .pfnReserved2 = */ NULL,
    /* .pfnReserved3 = */ NULL,
    /* .pfnReserved4 = */ NULL,
    /* .pfnReserved5 = */ NULL,
    /* .pfnReserved6 = */ NULL,
    /* .pfnReserved7 = */ NULL,
#else
# error "Not in IN_RING3, IN_RING0 or IN_RC!"
#endif
    /* .u32VersionEnd = */ PDM_DEVREG_VERSION
};

#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
