VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp @ 86084

Last change on this file since 86084 was 86084, checked in by vboxsync, 4 years ago

AMD IOMMU: bugref:9654 Don't forget to multiply by the size of each DTE when indexing into the device table!
Also, when device table segmentation is used we need to shift by 15, 14 or 13 bits;
always shifting by 13 would include erroneous bits from the device ID.
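
In effect (an editor's illustrative sketch, not the actual patch; it assumes the g_auDevTabSegMasks/g_auDevTabSegShifts lookup tables defined later in this file, a device ID in uDevId, the segment-support field u2DevTabSegSup, and a DTE_T record type for a device table entry):

    /* Select the device table segment index from a 16-bit device ID. With
       2, 4 or 8 segments the index occupies the top 1, 2 or 3 bits of the
       ID, so the shift must be 15, 14 or 13 respectively; always shifting
       by 13 would pull low-order device ID bits into the index. */
    uint8_t const idxSeg = (uDevId & g_auDevTabSegMasks[u2DevTabSegSup]) >> g_auDevTabSegShifts[u2DevTabSegSup];
    /* The remaining low bits index into that segment; scale by the entry
       size when forming the byte offset (the "multiply" part of the fix). */
    RTGCPHYS const offDte = (RTGCPHYS)(uDevId & ~g_auDevTabSegMasks[u2DevTabSegSup]) * sizeof(DTE_T);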

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 183.6 KB
1/* $Id: DevIommuAmd.cpp 86084 2020-09-10 13:12:30Z vboxsync $ */
2/** @file
3 * IOMMU - Input/Output Memory Management Unit - AMD implementation.
4 */
5
6/*
7 * Copyright (C) 2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_IOMMU
23#include <VBox/msi.h>
24#include <VBox/iommu-amd.h>
25#include <VBox/vmm/pdmdev.h>
26#include <VBox/AssertGuest.h>
27
28#include <iprt/x86.h>
29#include <iprt/string.h>
30
31#include "VBoxDD.h"
32#include "DevIommuAmd.h"
33
34
35/*********************************************************************************************************************************
36* Defined Constants And Macros *
37*********************************************************************************************************************************/
38/** Release log prefix string. */
39#define IOMMU_LOG_PFX "IOMMU-AMD"
40/** The current saved state version. */
41#define IOMMU_SAVED_STATE_VERSION 1
42/** The IOTLB entry magic. */
43#define IOMMU_IOTLBE_MAGIC 0x10acce55
44
45
46/*********************************************************************************************************************************
47* Structures and Typedefs *
48*********************************************************************************************************************************/
49/**
50 * Acquires the IOMMU PDM lock.
51 * This will make a long jump to ring-3 to acquire the lock if necessary.
52 */
53#define IOMMU_LOCK(a_pDevIns) \
54 do { \
55 int rcLock = PDMDevHlpCritSectEnter((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo), VINF_SUCCESS); \
56 if (RT_LIKELY(rcLock == VINF_SUCCESS)) \
57 { /* likely */ } \
58 else \
59 return rcLock; \
60 } while (0)
61
62/**
63 * Acquires the IOMMU PDM lock (asserts on failure rather than returning an error).
64 * This will make a long jump to ring-3 to acquire the lock if necessary.
65 */
66#define IOMMU_LOCK_NORET(a_pDevIns) \
67 do { \
68 int rcLock = PDMDevHlpCritSectEnter((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo), VINF_SUCCESS); \
69 AssertRC(rcLock); \
70 } while (0)
71
72/**
73 * Releases the IOMMU PDM lock.
74 */
75#define IOMMU_UNLOCK(a_pDevIns) \
76 do { \
77 PDMDevHlpCritSectLeave((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo)); \
78 } while (0)
79
80/**
81 * Asserts that the critsect is owned by this thread.
82 */
83#define IOMMU_ASSERT_LOCKED(a_pDevIns) \
84 do { \
85 Assert(PDMDevHlpCritSectIsOwner((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo))); \
86 } while (0)
87
88/**
89 * Asserts that the critsect is not owned by this thread.
90 */
91#define IOMMU_ASSERT_NOT_LOCKED(a_pDevIns) \
92 do { \
93 Assert(!PDMDevHlpCritSectIsOwner((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo))); \
94 } while (0)
95
96/**
97 * IOMMU operations (transaction) types.
98 */
99typedef enum IOMMUOP
100{
101 /** Address translation request. */
102 IOMMUOP_TRANSLATE_REQ = 0,
103 /** Memory read request. */
104 IOMMUOP_MEM_READ,
105 /** Memory write request. */
106 IOMMUOP_MEM_WRITE,
107 /** Interrupt request. */
108 IOMMUOP_INTR_REQ,
109 /** Command. */
110 IOMMUOP_CMD
111} IOMMUOP;
112AssertCompileSize(IOMMUOP, 4);
113
114/**
115 * I/O page walk result.
116 */
117typedef struct
118{
119 /** The translated system physical address. */
120 RTGCPHYS GCPhysSpa;
121 /** The number of offset bits in the system physical address. */
122 uint8_t cShift;
123 /** The I/O permissions allowed by the translation (IOMMU_IO_PERM_XXX). */
124 uint8_t fIoPerm;
125 /** Padding. */
126 uint8_t abPadding[2];
127} IOWALKRESULT;
128/** Pointer to an I/O walk result struct. */
129typedef IOWALKRESULT *PIOWALKRESULT;
130/** Pointer to a const I/O walk result struct. */
131 typedef IOWALKRESULT const *PCIOWALKRESULT;
132
133/**
134 * IOMMU I/O TLB Entry.
135 * Keep this as small and aligned as possible.
136 */
137typedef struct
138{
139 /** The translated system physical address (SPA) of the page. */
140 RTGCPHYS GCPhysSpa;
141 /** The index of the 4K page within a large page. */
142 uint32_t idxSubPage;
143 /** The I/O access permissions (IOMMU_IO_PERM_XXX). */
144 uint8_t fIoPerm;
145 /** The number of offset bits in the translation indicating page size. */
146 uint8_t cShift;
147 /** Alignment padding. */
148 uint8_t afPadding[2];
149} IOTLBE_T;
150AssertCompileSize(IOTLBE_T, 16);
151/** Pointer to an IOMMU I/O TLB entry struct. */
152typedef IOTLBE_T *PIOTLBE_T;
153/** Pointer to a const IOMMU I/O TLB entry struct. */
154typedef IOTLBE_T const *PCIOTLBE_T;
155
156/**
157 * The shared IOMMU device state.
158 */
159typedef struct IOMMU
160{
161 /** IOMMU device index (0 is at the top of the PCI tree hierarchy). */
162 uint32_t idxIommu;
163 /** Alignment padding. */
164 uint32_t uPadding0;
165
166 /** Whether the command thread is sleeping. */
167 bool volatile fCmdThreadSleeping;
168 /** Alignment padding. */
169 uint8_t afPadding0[3];
170 /** Whether the command thread has been signaled for wake up. */
171 bool volatile fCmdThreadSignaled;
172 /** Alignment padding. */
173 uint8_t afPadding1[3];
174
175 /** The event semaphore the command thread waits on. */
176 SUPSEMEVENT hEvtCmdThread;
177 /** The MMIO handle. */
178 IOMMMIOHANDLE hMmio;
179
180 /** @name PCI: Base capability block registers.
181 * @{ */
182 IOMMU_BAR_T IommuBar; /**< IOMMU base address register. */
183 /** @} */
184
185 /** @name MMIO: Control and status registers.
186 * @{ */
187 DEV_TAB_BAR_T aDevTabBaseAddrs[8]; /**< Device table base address registers. */
188 CMD_BUF_BAR_T CmdBufBaseAddr; /**< Command buffer base address register. */
189 EVT_LOG_BAR_T EvtLogBaseAddr; /**< Event log base address register. */
190 IOMMU_CTRL_T Ctrl; /**< IOMMU control register. */
191 IOMMU_EXCL_RANGE_BAR_T ExclRangeBaseAddr; /**< IOMMU exclusion range base register. */
192 IOMMU_EXCL_RANGE_LIMIT_T ExclRangeLimit; /**< IOMMU exclusion range limit. */
193 IOMMU_EXT_FEAT_T ExtFeat; /**< IOMMU extended feature register. */
194 /** @} */
195
196 /** @name MMIO: PPR Log registers.
197 * @{ */
198 PPR_LOG_BAR_T PprLogBaseAddr; /**< PPR Log base address register. */
199 IOMMU_HW_EVT_HI_T HwEvtHi; /**< IOMMU hardware event register (Hi). */
200 IOMMU_HW_EVT_LO_T HwEvtLo; /**< IOMMU hardware event register (Lo). */
201 IOMMU_HW_EVT_STATUS_T HwEvtStatus; /**< IOMMU hardware event status. */
202 /** @} */
203
204 /** @todo IOMMU: SMI filter. */
205
206 /** @name MMIO: Guest Virtual-APIC Log registers.
207 * @{ */
208 GALOG_BAR_T GALogBaseAddr; /**< Guest Virtual-APIC Log base address register. */
209 GALOG_TAIL_ADDR_T GALogTailAddr; /**< Guest Virtual-APIC Log Tail address register. */
210 /** @} */
211
212 /** @name MMIO: Alternate PPR and Event Log registers.
213 * @{ */
214 PPR_LOG_B_BAR_T PprLogBBaseAddr; /**< PPR Log B base address register. */
215 EVT_LOG_B_BAR_T EvtLogBBaseAddr; /**< Event Log B base address register. */
216 /** @} */
217
218 /** @name MMIO: Device-specific feature registers.
219 * @{ */
220 DEV_SPECIFIC_FEAT_T DevSpecificFeat; /**< Device-specific feature extension register (DSFX). */
221 DEV_SPECIFIC_CTRL_T DevSpecificCtrl; /**< Device-specific control extension register (DSCX). */
222 DEV_SPECIFIC_STATUS_T DevSpecificStatus; /**< Device-specific status extension register (DSSX). */
223 /** @} */
224
225 /** @name MMIO: MSI Capability Block registers.
226 * @{ */
227 MSI_MISC_INFO_T MiscInfo; /**< MSI Misc. info registers / MSI Vector registers. */
228 /** @} */
229
230 /** @name MMIO: Performance Optimization Control registers.
231 * @{ */
232 IOMMU_PERF_OPT_CTRL_T PerfOptCtrl; /**< IOMMU Performance optimization control register. */
233 /** @} */
234
235 /** @name MMIO: x2APIC Control registers.
236 * @{ */
237 IOMMU_XT_GEN_INTR_CTRL_T XtGenIntrCtrl; /**< IOMMU X2APIC General interrupt control register. */
238 IOMMU_XT_PPR_INTR_CTRL_T XtPprIntrCtrl; /**< IOMMU X2APIC PPR interrupt control register. */
239 IOMMU_XT_GALOG_INTR_CTRL_T XtGALogIntrCtrl; /**< IOMMU X2APIC Guest Log interrupt control register. */
240 /** @} */
241
242 /** @name MMIO: MARC registers.
243 * @{ */
244 MARC_APER_T aMarcApers[4]; /**< MARC Aperture Registers. */
245 /** @} */
246
247 /** @name MMIO: Reserved register.
248 * @{ */
249 IOMMU_RSVD_REG_T RsvdReg; /**< IOMMU Reserved Register. */
250 /** @} */
251
252 /** @name MMIO: Command and Event Log pointer registers.
253 * @{ */
254 CMD_BUF_HEAD_PTR_T CmdBufHeadPtr; /**< Command buffer head pointer register. */
255 CMD_BUF_TAIL_PTR_T CmdBufTailPtr; /**< Command buffer tail pointer register. */
256 EVT_LOG_HEAD_PTR_T EvtLogHeadPtr; /**< Event log head pointer register. */
257 EVT_LOG_TAIL_PTR_T EvtLogTailPtr; /**< Event log tail pointer register. */
258 /** @} */
259
260 /** @name MMIO: Command and Event Status register.
261 * @{ */
262 IOMMU_STATUS_T Status; /**< IOMMU status register. */
263 /** @} */
264
265 /** @name MMIO: PPR Log Head and Tail pointer registers.
266 * @{ */
267 PPR_LOG_HEAD_PTR_T PprLogHeadPtr; /**< IOMMU PPR log head pointer register. */
268 PPR_LOG_TAIL_PTR_T PprLogTailPtr; /**< IOMMU PPR log tail pointer register. */
269 /** @} */
270
271 /** @name MMIO: Guest Virtual-APIC Log Head and Tail pointer registers.
272 * @{ */
273 GALOG_HEAD_PTR_T GALogHeadPtr; /**< Guest Virtual-APIC log head pointer register. */
274 GALOG_TAIL_PTR_T GALogTailPtr; /**< Guest Virtual-APIC log tail pointer register. */
275 /** @} */
276
277 /** @name MMIO: PPR Log B Head and Tail pointer registers.
278 * @{ */
279 PPR_LOG_B_HEAD_PTR_T PprLogBHeadPtr; /**< PPR log B head pointer register. */
280 PPR_LOG_B_TAIL_PTR_T PprLogBTailPtr; /**< PPR log B tail pointer register. */
281 /** @} */
282
283 /** @name MMIO: Event Log B Head and Tail pointer registers.
284 * @{ */
285 EVT_LOG_B_HEAD_PTR_T EvtLogBHeadPtr; /**< Event log B head pointer register. */
286 EVT_LOG_B_TAIL_PTR_T EvtLogBTailPtr; /**< Event log B tail pointer register. */
287 /** @} */
288
289 /** @name MMIO: PPR Log Overflow protection registers.
290 * @{ */
291 PPR_LOG_AUTO_RESP_T PprLogAutoResp; /**< PPR Log Auto Response register. */
292 PPR_LOG_OVERFLOW_EARLY_T PprLogOverflowEarly; /**< PPR Log Overflow Early Indicator register. */
293 PPR_LOG_B_OVERFLOW_EARLY_T PprLogBOverflowEarly; /**< PPR Log B Overflow Early Indicator register. */
294 /** @} */
295
296 /** @todo IOMMU: IOMMU Event counter registers. */
297
298#ifdef VBOX_WITH_STATISTICS
299 /** @name IOMMU: Stat counters.
300 * @{ */
301 STAMCOUNTER StatMmioReadR3; /**< Number of MMIO reads in R3. */
302 STAMCOUNTER StatMmioReadRZ; /**< Number of MMIO reads in RZ. */
303
304 STAMCOUNTER StatMmioWriteR3; /**< Number of MMIO writes in R3. */
305 STAMCOUNTER StatMmioWriteRZ; /**< Number of MMIO writes in RZ. */
306
307 STAMCOUNTER StatMsiRemapR3; /**< Number of MSI remap requests in R3. */
308 STAMCOUNTER StatMsiRemapRZ; /**< Number of MSI remap requests in RZ. */
309
310 STAMCOUNTER StatCmd; /**< Number of commands processed. */
311 STAMCOUNTER StatCmdCompWait; /**< Number of Completion Wait commands processed. */
312 STAMCOUNTER StatCmdInvDte; /**< Number of Invalidate DTE commands processed. */
313 STAMCOUNTER StatCmdInvIommuPages; /**< Number of Invalidate IOMMU pages commands processed. */
314 STAMCOUNTER StatCmdInvIotlbPages; /**< Number of Invalidate IOTLB pages commands processed. */
315 STAMCOUNTER StatCmdInvIntrTable; /**< Number of Invalidate Interrupt Table commands processed. */
316 STAMCOUNTER StatCmdPrefIommuPages; /**< Number of Prefetch IOMMU Pages commands processed. */
317 STAMCOUNTER StatCmdCompletePprReq; /**< Number of Complete PPR Requests commands processed. */
318 STAMCOUNTER StatCmdInvIommuAll; /**< Number of Invalidate IOMMU All commands processed. */
319 /** @} */
320#endif
321} IOMMU;
322/** Pointer to the IOMMU device state. */
323typedef struct IOMMU *PIOMMU;
324/** Pointer to the const IOMMU device state. */
325typedef const struct IOMMU *PCIOMMU;
326AssertCompileMemberAlignment(IOMMU, fCmdThreadSleeping, 4);
327AssertCompileMemberAlignment(IOMMU, fCmdThreadSignaled, 4);
328AssertCompileMemberAlignment(IOMMU, hEvtCmdThread, 8);
329AssertCompileMemberAlignment(IOMMU, hMmio, 8);
330AssertCompileMemberAlignment(IOMMU, IommuBar, 8);
331
332/**
333 * The ring-3 IOMMU device state.
334 */
335typedef struct IOMMUR3
336{
337 /** Device instance. */
338 PPDMDEVINSR3 pDevInsR3;
339 /** The IOMMU helpers. */
340 PCPDMIOMMUHLPR3 pIommuHlpR3;
341 /** The command thread handle. */
342 R3PTRTYPE(PPDMTHREAD) pCmdThread;
343} IOMMUR3;
344/** Pointer to the ring-3 IOMMU device state. */
345typedef IOMMUR3 *PIOMMUR3;
346
347/**
348 * The ring-0 IOMMU device state.
349 */
350typedef struct IOMMUR0
351{
352 /** Device instance. */
353 PPDMDEVINSR0 pDevInsR0;
354 /** The IOMMU helpers. */
355 PCPDMIOMMUHLPR0 pIommuHlpR0;
356} IOMMUR0;
357/** Pointer to the ring-0 IOMMU device state. */
358typedef IOMMUR0 *PIOMMUR0;
359
360/**
361 * The raw-mode IOMMU device state.
362 */
363typedef struct IOMMURC
364{
365 /** Device instance. */
366 PPDMDEVINSRC pDevInsRC;
367 /** The IOMMU helpers. */
368 PCPDMIOMMUHLPRC pIommuHlpRC;
369} IOMMURC;
370/** Pointer to the raw-mode IOMMU device state. */
371typedef IOMMURC *PIOMMURC;
372
373/** The IOMMU device state for the current context. */
374typedef CTX_SUFF(IOMMU) IOMMUCC;
375/** Pointer to the IOMMU device state for the current context. */
376typedef CTX_SUFF(PIOMMU) PIOMMUCC;
377
378/**
379 * IOMMU register access routines.
380 */
381typedef struct
382{
383 const char *pszName; /**< The register name. */
384 VBOXSTRICTRC (*pfnRead )(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t *pu64Value); /**< The register read callback. */
385 VBOXSTRICTRC (*pfnWrite)(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value); /**< The register write callback. */
386 bool f64BitReg; /**< Whether the register is 64 bits wide. */
387} IOMMUREGACC;
388
389
390/*********************************************************************************************************************************
391* Global Variables *
392*********************************************************************************************************************************/
393/**
394 * An array of the number of device table segments supported.
395 * Indexed by u2DevTabSegSup.
396 */
397static uint8_t const g_acDevTabSegs[] = { 0, 2, 4, 8 };
398
399/**
400 * An array of the masks to select the device table segment index from a device ID.
401 */
402static uint16_t const g_auDevTabSegMasks[] = { 0x0, 0x8000, 0xc000, 0xe000 };
403
404/**
405 * An array of the shift values to select the device table segment index from a
406 * device ID.
407 */
408static uint8_t const g_auDevTabSegShifts[] = { 0, 15, 14, 13 };
409
410/**
411 * The maximum size (inclusive) of each device table segment (0 to 7).
412 * Indexed by the device table segment index.
413 */
414static uint16_t const g_auDevTabSegMaxSizes[] = { 0x1ff, 0xff, 0x7f, 0x7f, 0x3f, 0x3f, 0x3f, 0x3f };
415
416
417#ifndef VBOX_DEVICE_STRUCT_TESTCASE
418/**
419 * Gets the maximum number of buffer entries for the given buffer length.
420 *
421 * @returns Number of buffer entries.
422 * @param uEncodedLen The length (power-of-2 encoded).
423 */
424DECLINLINE(uint32_t) iommuAmdGetBufMaxEntries(uint8_t uEncodedLen)
425{
426 Assert(uEncodedLen > 7);
427 return 2 << (uEncodedLen - 1);
428}
429
430
431/**
432 * Gets the total length of the buffer given a base register's encoded length.
433 *
434 * @returns The length of the buffer in bytes.
435 * @param uEncodedLen The length (power-of-2 encoded).
436 */
437DECLINLINE(uint32_t) iommuAmdGetTotalBufLength(uint8_t uEncodedLen)
438{
439 Assert(uEncodedLen > 7);
440 return (2 << (uEncodedLen - 1)) << 4;
441}
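/* Editor's note, worked example for the two helpers above: the smallest legal
   encoding 8 yields 2 << 7 = 256 entries; at 16 bytes (128 bits) per
   command/event log entry that is 256 << 4 = 4096 bytes, i.e. a 4K buffer. */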
442
443
444/**
445 * Gets the number of (unconsumed) entries in the event log.
446 *
447 * @returns The number of entries in the event log.
448 * @param pThis The IOMMU device state.
449 */
450static uint32_t iommuAmdGetEvtLogEntryCount(PIOMMU pThis)
451{
452 uint32_t const idxTail = pThis->EvtLogTailPtr.n.off >> IOMMU_EVT_GENERIC_SHIFT;
453 uint32_t const idxHead = pThis->EvtLogHeadPtr.n.off >> IOMMU_EVT_GENERIC_SHIFT;
454 if (idxTail >= idxHead)
455 return idxTail - idxHead;
456
457 uint32_t const cMaxEvts = iommuAmdGetBufMaxEntries(pThis->EvtLogBaseAddr.n.u4Len);
458 return cMaxEvts - idxHead + idxTail;
459}
460
461
462/**
463 * Gets the number of (unconsumed) commands in the command buffer.
464 *
465 * @returns The number of commands in the command buffer.
466 * @param pThis The IOMMU device state.
467 */
468static uint32_t iommuAmdGetCmdBufEntryCount(PIOMMU pThis)
469{
470 uint32_t const idxTail = pThis->CmdBufTailPtr.n.off >> IOMMU_CMD_GENERIC_SHIFT;
471 uint32_t const idxHead = pThis->CmdBufHeadPtr.n.off >> IOMMU_CMD_GENERIC_SHIFT;
472 if (idxTail >= idxHead)
473 return idxTail - idxHead;
474
475 uint32_t const cMaxCmds = iommuAmdGetBufMaxEntries(pThis->CmdBufBaseAddr.n.u4Len);
476 return cMaxCmds - idxHead + idxTail;
477}
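/* Editor's note, worked example of the wrap-around branch above: with a
   256-entry command buffer, head index 250 and tail index 2, the tail has
   wrapped and there are 256 - 250 + 2 = 8 unconsumed commands. */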
478
479
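/**
 * Atomically reads the IOMMU status register.
 *
 * @returns The IOMMU status register.
 * @param   pThis   The IOMMU device state.
 */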
480DECL_FORCE_INLINE(IOMMU_STATUS_T) iommuAmdGetStatus(PCIOMMU pThis)
481{
482 IOMMU_STATUS_T Status;
483 Status.u64 = ASMAtomicReadU64((volatile uint64_t *)&pThis->Status.u64);
484 return Status;
485}
486
487
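/**
 * Atomically reads the IOMMU control register.
 *
 * @returns The IOMMU control register.
 * @param   pThis   The IOMMU device state.
 */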
488DECL_FORCE_INLINE(IOMMU_CTRL_T) iommuAmdGetCtrl(PCIOMMU pThis)
489{
490 IOMMU_CTRL_T Ctrl;
491 Ctrl.u64 = ASMAtomicReadU64((volatile uint64_t *)&pThis->Ctrl.u64);
492 return Ctrl;
493}
494
495
496/**
497 * Returns whether MSI is enabled for the IOMMU.
498 *
499 * @returns Whether MSI is enabled.
500 * @param pDevIns The IOMMU device instance.
501 *
502 * @note There should be a PCIDevXxx function for this.
503 */
504static bool iommuAmdIsMsiEnabled(PPDMDEVINS pDevIns)
505{
506 MSI_CAP_HDR_T MsiCapHdr;
507 MsiCapHdr.u32 = PDMPciDevGetDWord(pDevIns->apPciDevs[0], IOMMU_PCI_OFF_MSI_CAP_HDR);
508 return MsiCapHdr.n.u1MsiEnable;
509}
510
511
512/**
513 * Signals a PCI target abort.
514 *
515 * @param pDevIns The IOMMU device instance.
516 */
517static void iommuAmdSetPciTargetAbort(PPDMDEVINS pDevIns)
518{
519 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
520 uint16_t const u16Status = PDMPciDevGetStatus(pPciDev) | VBOX_PCI_STATUS_SIG_TARGET_ABORT;
521 PDMPciDevSetStatus(pPciDev, u16Status);
522}
523
524
525/**
526 * Wakes up the command thread if there are commands to be processed or if
527 * processing is requested to be stopped by software.
528 *
529 * @param pDevIns The IOMMU device instance.
530 */
531static void iommuAmdCmdThreadWakeUpIfNeeded(PPDMDEVINS pDevIns)
532{
533 IOMMU_ASSERT_LOCKED(pDevIns);
534 Log5Func(("\n"));
535
536 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
537 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
538 if (Status.n.u1CmdBufRunning)
539 {
540 Log5Func(("Signaling command thread\n"));
541 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtCmdThread);
542 }
543}
544
545
546/**
547 * Writes to a read-only register.
548 */
549static VBOXSTRICTRC iommuAmdIgnore_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
550{
551 RT_NOREF(pDevIns, pThis, iReg, u64Value);
552 LogFunc(("Write to read-only register (%#x) with value %#RX64 ignored\n", iReg, u64Value));
553 return VINF_SUCCESS;
554}
555
556
557/**
558 * Writes the Device Table Base Address Register.
559 */
560static VBOXSTRICTRC iommuAmdDevTabBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
561{
562 RT_NOREF(pDevIns, iReg);
563
564 /* Mask out all unrecognized bits. */
565 u64Value &= IOMMU_DEV_TAB_BAR_VALID_MASK;
566
567 /* Update the register. */
568 pThis->aDevTabBaseAddrs[0].u64 = u64Value;
569 return VINF_SUCCESS;
570}
571
572
573/**
574 * Writes the Command Buffer Base Address Register.
575 */
576static VBOXSTRICTRC iommuAmdCmdBufBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
577{
578 RT_NOREF(pDevIns, iReg);
579
580 /*
581 * While this is not explicitly specified like the event log base address register,
582 * the AMD spec. does specify "CmdBufRun must be 0b to modify the command buffer registers properly".
583 * Inconsistent specs :/
584 */
585 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
586 if (Status.n.u1CmdBufRunning)
587 {
588 LogFunc(("Setting CmdBufBar (%#RX64) when command buffer is running -> Ignored\n", u64Value));
589 return VINF_SUCCESS;
590 }
591
592 /* Mask out all unrecognized bits. */
593 CMD_BUF_BAR_T CmdBufBaseAddr;
594 CmdBufBaseAddr.u64 = u64Value & IOMMU_CMD_BUF_BAR_VALID_MASK;
595
596 /* Validate the length. */
597 if (CmdBufBaseAddr.n.u4Len >= 8)
598 {
599 /* Update the register. */
600 pThis->CmdBufBaseAddr.u64 = CmdBufBaseAddr.u64;
601
602 /*
603 * Writing the command buffer base address clears the command buffer head and tail pointers.
604 * See AMD spec. 2.4 "Commands".
605 */
606 pThis->CmdBufHeadPtr.u64 = 0;
607 pThis->CmdBufTailPtr.u64 = 0;
608 }
609 else
610 LogFunc(("Command buffer length (%#x) invalid -> Ignored\n", CmdBufBaseAddr.n.u4Len));
611
612 return VINF_SUCCESS;
613}
614
615
616/**
617 * Writes the Event Log Base Address Register.
618 */
619static VBOXSTRICTRC iommuAmdEvtLogBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
620{
621 RT_NOREF(pDevIns, iReg);
622
623 /*
624 * IOMMU behavior is undefined when software writes this register while event logging is running.
625 * In our emulation, we ignore the write entirely.
626 * See AMD IOMMU spec. "Event Log Base Address Register".
627 */
628 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
629 if (Status.n.u1EvtLogRunning)
630 {
631 LogFunc(("Setting EvtLogBar (%#RX64) when event logging is running -> Ignored\n", u64Value));
632 return VINF_SUCCESS;
633 }
634
635 /* Mask out all unrecognized bits. */
636 u64Value &= IOMMU_EVT_LOG_BAR_VALID_MASK;
637 EVT_LOG_BAR_T EvtLogBaseAddr;
638 EvtLogBaseAddr.u64 = u64Value;
639
640 /* Validate the length. */
641 if (EvtLogBaseAddr.n.u4Len >= 8)
642 {
643 /* Update the register. */
644 pThis->EvtLogBaseAddr.u64 = EvtLogBaseAddr.u64;
645
646 /*
647 * Writing the event log base address clears the event log head and tail pointers.
648 * See AMD spec. 2.5 "Event Logging".
649 */
650 pThis->EvtLogHeadPtr.u64 = 0;
651 pThis->EvtLogTailPtr.u64 = 0;
652 }
653 else
654 LogFunc(("Event log length (%#x) invalid -> Ignored\n", EvtLogBaseAddr.n.u4Len));
655
656 return VINF_SUCCESS;
657}
658
659
660/**
661 * Writes the Control Register.
662 */
663static VBOXSTRICTRC iommuAmdCtrl_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
664{
665 RT_NOREF(pDevIns, iReg);
666
667 /* Mask out all unrecognized bits. */
668 u64Value &= IOMMU_CTRL_VALID_MASK;
669
670 IOMMU_CTRL_T const OldCtrl = iommuAmdGetCtrl(pThis);
671 IOMMU_CTRL_T NewCtrl;
672 NewCtrl.u64 = u64Value;
673
674 /* Update the register. */
675 ASMAtomicWriteU64(&pThis->Ctrl.u64, NewCtrl.u64);
676
677 bool const fNewIommuEn = NewCtrl.n.u1IommuEn;
678 bool const fOldIommuEn = OldCtrl.n.u1IommuEn;
679
680 /* Enable or disable event logging when the bit transitions. */
681 bool const fOldEvtLogEn = OldCtrl.n.u1EvtLogEn;
682 bool const fNewEvtLogEn = NewCtrl.n.u1EvtLogEn;
683 if ( fOldEvtLogEn != fNewEvtLogEn
684 || fOldIommuEn != fNewIommuEn)
685 {
686 if ( fNewIommuEn
687 && fNewEvtLogEn)
688 {
689 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_EVT_LOG_OVERFLOW);
690 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_EVT_LOG_RUNNING);
691 }
692 else
693 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_EVT_LOG_RUNNING);
694 }
695
696 /* Enable or disable command buffer processing when the bit transitions. */
697 bool const fOldCmdBufEn = OldCtrl.n.u1CmdBufEn;
698 bool const fNewCmdBufEn = NewCtrl.n.u1CmdBufEn;
699 if ( fOldCmdBufEn != fNewCmdBufEn
700 || fOldIommuEn != fNewIommuEn)
701 {
702 if ( fNewCmdBufEn
703 && fNewIommuEn)
704 {
705 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_CMD_BUF_RUNNING);
706 LogFunc(("Command buffer enabled\n"));
707
708 /* Wake up the command thread to start processing commands. */
709 iommuAmdCmdThreadWakeUpIfNeeded(pDevIns);
710 }
711 else
712 {
713 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_CMD_BUF_RUNNING);
714 LogFunc(("Command buffer disabled\n"));
715 }
716 }
717
718 return VINF_SUCCESS;
719}
720
721
722/**
723 * Writes to the Exclusion Range Base Address Register.
724 */
725static VBOXSTRICTRC iommuAmdExclRangeBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
726{
727 RT_NOREF(pDevIns, iReg);
728 pThis->ExclRangeBaseAddr.u64 = u64Value & IOMMU_EXCL_RANGE_BAR_VALID_MASK;
729 return VINF_SUCCESS;
730}
731
732
733/**
734 * Writes to the Exclusion Range Limit Register.
735 */
736static VBOXSTRICTRC iommuAmdExclRangeLimit_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
737{
738 RT_NOREF(pDevIns, iReg);
739 u64Value &= IOMMU_EXCL_RANGE_LIMIT_VALID_MASK;
740 u64Value |= UINT64_C(0xfff);
741 pThis->ExclRangeLimit.u64 = u64Value;
742 return VINF_SUCCESS;
743}
744
745
746/**
747 * Writes the Hardware Event Register (Hi).
748 */
749static VBOXSTRICTRC iommuAmdHwEvtHi_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
750{
751 /** @todo IOMMU: Why the heck is this marked read/write by the AMD IOMMU spec? */
752 RT_NOREF(pDevIns, iReg);
753 LogFlowFunc(("Writing %#RX64 to hardware event (Hi) register!\n", u64Value));
754 pThis->HwEvtHi.u64 = u64Value;
755 return VINF_SUCCESS;
756}
757
758
759/**
760 * Writes the Hardware Event Register (Lo).
761 */
762static VBOXSTRICTRC iommuAmdHwEvtLo_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
763{
764 /** @todo IOMMU: Why the heck is this marked read/write by the AMD IOMMU spec? */
765 RT_NOREF(pDevIns, iReg);
766 LogFlowFunc(("Writing %#RX64 to hardware event (Lo) register!\n", u64Value));
767 pThis->HwEvtLo = u64Value;
768 return VINF_SUCCESS;
769}
770
771
772/**
773 * Writes the Hardware Event Status Register.
774 */
775static VBOXSTRICTRC iommuAmdHwEvtStatus_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
776{
777 RT_NOREF(pDevIns, iReg);
778
779 /* Mask out all unrecognized bits. */
780 u64Value &= IOMMU_HW_EVT_STATUS_VALID_MASK;
781
782 /*
783 * The two bits (HEO and HEV) are RW1C (Read/Write-1-to-Clear; writing 0 has no effect).
784 * If the Valid bit is not currently set, there is nothing to clear and the write is a no-op.
785 * The Overflow bit (bit 1) is only valid when the Valid bit (bit 0) is 1.
786 */
787 uint64_t HwStatus = pThis->HwEvtStatus.u64;
788 if (!(HwStatus & RT_BIT_64(0)))
789 return VINF_SUCCESS;
790 if (u64Value & HwStatus & RT_BIT_64(0))
791 HwStatus &= ~RT_BIT_64(0);
792 if (u64Value & HwStatus & RT_BIT_64(1))
793 HwStatus &= ~RT_BIT_64(1);
794
795 /* Update the register. */
796 pThis->HwEvtStatus.u64 = HwStatus;
797 return VINF_SUCCESS;
798}
799
800
801/**
802 * Writes the Device Table Segment Base Address Register.
803 */
804static VBOXSTRICTRC iommuAmdDevTabSegBar_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
805{
806 RT_NOREF(pDevIns);
807
808 /* Figure out which segment is being written. */
809 uint8_t const offSegment = (iReg - IOMMU_MMIO_OFF_DEV_TAB_SEG_FIRST) >> 3;
810 uint8_t const idxSegment = offSegment + 1;
811 Assert(idxSegment < RT_ELEMENTS(pThis->aDevTabBaseAddrs));
812
813 /* Mask out all unrecognized bits. */
814 u64Value &= IOMMU_DEV_TAB_SEG_BAR_VALID_MASK;
815 DEV_TAB_BAR_T DevTabSegBar;
816 DevTabSegBar.u64 = u64Value;
817
818 /* Validate the size. */
819 uint16_t const uSegSize = DevTabSegBar.n.u9Size;
820 uint16_t const uMaxSegSize = g_auDevTabSegMaxSizes[idxSegment];
821 if (uSegSize <= uMaxSegSize)
822 {
823 /* Update the register. */
824 pThis->aDevTabBaseAddrs[idxSegment].u64 = u64Value;
825 }
826 else
827 LogFunc(("Device table segment (%u) size invalid (%#RX32) -> Ignored\n", idxSegment, uSegSize));
828
829 return VINF_SUCCESS;
830}
831
832
833/**
834 * Writes the MSI Capability Header Register.
835 */
836static VBOXSTRICTRC iommuAmdMsiCapHdr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
837{
838 RT_NOREF(pThis, iReg);
839 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
840 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
841 MSI_CAP_HDR_T MsiCapHdr;
842 MsiCapHdr.u32 = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR);
843 MsiCapHdr.n.u1MsiEnable = RT_BOOL(u64Value & IOMMU_MSI_CAP_HDR_MSI_EN_MASK);
844 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR, MsiCapHdr.u32);
845 return VINF_SUCCESS;
846}
847
848
849/**
850 * Writes the MSI Address (Lo) Register (32-bit).
851 */
852static VBOXSTRICTRC iommuAmdMsiAddrLo_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
853{
854 RT_NOREF(pThis, iReg);
855 Assert(!RT_HI_U32(u64Value));
856 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
857 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
858 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO, u64Value & VBOX_MSI_ADDR_VALID_MASK);
859 return VINF_SUCCESS;
860}
861
862
863/**
864 * Writes the MSI Address (Hi) Register (32-bit).
865 */
866static VBOXSTRICTRC iommuAmdMsiAddrHi_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
867{
868 RT_NOREF(pThis, iReg);
869 Assert(!RT_HI_U32(u64Value));
870 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
871 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
872 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI, u64Value);
873 return VINF_SUCCESS;
874}
875
876
877/**
878 * Writes the MSI Data Register (32-bit).
879 */
880static VBOXSTRICTRC iommuAmdMsiData_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
881{
882 RT_NOREF(pThis, iReg);
883 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
884 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
885 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA, u64Value & VBOX_MSI_DATA_VALID_MASK);
886 return VINF_SUCCESS;
887}
888
889
890/**
891 * Writes the Command Buffer Head Pointer Register (32-bit).
892 */
893static VBOXSTRICTRC iommuAmdCmdBufHeadPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
894{
895 RT_NOREF(pDevIns, iReg);
896
897 /*
898 * IOMMU behavior is undefined when software writes this register while the command buffer is running.
899 * In our emulation, we ignore the write entirely.
900 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
901 */
902 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
903 if (Status.n.u1CmdBufRunning)
904 {
905 LogFunc(("Setting CmdBufHeadPtr (%#RX64) when command buffer is running -> Ignored\n", u64Value));
906 return VINF_SUCCESS;
907 }
908
909 /*
910 * IOMMU behavior is undefined when software writes a value outside the buffer length.
911 * In our emulation, we ignore the write entirely.
912 */
913 uint32_t const offBuf = u64Value & IOMMU_CMD_BUF_HEAD_PTR_VALID_MASK;
914 uint32_t const cbBuf = iommuAmdGetTotalBufLength(pThis->CmdBufBaseAddr.n.u4Len);
915 Assert(cbBuf <= _512K);
916 if (offBuf >= cbBuf)
917 {
918 LogFunc(("Setting CmdBufHeadPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n", offBuf, cbBuf));
919 return VINF_SUCCESS;
920 }
921
922 /* Update the register. */
923 pThis->CmdBufHeadPtr.au32[0] = offBuf;
924
925 iommuAmdCmdThreadWakeUpIfNeeded(pDevIns);
926
927 LogFlowFunc(("Set CmdBufHeadPtr to %#RX32\n", offBuf));
928 return VINF_SUCCESS;
929}
930
931
932/**
933 * Writes the Command Buffer Tail Pointer Register (32-bit).
934 */
935static VBOXSTRICTRC iommuAmdCmdBufTailPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
936{
937 RT_NOREF(pDevIns, iReg);
938
939 /*
940 * IOMMU behavior is undefined when software writes a value outside the buffer length.
941 * In our emulation, we ignore the write entirely.
942 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
943 */
944 uint32_t const offBuf = u64Value & IOMMU_CMD_BUF_TAIL_PTR_VALID_MASK;
945 uint32_t const cbBuf = iommuAmdGetTotalBufLength(pThis->CmdBufBaseAddr.n.u4Len);
946 Assert(cbBuf <= _512K);
947 if (offBuf >= cbBuf)
948 {
949 LogFunc(("Setting CmdBufTailPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n", offBuf, cbBuf));
950 return VINF_SUCCESS;
951 }
952
953 /*
954 * IOMMU behavior is undefined if software advances the tail pointer equal to or beyond the
955 * head pointer after adding one or more commands to the buffer.
956 *
957 * However, we cannot enforce this strictly because it's legal for software to shrink the
958 * command queue (by reducing the offset) as well as wrap around the pointer (when head isn't
959 * at 0). Software might even make the queue empty by making head and tail equal which is
960 * allowed. I don't think we can or should try too hard to prevent software shooting itself
961 * in the foot here. As long as we make sure the offset value is within the circular buffer
962 * bounds (which we do by masking bits above) it should be sufficient.
963 */
964 pThis->CmdBufTailPtr.au32[0] = offBuf;
965
966 iommuAmdCmdThreadWakeUpIfNeeded(pDevIns);
967
968 LogFlowFunc(("Set CmdBufTailPtr to %#RX32\n", offBuf));
969 return VINF_SUCCESS;
970}
971
972
973/**
974 * Writes the Event Log Head Pointer Register (32-bit).
975 */
976static VBOXSTRICTRC iommuAmdEvtLogHeadPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
977{
978 RT_NOREF(pDevIns, iReg);
979
980 /*
981 * IOMMU behavior is undefined when software writes a value outside the buffer length.
982 * In our emulation, we ignore the write entirely.
983 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
984 */
985 uint32_t const offBuf = u64Value & IOMMU_EVT_LOG_HEAD_PTR_VALID_MASK;
986 uint32_t const cbBuf = iommuAmdGetTotalBufLength(pThis->EvtLogBaseAddr.n.u4Len);
987 Assert(cbBuf <= _512K);
988 if (offBuf >= cbBuf)
989 {
990 LogFunc(("Setting EvtLogHeadPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n", offBuf, cbBuf));
991 return VINF_SUCCESS;
992 }
993
994 /* Update the register. */
995 pThis->EvtLogHeadPtr.au32[0] = offBuf;
996
997 LogFlowFunc(("Set EvtLogHeadPtr to %#RX32\n", offBuf));
998 return VINF_SUCCESS;
999}
1000
1001
1002/**
1003 * Writes the Event Log Tail Pointer Register (32-bit).
1004 */
1005static VBOXSTRICTRC iommuAmdEvtLogTailPtr_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
1006{
1007 RT_NOREF(pDevIns, iReg);
1008
1009
1010 /*
1011 * IOMMU behavior is undefined when software writes this register while the event log is running.
1012 * In our emulation, we ignore the write entirely.
1013 * See AMD IOMMU spec. 3.3.13 "Command and Event Log Pointer Registers".
1014 */
1015 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
1016 if (Status.n.u1EvtLogRunning)
1017 {
1018 LogFunc(("Setting EvtLogTailPtr (%#RX64) when event log is running -> Ignored\n", u64Value));
1019 return VINF_SUCCESS;
1020 }
1021
1022 /*
1023 * IOMMU behavior is undefined when software writes a value outside the buffer length.
1024 * In our emulation, we ignore the write entirely.
1025 */
1026 uint32_t const offBuf = u64Value & IOMMU_EVT_LOG_TAIL_PTR_VALID_MASK;
1027 uint32_t const cbBuf = iommuAmdGetTotalBufLength(pThis->EvtLogBaseAddr.n.u4Len);
1028 Assert(cbBuf <= _512K);
1029 if (offBuf >= cbBuf)
1030 {
1031 LogFunc(("Setting EvtLogTailPtr (%#RX32) to a value that exceeds buffer length (%#RX32) -> Ignored\n", offBuf, cbBuf));
1032 return VINF_SUCCESS;
1033 }
1034
1035 /* Update the register. */
1036 pThis->EvtLogTailPtr.au32[0] = offBuf;
1037
1038 LogFlowFunc(("Set EvtLogTailPtr to %#RX32\n", offBuf));
1039 return VINF_SUCCESS;
1040}
1041
1042
1043/**
1044 * Writes the Status Register (64-bit).
1045 */
1046static VBOXSTRICTRC iommuAmdStatus_w(PPDMDEVINS pDevIns, PIOMMU pThis, uint32_t iReg, uint64_t u64Value)
1047{
1048 RT_NOREF(pDevIns, iReg);
1049
1050 /* Mask out all unrecognized bits. */
1051 u64Value &= IOMMU_STATUS_VALID_MASK;
1052
1053 /*
1054 * Compute RW1C (read/write-1-to-clear) bits and preserve the rest (which are read-only).
1055 * Writing 0 to an RW1C bit has no effect. Writing 1 to an RW1C bit, clears the bit if it's already 1.
1056 */
1057 IOMMU_STATUS_T const OldStatus = iommuAmdGetStatus(pThis);
1058 uint64_t const fOldRw1cBits = (OldStatus.u64 & IOMMU_STATUS_RW1C_MASK);
1059 uint64_t const fOldRoBits = (OldStatus.u64 & ~IOMMU_STATUS_RW1C_MASK);
1060 uint64_t const fNewRw1cBits = (u64Value & IOMMU_STATUS_RW1C_MASK);
1061
1062 uint64_t const uNewStatus = (fOldRw1cBits & ~fNewRw1cBits) | fOldRoBits;
1063
1064 /* Update the register. */
1065 ASMAtomicWriteU64(&pThis->Status.u64, uNewStatus);
1066 return VINF_SUCCESS;
1067}
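/* Editor's note, worked example of the RW1C computation above: if the RW1C bits
   currently read 11b and software writes 01b, then (old & ~new) = 10b; the bit
   written as 1 is cleared while the bit written as 0 is preserved. */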
1068
1069
1070#if 0
1071/**
1072 * Table 0: Registers-access table.
1073 */
1074static const IOMMUREGACC g_aTable0Regs[] =
1075{
1076
1077};
1078
1079/**
1080 * Table 1: Registers-access table.
1081 */
1082static const IOMMUREGACC g_aTable1Regs[] =
1083{
1084};
1085#endif
1086
1087
1088/**
1089 * Writes an IOMMU register (32-bit and 64-bit).
1090 *
1091 * @returns Strict VBox status code.
1092 * @param pDevIns The IOMMU device instance.
1093 * @param off MMIO byte offset to the register.
1094 * @param cb The size of the write access.
1095 * @param uValue The value being written.
1096 *
1097 * @thread EMT.
1098 */
1099static VBOXSTRICTRC iommuAmdWriteRegister(PPDMDEVINS pDevIns, uint32_t off, uint8_t cb, uint64_t uValue)
1100{
1101 Assert(off < IOMMU_MMIO_REGION_SIZE);
1102 Assert(cb == 4 || cb == 8);
1103 Assert(!(off & (cb - 1)));
1104
1105 LogFlowFunc(("off=%#x cb=%u uValue=%#RX64\n", off, cb, uValue));
1106
1107 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1108 switch (off)
1109 {
1110 case IOMMU_MMIO_OFF_DEV_TAB_BAR: return iommuAmdDevTabBar_w(pDevIns, pThis, off, uValue);
1111 case IOMMU_MMIO_OFF_CMD_BUF_BAR: return iommuAmdCmdBufBar_w(pDevIns, pThis, off, uValue);
1112 case IOMMU_MMIO_OFF_EVT_LOG_BAR: return iommuAmdEvtLogBar_w(pDevIns, pThis, off, uValue);
1113 case IOMMU_MMIO_OFF_CTRL: return iommuAmdCtrl_w(pDevIns, pThis, off, uValue);
1114 case IOMMU_MMIO_OFF_EXCL_BAR: return iommuAmdExclRangeBar_w(pDevIns, pThis, off, uValue);
1115 case IOMMU_MMIO_OFF_EXCL_RANGE_LIMIT: return iommuAmdExclRangeLimit_w(pDevIns, pThis, off, uValue);
1116 case IOMMU_MMIO_OFF_EXT_FEAT: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1117
1118 case IOMMU_MMIO_OFF_PPR_LOG_BAR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1119 case IOMMU_MMIO_OFF_HW_EVT_HI: return iommuAmdHwEvtHi_w(pDevIns, pThis, off, uValue);
1120 case IOMMU_MMIO_OFF_HW_EVT_LO: return iommuAmdHwEvtLo_w(pDevIns, pThis, off, uValue);
1121 case IOMMU_MMIO_OFF_HW_EVT_STATUS: return iommuAmdHwEvtStatus_w(pDevIns, pThis, off, uValue);
1122
1123 case IOMMU_MMIO_OFF_GALOG_BAR:
1124 case IOMMU_MMIO_OFF_GALOG_TAIL_ADDR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1125
1126 case IOMMU_MMIO_OFF_PPR_LOG_B_BAR:
1127 case IOMMU_MMIO_OFF_PPR_EVT_B_BAR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1128
1129 case IOMMU_MMIO_OFF_DEV_TAB_SEG_1:
1130 case IOMMU_MMIO_OFF_DEV_TAB_SEG_2:
1131 case IOMMU_MMIO_OFF_DEV_TAB_SEG_3:
1132 case IOMMU_MMIO_OFF_DEV_TAB_SEG_4:
1133 case IOMMU_MMIO_OFF_DEV_TAB_SEG_5:
1134 case IOMMU_MMIO_OFF_DEV_TAB_SEG_6:
1135 case IOMMU_MMIO_OFF_DEV_TAB_SEG_7: return iommuAmdDevTabSegBar_w(pDevIns, pThis, off, uValue);
1136
1137 case IOMMU_MMIO_OFF_DEV_SPECIFIC_FEAT:
1138 case IOMMU_MMIO_OFF_DEV_SPECIFIC_CTRL:
1139 case IOMMU_MMIO_OFF_DEV_SPECIFIC_STATUS: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1140
1141 case IOMMU_MMIO_OFF_MSI_VECTOR_0:
1142 case IOMMU_MMIO_OFF_MSI_VECTOR_1: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1143 case IOMMU_MMIO_OFF_MSI_CAP_HDR:
1144 {
1145 VBOXSTRICTRC rcStrict = iommuAmdMsiCapHdr_w(pDevIns, pThis, off, (uint32_t)uValue);
1146 if (cb == 4 || RT_FAILURE(rcStrict))
1147 return rcStrict;
1148 uValue >>= 32;
1149 RT_FALL_THRU();
1150 }
1151 case IOMMU_MMIO_OFF_MSI_ADDR_LO: return iommuAmdMsiAddrLo_w(pDevIns, pThis, off, uValue);
1152 case IOMMU_MMIO_OFF_MSI_ADDR_HI:
1153 {
1154 VBOXSTRICTRC rcStrict = iommuAmdMsiAddrHi_w(pDevIns, pThis, off, (uint32_t)uValue);
1155 if (cb == 4 || RT_FAILURE(rcStrict))
1156 return rcStrict;
1157 uValue >>= 32;
1158 RT_FALL_THRU();
1159 }
1160 case IOMMU_MMIO_OFF_MSI_DATA: return iommuAmdMsiData_w(pDevIns, pThis, off, uValue);
1161 case IOMMU_MMIO_OFF_MSI_MAPPING_CAP_HDR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1162
1163 case IOMMU_MMIO_OFF_PERF_OPT_CTRL: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1164
1165 case IOMMU_MMIO_OFF_XT_GEN_INTR_CTRL:
1166 case IOMMU_MMIO_OFF_XT_PPR_INTR_CTRL:
1167 case IOMMU_MMIO_OFF_XT_GALOG_INT_CTRL: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1168
1169 case IOMMU_MMIO_OFF_MARC_APER_BAR_0:
1170 case IOMMU_MMIO_OFF_MARC_APER_RELOC_0:
1171 case IOMMU_MMIO_OFF_MARC_APER_LEN_0:
1172 case IOMMU_MMIO_OFF_MARC_APER_BAR_1:
1173 case IOMMU_MMIO_OFF_MARC_APER_RELOC_1:
1174 case IOMMU_MMIO_OFF_MARC_APER_LEN_1:
1175 case IOMMU_MMIO_OFF_MARC_APER_BAR_2:
1176 case IOMMU_MMIO_OFF_MARC_APER_RELOC_2:
1177 case IOMMU_MMIO_OFF_MARC_APER_LEN_2:
1178 case IOMMU_MMIO_OFF_MARC_APER_BAR_3:
1179 case IOMMU_MMIO_OFF_MARC_APER_RELOC_3:
1180 case IOMMU_MMIO_OFF_MARC_APER_LEN_3: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1181
1182 case IOMMU_MMIO_OFF_RSVD_REG: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1183
1184 case IOMMU_MMIO_CMD_BUF_HEAD_PTR: return iommuAmdCmdBufHeadPtr_w(pDevIns, pThis, off, uValue);
1185 case IOMMU_MMIO_CMD_BUF_TAIL_PTR: return iommuAmdCmdBufTailPtr_w(pDevIns, pThis, off, uValue);
1186 case IOMMU_MMIO_EVT_LOG_HEAD_PTR: return iommuAmdEvtLogHeadPtr_w(pDevIns, pThis, off, uValue);
1187 case IOMMU_MMIO_EVT_LOG_TAIL_PTR: return iommuAmdEvtLogTailPtr_w(pDevIns, pThis, off, uValue);
1188
1189 case IOMMU_MMIO_OFF_STATUS: return iommuAmdStatus_w(pDevIns, pThis, off, uValue);
1190
1191 case IOMMU_MMIO_OFF_PPR_LOG_HEAD_PTR:
1192 case IOMMU_MMIO_OFF_PPR_LOG_TAIL_PTR:
1193
1194 case IOMMU_MMIO_OFF_GALOG_HEAD_PTR:
1195 case IOMMU_MMIO_OFF_GALOG_TAIL_PTR:
1196
1197 case IOMMU_MMIO_OFF_PPR_LOG_B_HEAD_PTR:
1198 case IOMMU_MMIO_OFF_PPR_LOG_B_TAIL_PTR:
1199
1200 case IOMMU_MMIO_OFF_EVT_LOG_B_HEAD_PTR:
1201 case IOMMU_MMIO_OFF_EVT_LOG_B_TAIL_PTR: return iommuAmdIgnore_w(pDevIns, pThis, off, uValue);
1202
1203 case IOMMU_MMIO_OFF_PPR_LOG_AUTO_RESP:
1204 case IOMMU_MMIO_OFF_PPR_LOG_OVERFLOW_EARLY:
1205 case IOMMU_MMIO_OFF_PPR_LOG_B_OVERFLOW_EARLY:
1206
1207 /* Not implemented. */
1208 case IOMMU_MMIO_OFF_SMI_FLT_FIRST:
1209 case IOMMU_MMIO_OFF_SMI_FLT_LAST:
1210 {
1211 LogFunc(("Writing unsupported register: SMI filter %u -> Ignored\n", (off - IOMMU_MMIO_OFF_SMI_FLT_FIRST) >> 3));
1212 return VINF_SUCCESS;
1213 }
1214
1215 /* Unknown. */
1216 default:
1217 {
1218 LogFunc(("Writing unknown register %u (%#x) with %#RX64 -> Ignored\n", off, off, uValue));
1219 return VINF_SUCCESS;
1220 }
1221 }
1222}
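/* Editor's note on the fall-through cases above: an 8-byte write to an 8-byte
   aligned 32-bit register (e.g. MSI_CAP_HDR) is handled as two 32-bit writes;
   the low dword goes to that register and, via RT_FALL_THRU() after
   uValue >>= 32, the high dword goes to the adjacent register (MSI_ADDR_LO). */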
1223
1224
1225/**
1226 * Reads an IOMMU register (64-bit) given its MMIO offset.
1227 *
1228 * All reads are 64-bit but reads to 32-bit registers that are aligned on an 8-byte
1229 * boundary include the lower half of the subsequent register.
1230 *
1231 * This is because most registers are 64-bit and aligned on 8-byte boundaries but
1232 * some are really 32-bit registers aligned on an 8-byte boundary. We cannot assume
1233 * software will only perform 32-bit reads on those 32-bit registers that are
1234 * aligned on 8-byte boundaries.
1235 *
1236 * @returns Strict VBox status code.
1237 * @param pDevIns The IOMMU device instance.
1238 * @param off The MMIO offset of the register in bytes.
1239 * @param puResult Where to store the value being read.
1240 *
1241 * @thread EMT.
1242 */
1243static VBOXSTRICTRC iommuAmdReadRegister(PPDMDEVINS pDevIns, uint32_t off, uint64_t *puResult)
1244{
1245 Assert(off < IOMMU_MMIO_REGION_SIZE);
1246 Assert(!(off & 7) || !(off & 3));
1247
1248 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1249 PCPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
1250 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
1251
1252 LogFlowFunc(("off=%#x\n", off));
1253
1254 /** @todo IOMMU: fine-grained locking? */
1255 uint64_t uReg;
1256 switch (off)
1257 {
1258 case IOMMU_MMIO_OFF_DEV_TAB_BAR: uReg = pThis->aDevTabBaseAddrs[0].u64; break;
1259 case IOMMU_MMIO_OFF_CMD_BUF_BAR: uReg = pThis->CmdBufBaseAddr.u64; break;
1260 case IOMMU_MMIO_OFF_EVT_LOG_BAR: uReg = pThis->EvtLogBaseAddr.u64; break;
1261 case IOMMU_MMIO_OFF_CTRL: uReg = pThis->Ctrl.u64; break;
1262 case IOMMU_MMIO_OFF_EXCL_BAR: uReg = pThis->ExclRangeBaseAddr.u64; break;
1263 case IOMMU_MMIO_OFF_EXCL_RANGE_LIMIT: uReg = pThis->ExclRangeLimit.u64; break;
1264 case IOMMU_MMIO_OFF_EXT_FEAT: uReg = pThis->ExtFeat.u64; break;
1265
1266 case IOMMU_MMIO_OFF_PPR_LOG_BAR: uReg = pThis->PprLogBaseAddr.u64; break;
1267 case IOMMU_MMIO_OFF_HW_EVT_HI: uReg = pThis->HwEvtHi.u64; break;
1268 case IOMMU_MMIO_OFF_HW_EVT_LO: uReg = pThis->HwEvtLo; break;
1269 case IOMMU_MMIO_OFF_HW_EVT_STATUS: uReg = pThis->HwEvtStatus.u64; break;
1270
1271 case IOMMU_MMIO_OFF_GALOG_BAR: uReg = pThis->GALogBaseAddr.u64; break;
1272 case IOMMU_MMIO_OFF_GALOG_TAIL_ADDR: uReg = pThis->GALogTailAddr.u64; break;
1273
1274 case IOMMU_MMIO_OFF_PPR_LOG_B_BAR: uReg = pThis->PprLogBBaseAddr.u64; break;
1275 case IOMMU_MMIO_OFF_PPR_EVT_B_BAR: uReg = pThis->EvtLogBBaseAddr.u64; break;
1276
1277 case IOMMU_MMIO_OFF_DEV_TAB_SEG_1:
1278 case IOMMU_MMIO_OFF_DEV_TAB_SEG_2:
1279 case IOMMU_MMIO_OFF_DEV_TAB_SEG_3:
1280 case IOMMU_MMIO_OFF_DEV_TAB_SEG_4:
1281 case IOMMU_MMIO_OFF_DEV_TAB_SEG_5:
1282 case IOMMU_MMIO_OFF_DEV_TAB_SEG_6:
1283 case IOMMU_MMIO_OFF_DEV_TAB_SEG_7:
1284 {
1285 uint8_t const offDevTabSeg = (off - IOMMU_MMIO_OFF_DEV_TAB_SEG_FIRST) >> 3;
1286 uint8_t const idxDevTabSeg = offDevTabSeg + 1;
1287 Assert(idxDevTabSeg < RT_ELEMENTS(pThis->aDevTabBaseAddrs));
1288 uReg = pThis->aDevTabBaseAddrs[idxDevTabSeg].u64;
1289 break;
1290 }
1291
1292 case IOMMU_MMIO_OFF_DEV_SPECIFIC_FEAT: uReg = pThis->DevSpecificFeat.u64; break;
1293 case IOMMU_MMIO_OFF_DEV_SPECIFIC_CTRL: uReg = pThis->DevSpecificCtrl.u64; break;
1294 case IOMMU_MMIO_OFF_DEV_SPECIFIC_STATUS: uReg = pThis->DevSpecificStatus.u64; break;
1295
1296 case IOMMU_MMIO_OFF_MSI_VECTOR_0: uReg = pThis->MiscInfo.u64; break;
1297 case IOMMU_MMIO_OFF_MSI_VECTOR_1: uReg = pThis->MiscInfo.au32[1]; break;
1298 case IOMMU_MMIO_OFF_MSI_CAP_HDR:
1299 {
1300 uint32_t const uMsiCapHdr = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR);
1301 uint32_t const uMsiAddrLo = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO);
1302 uReg = RT_MAKE_U64(uMsiCapHdr, uMsiAddrLo);
1303 break;
1304 }
1305 case IOMMU_MMIO_OFF_MSI_ADDR_LO:
1306 {
1307 uReg = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO);
1308 break;
1309 }
1310 case IOMMU_MMIO_OFF_MSI_ADDR_HI:
1311 {
1312 uint32_t const uMsiAddrHi = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI);
1313 uint32_t const uMsiData = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA);
1314 uReg = RT_MAKE_U64(uMsiAddrHi, uMsiData);
1315 break;
1316 }
1317 case IOMMU_MMIO_OFF_MSI_DATA:
1318 {
1319 uReg = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA);
1320 break;
1321 }
1322 case IOMMU_MMIO_OFF_MSI_MAPPING_CAP_HDR:
1323 {
1324 /*
1325 * The PCI spec. lists MSI Mapping Capability 08H as related to HyperTransport capability.
1326 * The AMD IOMMU spec. fails to mention it explicitly and lists values for this register as
1327 * though HyperTransport is supported. Since we don't support HyperTransport, we
1328 * just return 0 for this register.
1329 */
1330 uReg = RT_MAKE_U64(0, pThis->PerfOptCtrl.u32);
1331 break;
1332 }
1333
1334 case IOMMU_MMIO_OFF_PERF_OPT_CTRL: uReg = pThis->PerfOptCtrl.u32; break;
1335
1336 case IOMMU_MMIO_OFF_XT_GEN_INTR_CTRL: uReg = pThis->XtGenIntrCtrl.u64; break;
1337 case IOMMU_MMIO_OFF_XT_PPR_INTR_CTRL: uReg = pThis->XtPprIntrCtrl.u64; break;
1338 case IOMMU_MMIO_OFF_XT_GALOG_INT_CTRL: uReg = pThis->XtGALogIntrCtrl.u64; break;
1339
1340 case IOMMU_MMIO_OFF_MARC_APER_BAR_0: uReg = pThis->aMarcApers[0].Base.u64; break;
1341 case IOMMU_MMIO_OFF_MARC_APER_RELOC_0: uReg = pThis->aMarcApers[0].Reloc.u64; break;
1342 case IOMMU_MMIO_OFF_MARC_APER_LEN_0: uReg = pThis->aMarcApers[0].Length.u64; break;
1343 case IOMMU_MMIO_OFF_MARC_APER_BAR_1: uReg = pThis->aMarcApers[1].Base.u64; break;
1344 case IOMMU_MMIO_OFF_MARC_APER_RELOC_1: uReg = pThis->aMarcApers[1].Reloc.u64; break;
1345 case IOMMU_MMIO_OFF_MARC_APER_LEN_1: uReg = pThis->aMarcApers[1].Length.u64; break;
1346 case IOMMU_MMIO_OFF_MARC_APER_BAR_2: uReg = pThis->aMarcApers[2].Base.u64; break;
1347 case IOMMU_MMIO_OFF_MARC_APER_RELOC_2: uReg = pThis->aMarcApers[2].Reloc.u64; break;
1348 case IOMMU_MMIO_OFF_MARC_APER_LEN_2: uReg = pThis->aMarcApers[2].Length.u64; break;
1349 case IOMMU_MMIO_OFF_MARC_APER_BAR_3: uReg = pThis->aMarcApers[3].Base.u64; break;
1350 case IOMMU_MMIO_OFF_MARC_APER_RELOC_3: uReg = pThis->aMarcApers[3].Reloc.u64; break;
1351 case IOMMU_MMIO_OFF_MARC_APER_LEN_3: uReg = pThis->aMarcApers[3].Length.u64; break;
1352
1353 case IOMMU_MMIO_OFF_RSVD_REG: uReg = pThis->RsvdReg; break;
1354
1355 case IOMMU_MMIO_CMD_BUF_HEAD_PTR: uReg = pThis->CmdBufHeadPtr.u64; break;
1356 case IOMMU_MMIO_CMD_BUF_TAIL_PTR: uReg = pThis->CmdBufTailPtr.u64; break;
1357 case IOMMU_MMIO_EVT_LOG_HEAD_PTR: uReg = pThis->EvtLogHeadPtr.u64; break;
1358 case IOMMU_MMIO_EVT_LOG_TAIL_PTR: uReg = pThis->EvtLogTailPtr.u64; break;
1359
1360 case IOMMU_MMIO_OFF_STATUS: uReg = pThis->Status.u64; break;
1361
1362 case IOMMU_MMIO_OFF_PPR_LOG_HEAD_PTR: uReg = pThis->PprLogHeadPtr.u64; break;
1363 case IOMMU_MMIO_OFF_PPR_LOG_TAIL_PTR: uReg = pThis->PprLogTailPtr.u64; break;
1364
1365 case IOMMU_MMIO_OFF_GALOG_HEAD_PTR: uReg = pThis->GALogHeadPtr.u64; break;
1366 case IOMMU_MMIO_OFF_GALOG_TAIL_PTR: uReg = pThis->GALogTailPtr.u64; break;
1367
1368 case IOMMU_MMIO_OFF_PPR_LOG_B_HEAD_PTR: uReg = pThis->PprLogBHeadPtr.u64; break;
1369 case IOMMU_MMIO_OFF_PPR_LOG_B_TAIL_PTR: uReg = pThis->PprLogBTailPtr.u64; break;
1370
1371 case IOMMU_MMIO_OFF_EVT_LOG_B_HEAD_PTR: uReg = pThis->EvtLogBHeadPtr.u64; break;
1372 case IOMMU_MMIO_OFF_EVT_LOG_B_TAIL_PTR: uReg = pThis->EvtLogBTailPtr.u64; break;
1373
1374 case IOMMU_MMIO_OFF_PPR_LOG_AUTO_RESP: uReg = pThis->PprLogAutoResp.u64; break;
1375 case IOMMU_MMIO_OFF_PPR_LOG_OVERFLOW_EARLY: uReg = pThis->PprLogOverflowEarly.u64; break;
1376 case IOMMU_MMIO_OFF_PPR_LOG_B_OVERFLOW_EARLY: uReg = pThis->PprLogBOverflowEarly.u64; break;
1377
1378 /* Not implemented. */
1379 case IOMMU_MMIO_OFF_SMI_FLT_FIRST:
1380 case IOMMU_MMIO_OFF_SMI_FLT_LAST:
1381 {
1382 LogFunc(("Reading unsupported register: SMI filter %u\n", (off - IOMMU_MMIO_OFF_SMI_FLT_FIRST) >> 3));
1383 uReg = 0;
1384 break;
1385 }
1386
1387 /* Unknown. */
1388 default:
1389 {
1390 LogFunc(("Reading unknown register %u (%#x) -> 0\n", off, off));
1391 uReg = 0;
1392 return VINF_IOM_MMIO_UNUSED_00;
1393 }
1394 }
1395
1396 *puResult = uReg;
1397 return VINF_SUCCESS;
1398}
1399
1400
1401/**
1402 * Raises the MSI interrupt for the IOMMU device.
1403 *
1404 * @param pDevIns The IOMMU device instance.
1405 *
1406 * @thread Any.
1407 * @remarks The IOMMU lock may or may not be held.
1408 */
1409static void iommuAmdRaiseMsiInterrupt(PPDMDEVINS pDevIns)
1410{
1411 if (iommuAmdIsMsiEnabled(pDevIns))
1412 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
1413}
1414
1415
1416/**
1417 * Clears the MSI interrupt for the IOMMU device.
1418 *
1419 * @param pDevIns The IOMMU device instance.
1420 *
1421 * @thread Any.
1422 * @remarks The IOMMU lock may or may not be held.
1423 */
1424static void iommuAmdClearMsiInterrupt(PPDMDEVINS pDevIns)
1425{
1426 if (iommuAmdIsMsiEnabled(pDevIns))
1427 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
1428}
1429
1430
1431/**
1432 * Writes an entry to the event log in memory.
1433 *
1434 * @returns VBox status code.
1435 * @param pDevIns The IOMMU device instance.
1436 * @param pEvent The event to log.
1437 *
1438 * @thread Any.
1439 */
1440static int iommuAmdWriteEvtLogEntry(PPDMDEVINS pDevIns, PCEVT_GENERIC_T pEvent)
1441{
1442 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1443
1444 IOMMU_ASSERT_LOCKED(pDevIns);
1445
1446 /* Check if event logging is active and the log has not overflowed. */
1447 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
1448 if ( Status.n.u1EvtLogRunning
1449 && !Status.n.u1EvtOverflow)
1450 {
1451 uint32_t const cbEvt = sizeof(*pEvent);
1452
1453 /* Get the offset we need to write the event to in memory (circular buffer offset). */
1454 uint32_t const offEvt = pThis->EvtLogTailPtr.n.off;
1455 Assert(!(offEvt & ~IOMMU_EVT_LOG_TAIL_PTR_VALID_MASK));
1456
1457 /* Ensure we have space in the event log. */
1458 uint32_t const cMaxEvts = iommuAmdGetBufMaxEntries(pThis->EvtLogBaseAddr.n.u4Len);
1459 uint32_t const cEvts = iommuAmdGetEvtLogEntryCount(pThis);
1460 if (cEvts + 1 < cMaxEvts)
1461 {
1462 /* Write the event log entry to memory. */
1463 RTGCPHYS const GCPhysEvtLog = pThis->EvtLogBaseAddr.n.u40Base << X86_PAGE_4K_SHIFT;
1464 RTGCPHYS const GCPhysEvtLogEntry = GCPhysEvtLog + offEvt;
1465 int rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhysEvtLogEntry, pEvent, cbEvt);
1466 if (RT_FAILURE(rc))
1467 LogFunc(("Failed to write event log entry at %#RGp. rc=%Rrc\n", GCPhysEvtLogEntry, rc));
1468
1469 /* Increment the event log tail pointer. */
1470 uint32_t const cbEvtLog = iommuAmdGetTotalBufLength(pThis->EvtLogBaseAddr.n.u4Len);
1471 pThis->EvtLogTailPtr.n.off = (offEvt + cbEvt) % cbEvtLog;
1472
1473 /* Indicate that an event log entry was written. */
1474 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_EVT_LOG_INTR);
1475
1476 /* Check and signal an interrupt if software wants to receive one when an event log entry is written. */
1477 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
1478 if (Ctrl.n.u1EvtIntrEn)
1479 iommuAmdRaiseMsiInterrupt(pDevIns);
1480 }
1481 else
1482 {
1483 /* Indicate that the event log has overflowed. */
1484 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_EVT_LOG_OVERFLOW);
1485
1486 /* Check and signal an interrupt if software wants to receive one when the event log has overflowed. */
1487 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
1488 if (Ctrl.n.u1EvtIntrEn)
1489 iommuAmdRaiseMsiInterrupt(pDevIns);
1490 }
1491 }
1492
1493 return VINF_SUCCESS;
1494}
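/* Editor's note, worked example of the tail update above: with a 4K event log
   (256 entries) and the tail at byte offset 4080, writing one 16-byte entry
   advances it to (4080 + 16) % 4096 = 0, wrapping to the start of the log. */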
1495
1496
1497/**
1498 * Sets an event in the hardware error registers.
1499 *
1500 * @param pDevIns The IOMMU device instance.
1501 * @param pEvent The event.
1502 *
1503 * @thread Any.
1504 */
1505static void iommuAmdSetHwError(PPDMDEVINS pDevIns, PCEVT_GENERIC_T pEvent)
1506{
1507 IOMMU_ASSERT_LOCKED(pDevIns);
1508
1509 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1510 if (pThis->ExtFeat.n.u1HwErrorSup)
1511 {
1512 if (pThis->HwEvtStatus.n.u1Valid)
1513 pThis->HwEvtStatus.n.u1Overflow = 1;
1514 pThis->HwEvtStatus.n.u1Valid = 1;
1515 pThis->HwEvtHi.u64 = RT_MAKE_U64(pEvent->au32[0], pEvent->au32[1]);
1516 pThis->HwEvtLo = RT_MAKE_U64(pEvent->au32[2], pEvent->au32[3]);
1517 Assert( pThis->HwEvtHi.n.u4EvtCode == IOMMU_EVT_DEV_TAB_HW_ERROR
1518 || pThis->HwEvtHi.n.u4EvtCode == IOMMU_EVT_PAGE_TAB_HW_ERROR
1519 || pThis->HwEvtHi.n.u4EvtCode == IOMMU_EVT_COMMAND_HW_ERROR);
1520 }
1521}
1522
1523
1524/**
1525 * Initializes a PAGE_TAB_HARDWARE_ERROR event.
1526 *
1527 * @param uDevId The device ID.
1528 * @param uDomainId The domain ID.
1529 * @param GCPhysPtEntity The system physical address of the page table
1530 * entity.
1531 * @param enmOp The IOMMU operation being performed.
1532 * @param pEvtPageTabHwErr Where to store the initialized event.
1533 */
1534static void iommuAmdInitPageTabHwErrorEvent(uint16_t uDevId, uint16_t uDomainId, RTGCPHYS GCPhysPtEntity, IOMMUOP enmOp,
1535 PEVT_PAGE_TAB_HW_ERR_T pEvtPageTabHwErr)
1536{
1537 memset(pEvtPageTabHwErr, 0, sizeof(*pEvtPageTabHwErr));
1538 pEvtPageTabHwErr->n.u16DevId = uDevId;
1539 pEvtPageTabHwErr->n.u16DomainOrPasidLo = uDomainId;
1540 pEvtPageTabHwErr->n.u1GuestOrNested = 0;
1541 pEvtPageTabHwErr->n.u1Interrupt = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
1542 pEvtPageTabHwErr->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
1543 pEvtPageTabHwErr->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
1544 pEvtPageTabHwErr->n.u2Type = enmOp == IOMMUOP_CMD ? HWEVTTYPE_DATA_ERROR : HWEVTTYPE_TARGET_ABORT;
1545 pEvtPageTabHwErr->n.u4EvtCode = IOMMU_EVT_PAGE_TAB_HW_ERROR;
1546 pEvtPageTabHwErr->n.u64Addr = GCPhysPtEntity;
1547}
1548
1549
1550/**
1551 * Raises a PAGE_TAB_HARDWARE_ERROR event.
1552 *
1553 * @param pDevIns The IOMMU device instance.
1554 * @param enmOp The IOMMU operation being performed.
1555 * @param pEvtPageTabHwErr The page table hardware error event.
1556 *
1557 * @thread Any.
1558 */
1559static void iommuAmdRaisePageTabHwErrorEvent(PPDMDEVINS pDevIns, IOMMUOP enmOp, PEVT_PAGE_TAB_HW_ERR_T pEvtPageTabHwErr)
1560{
1561 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_PAGE_TAB_HW_ERR_T));
1562 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtPageTabHwErr;
1563
1564 IOMMU_LOCK_NORET(pDevIns);
1565
1566 iommuAmdSetHwError(pDevIns, pEvent);
1567 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1568 if (enmOp != IOMMUOP_CMD)
1569 iommuAmdSetPciTargetAbort(pDevIns);
1570
1571 IOMMU_UNLOCK(pDevIns);
1572
1573 LogFunc(("Raised PAGE_TAB_HARDWARE_ERROR. uDevId=%#x uDomainId=%#x GCPhysPtEntity=%#RGp enmOp=%u u2Type=%u\n",
1574 pEvtPageTabHwErr->n.u16DevId, pEvtPageTabHwErr->n.u16DomainOrPasidLo, pEvtPageTabHwErr->n.u64Addr, enmOp,
1575 pEvtPageTabHwErr->n.u2Type));
1576}
1577
1578
1579/**
1580 * Initializes a COMMAND_HARDWARE_ERROR event.
1581 *
1582 * @param GCPhysAddr The system physical address the IOMMU attempted to access.
1583 * @param pEvtCmdHwErr Where to store the initialized event.
1584 */
1585static void iommuAmdInitCmdHwErrorEvent(RTGCPHYS GCPhysAddr, PEVT_CMD_HW_ERR_T pEvtCmdHwErr)
1586{
1587 memset(pEvtCmdHwErr, 0, sizeof(*pEvtCmdHwErr));
1588 pEvtCmdHwErr->n.u2Type = HWEVTTYPE_DATA_ERROR;
1589 pEvtCmdHwErr->n.u4EvtCode = IOMMU_EVT_COMMAND_HW_ERROR;
1590 pEvtCmdHwErr->n.u64Addr = GCPhysAddr;
1591}
1592
1593
1594/**
1595 * Raises a COMMAND_HARDWARE_ERROR event.
1596 *
1597 * @param pDevIns The IOMMU device instance.
1598 * @param pEvtCmdHwErr The command hardware error event.
1599 *
1600 * @thread Any.
1601 */
1602static void iommuAmdRaiseCmdHwErrorEvent(PPDMDEVINS pDevIns, PCEVT_CMD_HW_ERR_T pEvtCmdHwErr)
1603{
1604 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_CMD_HW_ERR_T));
1605 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtCmdHwErr;
1606 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1607
1608 IOMMU_LOCK_NORET(pDevIns);
1609
1610 iommuAmdSetHwError(pDevIns, pEvent);
1611 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1612 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_CMD_BUF_RUNNING);
1613
1614 IOMMU_UNLOCK(pDevIns);
1615
1616 LogFunc(("Raised COMMAND_HARDWARE_ERROR. GCPhysCmd=%#RGp u2Type=%u\n", pEvtCmdHwErr->n.u64Addr, pEvtCmdHwErr->n.u2Type));
1617}
1618
1619
1620/**
1621 * Initializes a DEV_TAB_HARDWARE_ERROR event.
1622 *
1623 * @param uDevId The device ID.
1624 * @param GCPhysDte The system physical address of the failed device table
1625 * access.
1626 * @param enmOp The IOMMU operation being performed.
1627 * @param pEvtDevTabHwErr Where to store the initialized event.
1628 */
1629static void iommuAmdInitDevTabHwErrorEvent(uint16_t uDevId, RTGCPHYS GCPhysDte, IOMMUOP enmOp,
1630 PEVT_DEV_TAB_HW_ERROR_T pEvtDevTabHwErr)
1631{
1632 memset(pEvtDevTabHwErr, 0, sizeof(*pEvtDevTabHwErr));
1633 pEvtDevTabHwErr->n.u16DevId = uDevId;
1634 pEvtDevTabHwErr->n.u1Intr = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
1635 /** @todo IOMMU: Any other transaction type that can set read/write bit? */
1636 pEvtDevTabHwErr->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
1637 pEvtDevTabHwErr->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
1638 pEvtDevTabHwErr->n.u2Type = enmOp == IOMMUOP_CMD ? HWEVTTYPE_DATA_ERROR : HWEVTTYPE_TARGET_ABORT;
1639 pEvtDevTabHwErr->n.u4EvtCode = IOMMU_EVT_DEV_TAB_HW_ERROR;
1640 pEvtDevTabHwErr->n.u64Addr = GCPhysDte;
1641}
1642
1643
1644/**
1645 * Raises a DEV_TAB_HARDWARE_ERROR event.
1646 *
1647 * @param pDevIns The IOMMU device instance.
1648 * @param enmOp The IOMMU operation being performed.
1649 * @param pEvtDevTabHwErr The device table hardware error event.
1650 *
1651 * @thread Any.
1652 */
1653static void iommuAmdRaiseDevTabHwErrorEvent(PPDMDEVINS pDevIns, IOMMUOP enmOp, PEVT_DEV_TAB_HW_ERROR_T pEvtDevTabHwErr)
1654{
1655 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_DEV_TAB_HW_ERROR_T));
1656 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtDevTabHwErr;
1657
1658 IOMMU_LOCK_NORET(pDevIns);
1659
1660 iommuAmdSetHwError(pDevIns, pEvent);
1661 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1662 if (enmOp != IOMMUOP_CMD)
1663 iommuAmdSetPciTargetAbort(pDevIns);
1664
1665 IOMMU_UNLOCK(pDevIns);
1666
1667 LogFunc(("Raised DEV_TAB_HARDWARE_ERROR. uDevId=%#x GCPhysDte=%#RGp enmOp=%u u2Type=%u\n", pEvtDevTabHwErr->n.u16DevId,
1668 pEvtDevTabHwErr->n.u64Addr, enmOp, pEvtDevTabHwErr->n.u2Type));
1669}
1670
1671
1672/**
1673 * Initializes an ILLEGAL_COMMAND_ERROR event.
1674 *
1675 * @param GCPhysCmd The system physical address of the failed command
1676 * access.
1677 * @param pEvtIllegalCmd Where to store the initialized event.
1678 */
1679static void iommuAmdInitIllegalCmdEvent(RTGCPHYS GCPhysCmd, PEVT_ILLEGAL_CMD_ERR_T pEvtIllegalCmd)
1680{
1681 Assert(!(GCPhysCmd & UINT64_C(0xf)));
1682 memset(pEvtIllegalCmd, 0, sizeof(*pEvtIllegalCmd));
1683 pEvtIllegalCmd->n.u4EvtCode = IOMMU_EVT_ILLEGAL_CMD_ERROR;
1684 pEvtIllegalCmd->n.u64Addr = GCPhysCmd;
1685}
1686
1687
1688/**
1689 * Raises an ILLEGAL_COMMAND_ERROR event.
1690 *
1691 * @param pDevIns The IOMMU device instance.
1692 * @param pEvtIllegalCmd The illegal command error event.
1693 */
1694static void iommuAmdRaiseIllegalCmdEvent(PPDMDEVINS pDevIns, PCEVT_ILLEGAL_CMD_ERR_T pEvtIllegalCmd)
1695{
1696 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_ILLEGAL_CMD_ERR_T));
1697 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIllegalCmd;
1698 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1699
1700 IOMMU_LOCK_NORET(pDevIns);
1701
1702 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1703 ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_CMD_BUF_RUNNING);
1704
1705 IOMMU_UNLOCK(pDevIns);
1706
1707 LogFunc(("Raised ILLEGAL_COMMAND_ERROR. Addr=%#RGp\n", pEvtIllegalCmd->n.u64Addr));
1708}
1709
1710
1711/**
1712 * Initializes an ILLEGAL_DEV_TABLE_ENTRY event.
1713 *
1714 * @param uDevId The device ID.
1715 * @param uIova The I/O virtual address.
1716 * @param fRsvdNotZero Whether reserved bits are not zero. Pass @c false if the
1717 * event was caused by an invalid level encoding in the
1718 * DTE.
1719 * @param enmOp The IOMMU operation being performed.
1720 * @param pEvtIllegalDte Where to store the initialized event.
1721 */
1722static void iommuAmdInitIllegalDteEvent(uint16_t uDevId, uint64_t uIova, bool fRsvdNotZero, IOMMUOP enmOp,
1723 PEVT_ILLEGAL_DTE_T pEvtIllegalDte)
1724{
1725 memset(pEvtIllegalDte, 0, sizeof(*pEvtIllegalDte));
1726 pEvtIllegalDte->n.u16DevId = uDevId;
1727 pEvtIllegalDte->n.u1Interrupt = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
1728 pEvtIllegalDte->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
1729 pEvtIllegalDte->n.u1RsvdNotZero = fRsvdNotZero;
1730 pEvtIllegalDte->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
1731 pEvtIllegalDte->n.u4EvtCode = IOMMU_EVT_ILLEGAL_DEV_TAB_ENTRY;
1732 pEvtIllegalDte->n.u64Addr = uIova & ~UINT64_C(0x3);
1733 /** @todo r=ramshankar: Not sure why the last 2 bits are marked as reserved by the
1734 * IOMMU spec here but not for the same field in the I/O page fault event. */
1735 Assert(!(uIova & UINT64_C(0x3)));
1736}
1737
1738
1739/**
1740 * Raises an ILLEGAL_DEV_TABLE_ENTRY event.
1741 *
1742 * @param pDevIns The IOMMU instance data.
1743 * @param enmOp The IOMMU operation being performed.
1744 * @param pEvtIllegalDte The illegal device table entry event.
1745 * @param enmEvtType The illegal device table entry event type.
1746 *
1747 * @thread Any.
1748 */
1749static void iommuAmdRaiseIllegalDteEvent(PPDMDEVINS pDevIns, IOMMUOP enmOp, PCEVT_ILLEGAL_DTE_T pEvtIllegalDte,
1750 EVT_ILLEGAL_DTE_TYPE_T enmEvtType)
1751{
1752 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_ILLEGAL_DTE_T));
1753 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIllegalDte;
1754
1755 IOMMU_LOCK_NORET(pDevIns);
1756
1757 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1758 if (enmOp != IOMMUOP_CMD)
1759 iommuAmdSetPciTargetAbort(pDevIns);
1760
1761 IOMMU_UNLOCK(pDevIns);
1762
1763 LogFunc(("Raised ILLEGAL_DTE_EVENT. uDevId=%#x uIova=%#RX64 enmOp=%u enmEvtType=%u\n", pEvtIllegalDte->n.u16DevId,
1764 pEvtIllegalDte->n.u64Addr, enmOp, enmEvtType));
1765 NOREF(enmEvtType);
1766}
1767
1768
1769/**
1770 * Initializes an IO_PAGE_FAULT event.
1771 *
1772 * @param uDevId The device ID.
1773 * @param uDomainId The domain ID.
1774 * @param uIova The I/O virtual address being accessed.
1775 * @param fPresent Whether the transaction was to a page marked as present
1776 * (including DTE.V=1) or to an interrupt marked as
1777 * remapped (IRTE.RemapEn=1).
1778 * @param fRsvdNotZero Whether reserved bits are not zero. Pass @c false if
1779 * the I/O page fault was caused by invalid level
1780 * encoding.
1781 * @param fPermDenied Permission denied for the address being accessed.
1782 * @param enmOp The IOMMU operation being performed.
1783 * @param pEvtIoPageFault Where to store the initialized event.
1784 */
1785static void iommuAmdInitIoPageFaultEvent(uint16_t uDevId, uint16_t uDomainId, uint64_t uIova, bool fPresent, bool fRsvdNotZero,
1786 bool fPermDenied, IOMMUOP enmOp, PEVT_IO_PAGE_FAULT_T pEvtIoPageFault)
1787{
1788 Assert(!fPermDenied || fPresent);
1789 memset(pEvtIoPageFault, 0, sizeof(*pEvtIoPageFault));
1790 pEvtIoPageFault->n.u16DevId = uDevId;
1791 //pEvtIoPageFault->n.u4PasidHi = 0;
1792 pEvtIoPageFault->n.u16DomainOrPasidLo = uDomainId;
1793 //pEvtIoPageFault->n.u1GuestOrNested = 0;
1794 //pEvtIoPageFault->n.u1NoExecute = 0;
1795 //pEvtIoPageFault->n.u1User = 0;
1796 pEvtIoPageFault->n.u1Interrupt = RT_BOOL(enmOp == IOMMUOP_INTR_REQ);
1797 pEvtIoPageFault->n.u1Present = fPresent;
1798 pEvtIoPageFault->n.u1ReadWrite = RT_BOOL(enmOp == IOMMUOP_MEM_WRITE);
1799 pEvtIoPageFault->n.u1PermDenied = fPermDenied;
1800 pEvtIoPageFault->n.u1RsvdNotZero = fRsvdNotZero;
1801 pEvtIoPageFault->n.u1Translation = RT_BOOL(enmOp == IOMMUOP_TRANSLATE_REQ);
1802 pEvtIoPageFault->n.u4EvtCode = IOMMU_EVT_IO_PAGE_FAULT;
1803 pEvtIoPageFault->n.u64Addr = uIova;
1804}
1805
1806
1807/**
1808 * Raises an IO_PAGE_FAULT event.
1809 *
1810 * @param pDevIns The IOMMU instance data.
1811 * @param pDte The device table entry. Optional, can be NULL
1812 * depending on @a enmOp.
1813 * @param pIrte The interrupt remapping table entry. Optional, can
1814 * be NULL depending on @a enmOp.
1815 * @param enmOp The IOMMU operation being performed.
1816 * @param pEvtIoPageFault The I/O page fault event.
1817 * @param enmEvtType The I/O page fault event type.
1818 *
1819 * @thread Any.
1820 */
1821static void iommuAmdRaiseIoPageFaultEvent(PPDMDEVINS pDevIns, PCDTE_T pDte, PCIRTE_T pIrte, IOMMUOP enmOp,
1822 PCEVT_IO_PAGE_FAULT_T pEvtIoPageFault, EVT_IO_PAGE_FAULT_TYPE_T enmEvtType)
1823{
1824 AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_IO_PAGE_FAULT_T));
1825 PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIoPageFault;
1826
1827 IOMMU_LOCK_NORET(pDevIns);
1828
1829 bool fSuppressEvtLogging = false;
1830 if ( enmOp == IOMMUOP_MEM_READ
1831 || enmOp == IOMMUOP_MEM_WRITE)
1832 {
1833 if ( pDte
1834 && pDte->n.u1Valid)
1835 {
1836 fSuppressEvtLogging = pDte->n.u1SuppressAllPfEvents;
1837 /** @todo IOMMU: Implement DTE.SE bit, i.e. device ID specific I/O page fault
1838 * suppression. Perhaps will be possible when we complete IOTLB/cache
1839 * handling. */
1840 }
1841 }
1842 else if (enmOp == IOMMUOP_INTR_REQ)
1843 {
1844 if ( pDte
1845 && pDte->n.u1IntrMapValid)
1846 fSuppressEvtLogging = !pDte->n.u1IgnoreUnmappedIntrs;
1847
1848 if ( !fSuppressEvtLogging
1849 && pIrte)
1850 fSuppressEvtLogging = pIrte->n.u1SuppressPf;
1851 }
1852 /* else: Events are never suppressed for commands. */
1853
1854 switch (enmEvtType)
1855 {
1856 case kIoPageFaultType_PermDenied:
1857 {
1858 /* Cannot be triggered by a command. */
1859 Assert(enmOp != IOMMUOP_CMD);
1860 RT_FALL_THRU();
1861 }
1862 case kIoPageFaultType_DteRsvdPagingMode:
1863 case kIoPageFaultType_PteInvalidPageSize:
1864 case kIoPageFaultType_PteInvalidLvlEncoding:
1865 case kIoPageFaultType_SkippedLevelIovaNotZero:
1866 case kIoPageFaultType_PteRsvdNotZero:
1867 case kIoPageFaultType_PteValidNotSet:
1868 case kIoPageFaultType_DteTranslationDisabled:
1869 case kIoPageFaultType_PasidInvalidRange:
1870 {
1871 /*
1872 * For a translation request, the IOMMU doesn't signal an I/O page fault nor does it
1873 * create an event log entry. See AMD spec. 2.1.3.2 "I/O Page Faults".
1874 */
1875 if (enmOp != IOMMUOP_TRANSLATE_REQ)
1876 {
1877 if (!fSuppressEvtLogging)
1878 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1879 if (enmOp != IOMMUOP_CMD)
1880 iommuAmdSetPciTargetAbort(pDevIns);
1881 }
1882 break;
1883 }
1884
1885 case kIoPageFaultType_UserSupervisor:
1886 {
1887 /* Access is blocked and only creates an event log entry. */
1888 if (!fSuppressEvtLogging)
1889 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1890 break;
1891 }
1892
1893 case kIoPageFaultType_IrteAddrInvalid:
1894 case kIoPageFaultType_IrteRsvdNotZero:
1895 case kIoPageFaultType_IrteRemapEn:
1896 case kIoPageFaultType_IrteRsvdIntType:
1897 case kIoPageFaultType_IntrReqAborted:
1898 case kIoPageFaultType_IntrWithPasid:
1899 {
1900 /* Only triggered by interrupt requests. */
1901 Assert(enmOp == IOMMUOP_INTR_REQ);
1902 if (!fSuppressEvtLogging)
1903 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1904 iommuAmdSetPciTargetAbort(pDevIns);
1905 break;
1906 }
1907
1908 case kIoPageFaultType_SmiFilterMismatch:
1909 {
1910 /* Not supported and probably will never be, assert. */
1911 AssertMsgFailed(("kIoPageFaultType_SmiFilterMismatch - Upstream SMI requests not supported/implemented."));
1912 break;
1913 }
1914
1915 case kIoPageFaultType_DevId_Invalid:
1916 {
1917 /* Cannot be triggered by a command. */
1918 Assert(enmOp != IOMMUOP_CMD);
1919 Assert(enmOp != IOMMUOP_TRANSLATE_REQ); /** @todo IOMMU: We don't support translation requests yet. */
1920 if (!fSuppressEvtLogging)
1921 iommuAmdWriteEvtLogEntry(pDevIns, pEvent);
1922 if ( enmOp == IOMMUOP_MEM_READ
1923 || enmOp == IOMMUOP_MEM_WRITE)
1924 iommuAmdSetPciTargetAbort(pDevIns);
1925 break;
1926 }
1927 }
1928
1929 IOMMU_UNLOCK(pDevIns);
1930}
1931
1932
1933/**
1934 * Returns whether the I/O virtual address is to be excluded from translation and
1935 * permission checks.
1936 *
1937 * @returns @c true if the DVA is excluded, @c false otherwise.
1938 * @param pThis The IOMMU device state.
1939 * @param pDte The device table entry.
1940 * @param uIova The I/O virtual address.
1941 *
1942 * @remarks Ensure the exclusion range is enabled prior to calling this function.
1943 *
1944 * @thread Any.
1945 */
1946static bool iommuAmdIsDvaInExclRange(PCIOMMU pThis, PCDTE_T pDte, uint64_t uIova)
1947{
1948 /* Ensure the exclusion range is enabled. */
1949 Assert(pThis->ExclRangeBaseAddr.n.u1ExclEnable);
1950
1951 /* Check if the IOVA falls within the exclusion range. */
1952 uint64_t const uIovaExclFirst = pThis->ExclRangeBaseAddr.n.u40ExclRangeBase << X86_PAGE_4K_SHIFT;
1953 uint64_t const uIovaExclLast = pThis->ExclRangeLimit.n.u52ExclLimit;
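/* Note: A single unsigned comparison covers both bounds: if uIova is below the
   range start the subtraction wraps around to a huge value, so addresses on
   either side of [First, Last] fail the check below. */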
1954 if (uIova - uIovaExclFirst <= uIovaExclLast - uIovaExclFirst)
1955 {
1956 /* Check if device access to addresses in the exclusion range can be forwarded untranslated. */
1957 if ( pThis->ExclRangeBaseAddr.n.u1AllowAll
1958 || pDte->n.u1AllowExclusion)
1959 return true;
1960 }
1961 return false;
1962}
1963
1964
1965/**
1966 * Reads a device table entry from guest memory given the device ID.
1967 *
1968 * @returns VBox status code.
1969 * @param pDevIns The IOMMU device instance.
1970 * @param uDevId The device ID.
1971 * @param enmOp The IOMMU operation being performed.
1972 * @param pDte Where to store the device table entry.
1973 *
1974 * @thread Any.
1975 */
1976static int iommuAmdReadDte(PPDMDEVINS pDevIns, uint16_t uDevId, IOMMUOP enmOp, PDTE_T pDte)
1977{
1978 PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
1979 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
1980
1981 uint8_t const idxSegsEn = Ctrl.n.u3DevTabSegEn;
1982 Assert(idxSegsEn < RT_ELEMENTS(g_auDevTabSegShifts));
1983 Assert(idxSegsEn < RT_ELEMENTS(g_auDevTabSegMasks));
1984
1985 uint8_t const idxSeg = (uDevId & g_auDevTabSegMasks[idxSegsEn]) >> g_auDevTabSegShifts[idxSegsEn];
1986 Assert(idxSeg < RT_ELEMENTS(pThis->aDevTabBaseAddrs));
1987
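/* E.g. with segmentation disabled (idxSegsEn=0) the whole device ID indexes one
   table: device ID 0xA5 gives offDte = 0xA5 * sizeof(DTE_T) = 0xA5 * 32 = 0x14A0
   (DTEs are 32 bytes). */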
1988 RTGCPHYS const GCPhysDevTab = pThis->aDevTabBaseAddrs[idxSeg].n.u40Base << X86_PAGE_4K_SHIFT;
1989 uint32_t const offDte = (uDevId & ~g_auDevTabSegMasks[idxSegsEn]) * sizeof(DTE_T);
1990 RTGCPHYS const GCPhysDte = GCPhysDevTab + offDte;
1991
1992 LogFlowFunc(("idxSegsEn=%#x GCPhysDevTab=%#RGp offDte=%#x GCPhysDte=%#RGp\n", idxSegsEn, GCPhysDevTab, offDte, GCPhysDte));
1993
1994 Assert(!(GCPhysDevTab & X86_PAGE_4K_OFFSET_MASK));
1995 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysDte, pDte, sizeof(*pDte));
1996 if (RT_FAILURE(rc))
1997 {
1998 LogFunc(("Failed to read device table entry at %#RGp. rc=%Rrc -> DevTabHwError\n", GCPhysDte, rc));
1999
2000 EVT_DEV_TAB_HW_ERROR_T EvtDevTabHwErr;
2001 iommuAmdInitDevTabHwErrorEvent(uDevId, GCPhysDte, enmOp, &EvtDevTabHwErr);
2002 iommuAmdRaiseDevTabHwErrorEvent(pDevIns, enmOp, &EvtDevTabHwErr);
2003 return VERR_IOMMU_IPE_1;
2004 }
2005
2007 return rc;
2008}
2009
2010
2011/**
2012 * Walks the I/O page table to translate the I/O virtual address to a system
2013 * physical address.
2014 *
2015 * @returns VBox status code.
2016 * @param pDevIns The IOMMU device instance.
2017 * @param uIova The I/O virtual address to translate. Must be 4K aligned.
2018 * @param uDevId The device ID.
2019 * @param fAccess The access permissions (IOMMU_IO_PERM_XXX). This is the
2020 * permissions for the access being made.
2021 * @param pDte The device table entry.
2022 * @param enmOp The IOMMU operation being performed.
2023 * @param pWalkResult Where to store the results of the I/O page walk. This is
2024 * only updated when VINF_SUCCESS is returned.
2025 *
2026 * @thread Any.
2027 */
2028static int iommuAmdWalkIoPageTable(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, uint8_t fAccess, PCDTE_T pDte,
2029 IOMMUOP enmOp, PIOWALKRESULT pWalkResult)
2030{
2031 Assert(pDte->n.u1Valid);
2032 Assert(!(uIova & X86_PAGE_4K_OFFSET_MASK));
2033
2034 /* If the translation is not valid, raise an I/O page fault. */
2035 if (pDte->n.u1TranslationValid)
2036 { /* likely */ }
2037 else
2038 {
2039 /** @todo r=ramshankar: The AMD IOMMU spec. says page walk is terminated but
2040 * doesn't explicitly say whether an I/O page fault is raised. From other
2041 * places in the spec. it seems early page walk terminations (starting with
2042 * the DTE) return the state computed so far and raise an I/O page fault. So
2043 * we return an invalid translation rather than skipping it. */
2044 LogFunc(("Translation valid bit not set -> IOPF\n"));
2045 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2046 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, false /* fPresent */, false /* fRsvdNotZero */,
2047 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2048 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2049 kIoPageFaultType_DteTranslationDisabled);
2050 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2051 }
2052
2053 /* If the root page table level is 0, translation is skipped and access is controlled by the permission bits. */
2054 uint8_t const uMaxLevel = pDte->n.u3Mode;
2055 if (uMaxLevel != 0)
2056 { /* likely */ }
2057 else
2058 {
2059 uint8_t const fDtePerm = (pDte->au64[0] >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
2060 if ((fAccess & fDtePerm) != fAccess)
2061 {
2062 LogFunc(("Access denied for IOVA (%#RX64). fAccess=%#x fDtePerm=%#x\n", uIova, fAccess, fDtePerm));
2063 return VERR_IOMMU_ADDR_ACCESS_DENIED;
2064 }
2065 pWalkResult->GCPhysSpa = uIova;
2066 pWalkResult->cShift = 0;
2067 pWalkResult->fIoPerm = fDtePerm;
2068 return VINF_SUCCESS;
2069 }
2070
2071 /* If the root page table level exceeds the allowed host-address translation level, page walk is terminated. */
2072 if (uMaxLevel <= IOMMU_MAX_HOST_PT_LEVEL)
2073 { /* likely */ }
2074 else
2075 {
2076 /** @todo r=ramshankar: I cannot make out from the AMD IOMMU spec. if I should be
2077 * raising an ILLEGAL_DEV_TABLE_ENTRY event or an IO_PAGE_FAULT event here.
2078 * I'm just going with I/O page fault. */
2079 LogFunc(("Invalid root page table level %#x -> IOPF\n", uMaxLevel));
2080 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2081 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2082 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2083 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2084 kIoPageFaultType_PteInvalidLvlEncoding);
2085 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2086 }
2087
2088 /* Check permissions bits of the root page table. */
2089 uint8_t const fRootPtePerm = (pDte->au64[0] >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
2090 if ((fAccess & fRootPtePerm) == fAccess)
2091 { /* likely */ }
2092 else
2093 {
2094 LogFunc(("Permission denied (fAccess=%#x fRootPtePerm=%#x) -> IOPF\n", fAccess, fRootPtePerm));
2095 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2096 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2097 true /* fPermDenied */, enmOp, &EvtIoPageFault);
2098 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault, kIoPageFaultType_PermDenied);
2099 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2100 }
2101
2102 /** @todo r=ramshankar: IOMMU: Consider splitting the rest of this into a separate
2103 * function called iommuAmdWalkIoPageDirectory() and call it for multi-page
2104 * accesses from the 2nd page. We can avoid re-checking the DTE root-page
2105 * table entry every time. Not sure if it's worth optimizing that case now
2106 * or if at all. */
2107
2108 /* The virtual address bits indexing table. */
2109 static uint8_t const s_acIovaLevelShifts[] = { 0, 12, 21, 30, 39, 48, 57, 0 };
2110 static uint64_t const s_auIovaLevelMasks[] = { UINT64_C(0x0000000000000000),
2111 UINT64_C(0x00000000001ff000),
2112 UINT64_C(0x000000003fe00000),
2113 UINT64_C(0x0000007fc0000000),
2114 UINT64_C(0x0000ff8000000000),
2115 UINT64_C(0x01ff000000000000),
2116 UINT64_C(0xfe00000000000000),
2117 UINT64_C(0x0000000000000000) };
2118 AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) == RT_ELEMENTS(s_auIovaLevelMasks));
2119 AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) > IOMMU_MAX_HOST_PT_LEVEL);
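/* Each paging level translates 9 IOVA bits (512 entries of 8 bytes make one 4K
   table): level 1 covers bits 20:12, level 2 bits 29:21, and so on; level 6
   covers the remaining top bits 63:57, per the masks above. */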
2120
2121 /* Traverse the I/O page table starting with the page directory in the DTE. */
2122 IOPTENTITY_T PtEntity;
2123 PtEntity.u64 = pDte->au64[0];
2124 for (;;)
2125 {
2126 /* Figure out the system physical address of the page table at the current level. */
2127 uint8_t const uLevel = PtEntity.n.u3NextLevel;
2128
2129 /* Read the page table entity at the current level. */
2130 {
2131 Assert(uLevel > 0 && uLevel < RT_ELEMENTS(s_acIovaLevelShifts));
2132 Assert(uLevel <= IOMMU_MAX_HOST_PT_LEVEL);
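/* Each table holds 512 (2^9) entries of 8 bytes; idxPte selects the entry and
   the shift by 3 converts that index into a byte offset. */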
2133 uint16_t const idxPte = (uIova >> s_acIovaLevelShifts[uLevel]) & UINT64_C(0x1ff);
2134 uint64_t const offPte = idxPte << 3;
2135 RTGCPHYS const GCPhysPtEntity = (PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK) + offPte;
2136 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysPtEntity, &PtEntity.u64, sizeof(PtEntity));
2137 if (RT_FAILURE(rc))
2138 {
2139 LogFunc(("Failed to read page table entry at %#RGp. rc=%Rrc -> PageTabHwError\n", GCPhysPtEntity, rc));
2140 EVT_PAGE_TAB_HW_ERR_T EvtPageTabHwErr;
2141 iommuAmdInitPageTabHwErrorEvent(uDevId, pDte->n.u16DomainId, GCPhysPtEntity, enmOp, &EvtPageTabHwErr);
2142 iommuAmdRaisePageTabHwErrorEvent(pDevIns, enmOp, &EvtPageTabHwErr);
2143 return VERR_IOMMU_IPE_2;
2144 }
2145 }
2146
2147 /* Check present bit. */
2148 if (PtEntity.n.u1Present)
2149 { /* likely */ }
2150 else
2151 {
2152 LogFunc(("Page table entry not present -> IOPF\n"));
2153 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2154 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, false /* fPresent */, false /* fRsvdNotZero */,
2155 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2156 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault, kIoPageFaultType_PermDenied);
2157 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2158 }
2159
2160 /* Check permission bits. */
2161 uint8_t const fPtePerm = (PtEntity.u64 >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
2162 if ((fAccess & fPtePerm) == fAccess)
2163 { /* likely */ }
2164 else
2165 {
2166 LogFunc(("Page table entry permission denied (fAccess=%#x fPtePerm=%#x) -> IOPF\n", fAccess, fPtePerm));
2167 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2168 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2169 true /* fPermDenied */, enmOp, &EvtIoPageFault);
2170 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault, kIoPageFaultType_PermDenied);
2171 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2172 }
2173
2174 /* If this is a PTE, we're at the final level and we're done. */
2175 uint8_t const uNextLevel = PtEntity.n.u3NextLevel;
2176 if (uNextLevel == 0)
2177 {
2178 /* The page size of the translation is the default (4K). */
2179 pWalkResult->GCPhysSpa = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
2180 pWalkResult->cShift = X86_PAGE_4K_SHIFT;
2181 pWalkResult->fIoPerm = fPtePerm;
2182 return VINF_SUCCESS;
2183 }
2184 if (uNextLevel == 7)
2185 {
2186 /* The default page size of the translation is overridden. */
2187 RTGCPHYS const GCPhysPte = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
2188 uint8_t cShift = X86_PAGE_4K_SHIFT;
2189 while (GCPhysPte & RT_BIT_64(cShift++))
2190 ;
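/* The low PTE address bits encode the page size: the scan stops at the first
   clear bit at or above bit 12. E.g. a 2 MB page has bits 19:12 set and bit 20
   clear, so the loop exits with cShift = 21 (2^21 = 2 MB). */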
2191
2192 /* The page size must be larger than the default (4K) and smaller than the size spanned by the next higher level. */
2193 Assert(uLevel < IOMMU_MAX_HOST_PT_LEVEL); /* PTE at level 6 handled outside the loop, uLevel should be <= 5. */
2194 if ( cShift > s_acIovaLevelShifts[uLevel]
2195 && cShift < s_acIovaLevelShifts[uLevel + 1])
2196 {
2197 pWalkResult->GCPhysSpa = GCPhysPte;
2198 pWalkResult->cShift = cShift;
2199 pWalkResult->fIoPerm = fPtePerm;
2200 return VINF_SUCCESS;
2201 }
2202
2203 LogFunc(("Page size invalid cShift=%#x -> IOPF\n", cShift));
2204 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2205 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2206 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2207 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2208 kIoPageFaultType_PteInvalidPageSize);
2209 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2210 }
2211
2212 /* Validate the next level encoding of the PDE. */
2213#if IOMMU_MAX_HOST_PT_LEVEL < 6
2214 if (uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL)
2215 { /* likely */ }
2216 else
2217 {
2218 LogFunc(("Next level of PDE invalid uNextLevel=%#x -> IOPF\n", uNextLevel));
2219 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2220 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2221 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2222 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2223 kIoPageFaultType_PteInvalidLvlEncoding);
2224 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2225 }
2226#else
2227 Assert(uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL);
2228#endif
2229
2230 /* Validate level transition. */
2231 if (uNextLevel < uLevel)
2232 { /* likely */ }
2233 else
2234 {
2235 LogFunc(("Next level (%#x) must be less than the current level (%#x) -> IOPF\n", uNextLevel, uLevel));
2236 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2237 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2238 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2239 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2240 kIoPageFaultType_PteInvalidLvlEncoding);
2241 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2242 }
2243
2244 /* Ensure IOVA bits of skipped levels are zero. */
2245 Assert(uLevel > 0);
2246 uint64_t uIovaSkipMask = 0;
2247 for (unsigned idxLevel = uLevel - 1; idxLevel > uNextLevel; idxLevel--)
2248 uIovaSkipMask |= s_auIovaLevelMasks[idxLevel];
2249 if (!(uIova & uIovaSkipMask))
2250 { /* likely */ }
2251 else
2252 {
2253 LogFunc(("IOVA of skipped levels are not zero %#RX64 (SkipMask=%#RX64) -> IOPF\n", uIova, uIovaSkipMask));
2254 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2255 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
2256 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2257 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2258 kIoPageFaultType_SkippedLevelIovaNotZero);
2259 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2260 }
2261
2262 /* Continue with traversing the page directory at this level. */
2263 }
2264}
2265
2266
2267/**
2268 * Looks up an I/O virtual address from the device table.
2269 *
2270 * @returns VBox status code.
2271 * @param pDevIns The IOMMU instance data.
2272 * @param uDevId The device ID.
2273 * @param uIova The I/O virtual address to lookup.
2274 * @param cbAccess The size of the access.
2275 * @param fAccess The access permissions (IOMMU_IO_PERM_XXX). This is the
2276 * permissions for the access being made.
2277 * @param enmOp The IOMMU operation being performed.
2278 * @param pGCPhysSpa Where to store the translated system physical address. Only
2279 * valid when translation succeeds and VINF_SUCCESS is
2280 * returned!
2281 *
2282 * @thread Any.
2283 */
2284static int iommuAmdLookupDeviceTable(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, size_t cbAccess, uint8_t fAccess,
2285 IOMMUOP enmOp, PRTGCPHYS pGCPhysSpa)
2286{
2287 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2288
2289 /* Read the device table entry from memory. */
2290 DTE_T Dte;
2291 int rc = iommuAmdReadDte(pDevIns, uDevId, enmOp, &Dte);
2292 if (RT_SUCCESS(rc))
2293 {
2294 /* If the DTE is not valid, addresses are forwarded without translation. */
2295 if (Dte.n.u1Valid)
2296 { /* likely */ }
2297 else
2298 {
2299 /** @todo IOMMU: Add to IOTLB cache. */
2300 *pGCPhysSpa = uIova;
2301 return VINF_SUCCESS;
2302 }
2303
2304 /* Validate bits 127:0 of the device table entry when DTE.V is 1. */
2305 uint64_t const fRsvd0 = Dte.au64[0] & ~(IOMMU_DTE_QWORD_0_VALID_MASK & ~IOMMU_DTE_QWORD_0_FEAT_MASK);
2306 uint64_t const fRsvd1 = Dte.au64[1] & ~(IOMMU_DTE_QWORD_1_VALID_MASK & ~IOMMU_DTE_QWORD_1_FEAT_MASK);
2307 if (RT_LIKELY( !fRsvd0
2308 && !fRsvd1))
2309 { /* likely */ }
2310 else
2311 {
2312 LogFunc(("Invalid reserved bits in DTE (u64[0]=%#RX64 u64[1]=%#RX64) -> Illegal DTE\n", fRsvd0, fRsvd1));
2313 EVT_ILLEGAL_DTE_T Event;
2314 iommuAmdInitIllegalDteEvent(uDevId, uIova, true /* fRsvdNotZero */, enmOp, &Event);
2315 iommuAmdRaiseIllegalDteEvent(pDevIns, enmOp, &Event, kIllegalDteType_RsvdNotZero);
2316 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2317 }
2318
2319 /* If the IOVA is subject to address exclusion, addresses are forwarded without translation. */
2320 if ( !pThis->ExclRangeBaseAddr.n.u1ExclEnable
2321 || !iommuAmdIsDvaInExclRange(pThis, &Dte, uIova))
2322 { /* likely */ }
2323 else
2324 {
2325 /** @todo IOMMU: Add to IOTLB cache. */
2326 *pGCPhysSpa = uIova;
2327 return VINF_SUCCESS;
2328 }
2329
2330 /** @todo IOMMU: Perhaps do the <= 4K access case first, if the generic loop
2331 * below gets too expensive and when we have iommuAmdWalkIoPageDirectory. */
2332
2333 uint64_t uBaseIova = uIova & X86_PAGE_4K_BASE_MASK;
2334 uint64_t offIova = uIova & X86_PAGE_4K_OFFSET_MASK;
2335 uint64_t cbRemaining = cbAccess;
2336 for (;;)
2337 {
2338 /* Walk the I/O page tables to translate the IOVA and check permission for the access. */
2339 IOWALKRESULT WalkResult;
2340 rc = iommuAmdWalkIoPageTable(pDevIns, uDevId, uBaseIova, fAccess, &Dte, enmOp, &WalkResult);
2341 if (RT_SUCCESS(rc))
2342 {
2343 /** @todo IOMMU: Split large pages into 4K IOTLB entries and add to IOTLB cache. */
2344
2345 /* Store the translated base address before continuing to check permissions for any more pages. */
2346 if (cbRemaining == cbAccess)
2347 {
2348 RTGCPHYS const offSpa = uIova & ~(UINT64_C(0xffffffffffffffff) << WalkResult.cShift);
2349 *pGCPhysSpa = WalkResult.GCPhysSpa | offSpa;
2350 }
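/* Note: Only the SPA of the first page is returned; the remaining pages are
   walked purely for permission checks. This appears to assume the I/O page
   mappings of a multi-page access are physically contiguous. */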
2351
2352 uint64_t const cbPhysPage = UINT64_C(1) << WalkResult.cShift;
2353 if (cbRemaining > cbPhysPage - offIova)
2354 {
2355 cbRemaining -= (cbPhysPage - offIova);
2356 uBaseIova += cbPhysPage;
2357 offIova = 0;
2358 }
2359 else
2360 break;
2361 }
2362 else
2363 {
2364 LogFunc(("I/O page table walk failed. uIova=%#RX64 uBaseIova=%#RX64 fAccess=%u rc=%Rrc\n", uIova,
2365 uBaseIova, fAccess, rc));
2366 *pGCPhysSpa = NIL_RTGCPHYS;
2367 return rc;
2368 }
2369 }
2370
2371 return rc;
2372 }
2373
2374 LogFunc(("Failed to read device table entry. uDevId=%#x rc=%Rrc\n", uDevId, rc));
2375 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2376}
2377
2378
2379/**
2380 * Memory read request from a device.
2381 *
2382 * @returns VBox status code.
2383 * @param pDevIns The IOMMU device instance.
2384 * @param uDevId The device ID (bus, device, function).
2385 * @param uIova The I/O virtual address being read.
2386 * @param cbRead The number of bytes being read.
2387 * @param pGCPhysSpa Where to store the translated system physical address.
2388 *
2389 * @thread Any.
2390 */
2391static DECLCALLBACK(int) iommuAmdDeviceMemRead(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, size_t cbRead,
2392 PRTGCPHYS pGCPhysSpa)
2393{
2394 /* Validate. */
2395 Assert(pDevIns);
2396 Assert(pGCPhysSpa);
2397 Assert(cbRead > 0);
2398
2399 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2400 LogFlowFunc(("uDevId=%#x uIova=%#RX64 cbRead=%u\n", uDevId, uIova, cbRead));
2401
2402 /* Addresses are forwarded without translation when the IOMMU is disabled. */
2403 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
2404 if (Ctrl.n.u1IommuEn)
2405 {
2406 /** @todo IOMMU: IOTLB cache lookup. */
2407
2408 /* Lookup the IOVA from the device table. */
2409 return iommuAmdLookupDeviceTable(pDevIns, uDevId, uIova, cbRead, IOMMU_IO_PERM_READ, IOMMUOP_MEM_READ, pGCPhysSpa);
2410 }
2411
2412 *pGCPhysSpa = uIova;
2413 return VINF_SUCCESS;
2414}
2415
2416
2417/**
2418 * Memory write request from a device.
2419 *
2420 * @returns VBox status code.
2421 * @param pDevIns The IOMMU device instance.
2422 * @param uDevId The device ID (bus, device, function).
2423 * @param uIova The I/O virtual address being written.
2424 * @param cbWrite The number of bytes being written.
2425 * @param pGCPhysSpa Where to store the translated physical address.
2426 *
2427 * @thread Any.
2428 */
2429static DECLCALLBACK(int) iommuAmdDeviceMemWrite(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, size_t cbWrite,
2430 PRTGCPHYS pGCPhysSpa)
2431{
2432 /* Validate. */
2433 Assert(pDevIns);
2434 Assert(pGCPhysSpa);
2435 Assert(cbWrite > 0);
2436
2437 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2438 LogFlowFunc(("uDevId=%#x uIova=%#RX64 cbWrite=%u\n", uDevId, uIova, cbWrite));
2439
2440 /* Addresses are forwarded without translation when the IOMMU is disabled. */
2441 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
2442 if (Ctrl.n.u1IommuEn)
2443 {
2444 /** @todo IOMMU: IOTLB cache lookup. */
2445
2446 /* Lookup the IOVA from the device table. */
2447 return iommuAmdLookupDeviceTable(pDevIns, uDevId, uIova, cbWrite, IOMMU_IO_PERM_WRITE, IOMMUOP_MEM_WRITE, pGCPhysSpa);
2448 }
2449
2450 *pGCPhysSpa = uIova;
2451 return VINF_SUCCESS;
2452}
2453
2454
2455/**
2456 * Reads an interrupt remapping table entry from guest memory given its DTE.
2457 *
2458 * @returns VBox status code.
2459 * @param pDevIns The IOMMU device instance.
2460 * @param uDevId The device ID.
2461 * @param pDte The device table entry.
2462 * @param GCPhysIn The source MSI address.
2463 * @param uDataIn The source MSI data.
2464 * @param enmOp The IOMMU operation being performed.
2465 * @param pIrte Where to store the interrupt remapping table entry.
2466 *
2467 * @thread Any.
2468 */
2469static int iommuAmdReadIrte(PPDMDEVINS pDevIns, uint16_t uDevId, PCDTE_T pDte, RTGCPHYS GCPhysIn, uint32_t uDataIn,
2470 IOMMUOP enmOp, PIRTE_T pIrte)
2471{
2472 RTGCPHYS const GCPhysIntrTable = pDte->au64[2] & IOMMU_DTE_IRTE_ROOT_PTR_MASK;
2473 uint16_t const offIrte = (uDataIn & IOMMU_MSI_DATA_IRTE_OFFSET_MASK) << IOMMU_IRTE_SIZE_SHIFT;
2474 RTGCPHYS const GCPhysIrte = GCPhysIntrTable + offIrte;
2475
2476 /* Ensure the IRTE offset is within the specified table size. */
2477 Assert(pDte->n.u4IntrTableLength < 12);
2478 if (offIrte + sizeof(IRTE_T) <= (1U << pDte->n.u4IntrTableLength) << IOMMU_IRTE_SIZE_SHIFT)
2479 { /* likely */ }
2480 else
2481 {
2482 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2483 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, GCPhysIn, false /* fPresent */, false /* fRsvdNotZero */,
2484 false /* fPermDenied */, enmOp, &EvtIoPageFault);
2485 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
2486 kIoPageFaultType_IrteAddrInvalid);
2487 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2488 }
2489
2490 /* Read the IRTE from memory. */
2491 Assert(!(GCPhysIrte & 3));
2492 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysIrte, pIrte, sizeof(*pIrte));
2493 if (RT_SUCCESS(rc))
2494 return VINF_SUCCESS;
2495
2496 /** @todo The IOMMU spec. does not tell what kind of error is reported in this
2497 * situation. Is it an I/O page fault or a device table hardware error?
2498 * There's no interrupt table hardware error event, but it's unclear what
2499 * we should do here. */
2500 LogFunc(("Failed to read interrupt table entry at %#RGp. rc=%Rrc -> ???\n", GCPhysIrte, rc));
2501 return VERR_IOMMU_IPE_4;
2502}
2503
2504
2505/**
2506 * Remaps the interrupt using the interrupt remapping table.
2507 *
2508 * @returns VBox status code.
2509 * @param pDevIns The IOMMU instance data.
2510 * @param uDevId The device ID.
2511 * @param pDte The device table entry.
2512 * @param enmOp The IOMMU operation being performed.
2513 * @param pMsiIn The source MSI.
2514 * @param pMsiOut Where to store the remapped MSI.
2515 *
2516 * @thread Any.
2517 */
2518static int iommuAmdRemapIntr(PPDMDEVINS pDevIns, uint16_t uDevId, PCDTE_T pDte, IOMMUOP enmOp, PCMSIMSG pMsiIn,
2519 PMSIMSG pMsiOut)
2520{
2521 Assert(pDte->n.u2IntrCtrl == IOMMU_INTR_CTRL_REMAP);
2522
2523 IRTE_T Irte;
2524 int rc = iommuAmdReadIrte(pDevIns, uDevId, pDte, pMsiIn->Addr.u64, pMsiIn->Data.u32, enmOp, &Irte);
2525 if (RT_SUCCESS(rc))
2526 {
2527 if (Irte.n.u1RemapEnable)
2528 {
2529 if (!Irte.n.u1GuestMode)
2530 {
2531 if (Irte.n.u3IntrType <= VBOX_MSI_DELIVERY_MODE_LOWEST_PRIO)
2532 {
2533 /* Preserve all bits from the source MSI address that don't map 1:1 from the IRTE. */
2534 pMsiOut->Addr.u64 = pMsiIn->Addr.u64;
2535 pMsiOut->Addr.n.u1DestMode = Irte.n.u1DestMode;
2536 pMsiOut->Addr.n.u8DestId = Irte.n.u8Dest;
2537
2538 /* Preserve all bits from the source MSI data that don't map 1:1 from the IRTE. */
2539 pMsiOut->Data.u32 = pMsiIn->Data.u32;
2540 pMsiOut->Data.n.u8Vector = Irte.n.u8Vector;
2541 pMsiOut->Data.n.u3DeliveryMode = Irte.n.u3IntrType;
2542
2543 return VINF_SUCCESS;
2544 }
2545
2546 LogFunc(("Interrupt type (%#x) invalid -> IOPF\n", Irte.n.u3IntrType));
2547 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2548 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, pMsiIn->Addr.u64, Irte.n.u1RemapEnable,
2549 true /* fRsvdNotZero */, false /* fPermDenied */, enmOp, &EvtIoPageFault);
2550 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, &Irte, enmOp, &EvtIoPageFault, kIoPageFaultType_IrteRsvdIntType);
2551 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2552 }
2553
2554 LogFunc(("Guest mode not supported -> IOPF\n"));
2555 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2556 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, pMsiIn->Addr.u64, Irte.n.u1RemapEnable,
2557 true /* fRsvdNotZero */, false /* fPermDenied */, enmOp, &EvtIoPageFault);
2558 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, &Irte, enmOp, &EvtIoPageFault, kIoPageFaultType_IrteRsvdNotZero);
2559 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2560 }
2561
2562 LogFunc(("Remapping disabled -> IOPF\n"));
2563 EVT_IO_PAGE_FAULT_T EvtIoPageFault;
2564 iommuAmdInitIoPageFaultEvent(uDevId, pDte->n.u16DomainId, pMsiIn->Addr.u64, Irte.n.u1RemapEnable,
2565 false /* fRsvdNotZero */, false /* fPermDenied */, enmOp, &EvtIoPageFault);
2566 iommuAmdRaiseIoPageFaultEvent(pDevIns, pDte, &Irte, enmOp, &EvtIoPageFault, kIoPageFaultType_IrteRemapEn);
2567 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
2568 }
2569
2570 return rc;
2571}
2572
2573
2574/**
2575 * Looks up an MSI interrupt from the interrupt remapping table.
2576 *
2577 * @returns VBox status code.
2578 * @param pDevIns The IOMMU instance data.
2579 * @param uDevId The device ID.
2580 * @param enmOp The IOMMU operation being performed.
2581 * @param pMsiIn The source MSI.
2582 * @param pMsiOut Where to store the remapped MSI.
2583 *
2584 * @thread Any.
2585 */
2586static int iommuAmdLookupIntrTable(PPDMDEVINS pDevIns, uint16_t uDevId, IOMMUOP enmOp, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
2587{
2588 /* Read the device table entry from memory. */
2589 LogFlowFunc(("uDevId=%#x enmOp=%u\n", uDevId, enmOp));
2590
2591 DTE_T Dte;
2592 int rc = iommuAmdReadDte(pDevIns, uDevId, enmOp, &Dte);
2593 if (RT_SUCCESS(rc))
2594 {
2595 /* If the DTE is not valid, all interrupts are forwarded without remapping. */
2596 if (Dte.n.u1IntrMapValid)
2597 {
2598 /* Validate bits 255:128 of the device table entry when DTE.IV is 1. */
2599 uint64_t const fRsvd0 = Dte.au64[2] & ~IOMMU_DTE_QWORD_2_VALID_MASK;
2600 uint64_t const fRsvd1 = Dte.au64[3] & ~IOMMU_DTE_QWORD_3_VALID_MASK;
2601 if (RT_LIKELY( !fRsvd0
2602 && !fRsvd1))
2603 { /* likely */ }
2604 else
2605 {
2606 LogFunc(("Invalid reserved bits in DTE (u64[2]=%#RX64 u64[3]=%#RX64) -> Illegal DTE\n", fRsvd0,
2607 fRsvd1));
2608 EVT_ILLEGAL_DTE_T Event;
2609 iommuAmdInitIllegalDteEvent(uDevId, pMsiIn->Addr.u64, true /* fRsvdNotZero */, enmOp, &Event);
2610 iommuAmdRaiseIllegalDteEvent(pDevIns, enmOp, &Event, kIllegalDteType_RsvdNotZero);
2611 return VERR_IOMMU_INTR_REMAP_FAILED;
2612 }
2613
2614 /*
2615 * LINT0/LINT1 pins cannot be driven by PCI(e) devices. For a Southbridge
2616 * connected through HyperTransport it might be possible, but for us it
2617 * doesn't seem necessary to handle these pins specially.
2618 */
2619
2620 /*
2621 * Validate the MSI source address.
2622 *
2623 * 64-bit MSIs are supported by the PCI and AMD IOMMU specs. However, as far as the
2624 * CPU is concerned, the MSI region is fixed and we must ensure no other device
2625 * claims the region as I/O space.
2626 *
2627 * See PCI spec. 6.1.4. "Message Signaled Interrupt (MSI) Support".
2628 * See AMD IOMMU spec. 2.8 "IOMMU Interrupt Support".
2629 * See Intel spec. 10.11.1 "Message Address Register Format".
2630 */
2631 if ((pMsiIn->Addr.u64 & VBOX_MSI_ADDR_ADDR_MASK) == VBOX_MSI_ADDR_BASE)
2632 {
2633 /*
2634 * The IOMMU remaps fixed and arbitrated interrupts using the IRTE.
2635 * See AMD IOMMU spec. "2.2.5.1 Interrupt Remapping Tables, Guest Virtual APIC Not Enabled".
2636 */
2637 uint8_t const u8DeliveryMode = pMsiIn->Data.n.u3DeliveryMode;
2638 bool fPassThru = false;
2639 switch (u8DeliveryMode)
2640 {
2641 case VBOX_MSI_DELIVERY_MODE_FIXED:
2642 case VBOX_MSI_DELIVERY_MODE_LOWEST_PRIO:
2643 {
2644 uint8_t const uIntrCtrl = Dte.n.u2IntrCtrl;
2645 if (uIntrCtrl == IOMMU_INTR_CTRL_TARGET_ABORT)
2646 {
2647 LogFunc(("IntCtl=0: Target aborting fixed/arbitrated interrupt -> Target abort\n"));
2648 iommuAmdSetPciTargetAbort(pDevIns);
2649 return VERR_IOMMU_INTR_REMAP_DENIED;
2650 }
2651
2652 if (uIntrCtrl == IOMMU_INTR_CTRL_FWD_UNMAPPED)
2653 {
2654 fPassThru = true;
2655 break;
2656 }
2657
2658 if (uIntrCtrl == IOMMU_INTR_CTRL_REMAP)
2659 {
2660 /* Validate the encoded interrupt table length when IntCtl specifies remapping. */
2661 uint32_t const uIntTabLen = Dte.n.u4IntrTableLength;
2662 if (uIntTabLen < 12)
2663 {
2664 /*
2665 * We don't support guest interrupt remapping yet. When we do, we'll need to
2666 * check Ctrl.u1GstVirtApicEn and use the guest Virtual APIC Table Root Pointer
2667 * in the DTE rather than the Interrupt Root Table Pointer. Since the caller
2668 * already reads the control register, add that as a parameter when we eventually
2669 * support guest interrupt remapping. For now, just assert.
2670 */
2671 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2672 Assert(!pThis->ExtFeat.n.u1GstVirtApicSup);
2673 NOREF(pThis);
2674
2675 return iommuAmdRemapIntr(pDevIns, uDevId, &Dte, enmOp, pMsiIn, pMsiOut);
2676 }
2677
2678 LogFunc(("Invalid interrupt table length %#x -> Illegal DTE\n", uIntTabLen));
2679 EVT_ILLEGAL_DTE_T Event;
2680 iommuAmdInitIllegalDteEvent(uDevId, pMsiIn->Addr.u64, false /* fRsvdNotZero */, enmOp, &Event);
2681 iommuAmdRaiseIllegalDteEvent(pDevIns, enmOp, &Event, kIllegalDteType_RsvdIntTabLen);
2682 return VERR_IOMMU_INTR_REMAP_FAILED;
2683 }
2684
2685 /* Paranoia. */
2686 Assert(uIntrCtrl == IOMMU_INTR_CTRL_RSVD);
2687
2688 LogFunc(("IntCtl mode invalid %#x -> Illegal DTE\n", uIntrCtrl));
2689
2690 EVT_ILLEGAL_DTE_T Event;
2691 iommuAmdInitIllegalDteEvent(uDevId, pMsiIn->Addr.u64, true /* fRsvdNotZero */, enmOp, &Event);
2692 iommuAmdRaiseIllegalDteEvent(pDevIns, enmOp, &Event, kIllegalDteType_RsvdIntCtl);
2693 return VERR_IOMMU_INTR_REMAP_FAILED;
2694 }
2695
2696 /* SMIs are passed through unmapped. We don't implement SMI filters. */
2697 case VBOX_MSI_DELIVERY_MODE_SMI: fPassThru = true; break;
2698 case VBOX_MSI_DELIVERY_MODE_NMI: fPassThru = Dte.n.u1NmiPassthru; break;
2699 case VBOX_MSI_DELIVERY_MODE_INIT: fPassThru = Dte.n.u1InitPassthru; break;
2700 case VBOX_MSI_DELIVERY_MODE_EXT_INT: fPassThru = Dte.n.u1ExtIntPassthru; break;
2701 default:
2702 {
2703 LogFunc(("MSI data delivery mode invalid %#x -> Target abort\n", u8DeliveryMode));
2704 iommuAmdSetPciTargetAbort(pDevIns);
2705 return VERR_IOMMU_INTR_REMAP_FAILED;
2706 }
2707 }
2708
2709 if (fPassThru)
2710 {
2711 *pMsiOut = *pMsiIn;
2712 return VINF_SUCCESS;
2713 }
2714
2715 iommuAmdSetPciTargetAbort(pDevIns);
2716 return VERR_IOMMU_INTR_REMAP_DENIED;
2717 }
2718 else
2719 {
2720 LogFunc(("MSI address region invalid %#RX64\n", pMsiIn->Addr.u64));
2721 return VERR_IOMMU_INTR_REMAP_FAILED;
2722 }
2723 }
2724 else
2725 {
2726 /** @todo IOMMU: Add to interrupt remapping cache. */
2727 LogFlowFunc(("DTE interrupt map not valid\n"));
2728 *pMsiOut = *pMsiIn;
2729 return VINF_SUCCESS;
2730 }
2731 }
2732
2733 LogFunc(("Failed to read device table entry. uDevId=%#x rc=%Rrc\n", uDevId, rc));
2734 return VERR_IOMMU_INTR_REMAP_FAILED;
2735}
2736
2737
2738/**
2739 * Interrupt remap request from a device.
2740 *
2741 * @returns VBox status code.
2742 * @param pDevIns The IOMMU device instance.
2743 * @param uDevId The device ID (bus, device, function).
2744 * @param pMsiIn The source MSI.
2745 * @param pMsiOut Where to store the remapped MSI.
2746 */
2747static DECLCALLBACK(int) iommuAmdDeviceMsiRemap(PPDMDEVINS pDevIns, uint16_t uDevId, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
2748{
2749 /* Validate. */
2750 Assert(pDevIns);
2751 Assert(pMsiIn);
2752 Assert(pMsiOut);
2753
2754 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2755 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMsiRemap));
2756
2757 LogFlowFunc(("uDevId=%#x\n", uDevId));
2758
2759 /* Interrupts are forwarded without remapping when the IOMMU is disabled. */
2760 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
2761 if (Ctrl.n.u1IommuEn)
2762 {
2763 /** @todo Cache? */
2764
2765 return iommuAmdLookupIntrTable(pDevIns, uDevId, IOMMUOP_INTR_REQ, pMsiIn, pMsiOut);
2766 }
2767
2768 *pMsiOut = *pMsiIn;
2769 return VINF_SUCCESS;
2770}
2771
2772
2773/**
2774 * @callback_method_impl{FNIOMMMIONEWWRITE}
2775 */
2776static DECLCALLBACK(VBOXSTRICTRC) iommuAmdMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
2777{
2778 NOREF(pvUser);
2779 Assert(cb == 4 || cb == 8);
2780 Assert(!(off & (cb - 1)));
2781
2782 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2783 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioWrite)); NOREF(pThis);
2784
2785 uint64_t const uValue = cb == 8 ? *(uint64_t const *)pv : *(uint32_t const *)pv;
2786 return iommuAmdWriteRegister(pDevIns, off, cb, uValue);
2787}
2788
2789
2790/**
2791 * @callback_method_impl{FNIOMMMIONEWREAD}
2792 */
2793static DECLCALLBACK(VBOXSTRICTRC) iommuAmdMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
2794{
2795 NOREF(pvUser);
2796 Assert(cb == 4 || cb == 8);
2797 Assert(!(off & (cb - 1)));
2798
2799 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2800 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMmioRead)); NOREF(pThis);
2801
2802 uint64_t uResult;
2803 VBOXSTRICTRC rcStrict = iommuAmdReadRegister(pDevIns, off, &uResult);
2804 if (cb == 8)
2805 *(uint64_t *)pv = uResult;
2806 else
2807 *(uint32_t *)pv = (uint32_t)uResult;
2808
2809 return rcStrict;
2810}
2811
2812# ifdef IN_RING3
2813
2814/**
2815 * Processes an IOMMU command.
2816 *
2817 * @returns VBox status code.
2818 * @param pDevIns The IOMMU device instance.
2819 * @param pCmd The command to process.
2820 * @param GCPhysCmd The system physical address of the command.
2821 * @param pEvtError Where to store the error event in case of failures.
2822 *
2823 * @thread Command thread.
2824 */
2825static int iommuAmdR3ProcessCmd(PPDMDEVINS pDevIns, PCCMD_GENERIC_T pCmd, RTGCPHYS GCPhysCmd, PEVT_GENERIC_T pEvtError)
2826{
2827 IOMMU_ASSERT_NOT_LOCKED(pDevIns);
2828
2829 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2830 STAM_COUNTER_INC(&pThis->StatCmd);
2831
2832 uint8_t const bCmd = pCmd->n.u4Opcode;
2833 switch (bCmd)
2834 {
2835 case IOMMU_CMD_COMPLETION_WAIT:
2836 {
2837 STAM_COUNTER_INC(&pThis->StatCmdCompWait);
2838
2839 PCCMD_COMWAIT_T pCmdComWait = (PCCMD_COMWAIT_T)pCmd;
2840 AssertCompile(sizeof(*pCmdComWait) == sizeof(*pCmd));
2841
2842 /* Validate reserved bits in the command. */
2843 if (!(pCmdComWait->au64[0] & ~IOMMU_CMD_COM_WAIT_QWORD_0_VALID_MASK))
2844 {
2845 /* If Completion Store is requested, write the StoreData to the specified address. */
2846 if (pCmdComWait->n.u1Store)
2847 {
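/* Reassemble the store address: u29StoreAddrLo holds address bits 31:3 (hence
   the shift by 3) and u20StoreAddrHi holds bits 51:32; bits 2:0 are zero as
   the store is qword aligned. */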
2848 RTGCPHYS const GCPhysStore = RT_MAKE_U64(pCmdComWait->n.u29StoreAddrLo << 3, pCmdComWait->n.u20StoreAddrHi);
2849 uint64_t const u64Data = pCmdComWait->n.u64StoreData;
2850 int rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhysStore, &u64Data, sizeof(u64Data));
2851 if (RT_FAILURE(rc))
2852 {
2853 LogFunc(("Cmd(%#x): Failed to write StoreData (%#RX64) to %#RGp, rc=%Rrc\n", bCmd, u64Data,
2854 GCPhysStore, rc));
2855 iommuAmdInitCmdHwErrorEvent(GCPhysStore, (PEVT_CMD_HW_ERR_T)pEvtError);
2856 return VERR_IOMMU_CMD_HW_ERROR;
2857 }
2858 }
2859
2860 /* If the command requests an interrupt and completion wait interrupts are enabled, raise it. */
2861 if (pCmdComWait->n.u1Interrupt)
2862 {
2863 IOMMU_LOCK(pDevIns);
2864 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_COMPLETION_WAIT_INTR);
2865 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
2866 bool const fRaiseInt = Ctrl.n.u1CompWaitIntrEn;
2867 IOMMU_UNLOCK(pDevIns);
2868
2869 if (fRaiseInt)
2870 iommuAmdRaiseMsiInterrupt(pDevIns);
2871 }
2872 return VINF_SUCCESS;
2873 }
2874 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2875 return VERR_IOMMU_CMD_INVALID_FORMAT;
2876 }
2877
2878 case IOMMU_CMD_INV_DEV_TAB_ENTRY:
2879 {
2880 /** @todo IOMMU: Implement this once we implement IOTLB. Pretend success until
2881 * then. */
2882 STAM_COUNTER_INC(&pThis->StatCmdInvDte);
2883 return VINF_SUCCESS;
2884 }
2885
2886 case IOMMU_CMD_INV_IOMMU_PAGES:
2887 {
2888 /** @todo IOMMU: Implement this once we implement IOTLB. Pretend success until
2889 * then. */
2890 STAM_COUNTER_INC(&pThis->StatCmdInvIommuPages);
2891 return VINF_SUCCESS;
2892 }
2893
2894 case IOMMU_CMD_INV_IOTLB_PAGES:
2895 {
2896 STAM_COUNTER_INC(&pThis->StatCmdInvIotlbPages);
2897
2898 uint32_t const uCapHdr = PDMPciDevGetDWord(pDevIns->apPciDevs[0], IOMMU_PCI_OFF_CAP_HDR);
2899 if (RT_BF_GET(uCapHdr, IOMMU_BF_CAPHDR_IOTLB_SUP))
2900 {
2901 /** @todo IOMMU: Implement remote IOTLB invalidation. */
2902 return VERR_NOT_IMPLEMENTED;
2903 }
2904 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2905 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2906 }
2907
2908 case IOMMU_CMD_INV_INTR_TABLE:
2909 {
2910 /** @todo IOMMU: Implement this once we implement IOTLB. Pretend success until
2911 * then. */
2912 STAM_COUNTER_INC(&pThis->StatCmdInvIntrTable);
2913 return VINF_SUCCESS;
2914 }
2915
2916 case IOMMU_CMD_PREFETCH_IOMMU_PAGES:
2917 {
2918 STAM_COUNTER_INC(&pThis->StatCmdPrefIommuPages);
2919 if (pThis->ExtFeat.n.u1PrefetchSup)
2920 {
2921 /** @todo IOMMU: Implement prefetch. Pretend success until then. */
2922 return VINF_SUCCESS;
2923 }
2924 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2925 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2926 }
2927
2928 case IOMMU_CMD_COMPLETE_PPR_REQ:
2929 {
2930 STAM_COUNTER_INC(&pThis->StatCmdCompletePprReq);
2931
2932 /* We don't support PPR requests yet. */
2933 Assert(!pThis->ExtFeat.n.u1PprSup);
2934 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2935 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2936 }
2937
2938 case IOMMU_CMD_INV_IOMMU_ALL:
2939 {
2940 STAM_COUNTER_INC(&pThis->StatCmdInvIommuAll);
2941
2942 if (pThis->ExtFeat.n.u1InvAllSup)
2943 {
2944 /** @todo IOMMU: Invalidate all. Pretend success until then. */
2945 return VINF_SUCCESS;
2946 }
2947 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2948 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2949 }
2950 }
2951
2952 STAM_COUNTER_DEC(&pThis->StatCmd);
2953 LogFunc(("Cmd(%#x): Unrecognized\n", bCmd));
2954 iommuAmdInitIllegalCmdEvent(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
2955 return VERR_IOMMU_CMD_NOT_SUPPORTED;
2956}
2957
2958
2959/**
2960 * The IOMMU command thread.
2961 *
2962 * @returns VBox status code.
2963 * @param pDevIns The IOMMU device instance.
2964 * @param pThread The command thread.
2965 */
2966static DECLCALLBACK(int) iommuAmdR3CmdThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
2967{
2968 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
2969
2970 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
2971 return VINF_SUCCESS;
2972
2973 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
2974 {
2975 /*
2976 * Sleep perpetually until we are woken up to process commands.
2977 */
2978 {
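/* Mark the thread as sleeping before consuming the signal so that a wake-up
   posted in between is not lost; the waker side is expected to set
   fCmdThreadSignaled before signalling the event semaphore. */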
2979 ASMAtomicWriteBool(&pThis->fCmdThreadSleeping, true);
2980 bool fSignaled = ASMAtomicXchgBool(&pThis->fCmdThreadSignaled, false);
2981 if (!fSignaled)
2982 {
2983 Assert(ASMAtomicReadBool(&pThis->fCmdThreadSleeping));
2984 int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEvtCmdThread, RT_INDEFINITE_WAIT);
2985 AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc), rc);
2986 if (RT_UNLIKELY(pThread->enmState != PDMTHREADSTATE_RUNNING))
2987 break;
2988 Log5Func(("Woken up with rc=%Rrc\n", rc));
2989 ASMAtomicWriteBool(&pThis->fCmdThreadSignaled, false);
2990 }
2991 ASMAtomicWriteBool(&pThis->fCmdThreadSleeping, false);
2992 }
2993
2994 /*
2995 * Fetch and process IOMMU commands.
2996 */
2997 /** @todo r=ramshankar: This employs a simplistic method of fetching commands (one
2998 * at a time) and is expensive due to calls to PGM for fetching guest memory.
2999 * We could optimize by fetching a bunch of commands at a time reducing
3000 * number of calls to PGM. In the longer run we could lock the memory and
3001 * mappings and accessing them directly. */
3002 IOMMU_LOCK(pDevIns);
3003
3004 IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
3005 if (Status.n.u1CmdBufRunning)
3006 {
3007 /* Get the offset we need to read the command from memory (circular buffer offset). */
3008 uint32_t const cbCmdBuf = iommuAmdGetTotalBufLength(pThis->CmdBufBaseAddr.n.u4Len);
3009 uint32_t offHead = pThis->CmdBufHeadPtr.n.off;
3010 Assert(!(offHead & ~IOMMU_CMD_BUF_HEAD_PTR_VALID_MASK));
3011 Assert(offHead < cbCmdBuf);
3012 while (offHead != pThis->CmdBufTailPtr.n.off)
3013 {
3014 /* Read the command from memory. */
3015 CMD_GENERIC_T Cmd;
3016 RTGCPHYS const GCPhysCmd = (pThis->CmdBufBaseAddr.n.u40Base << X86_PAGE_4K_SHIFT) + offHead;
3017 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysCmd, &Cmd, sizeof(Cmd));
3018 if (RT_SUCCESS(rc))
3019 {
3020 /* Increment the command buffer head pointer. */
3021 offHead = (offHead + sizeof(CMD_GENERIC_T)) % cbCmdBuf;
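                    /* E.g. with the default 4K buffer (256 16-byte entries), the offset
                       wraps 0xff0 -> 0x000 here. */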
3022 pThis->CmdBufHeadPtr.n.off = offHead;
3023
3024 /* Process the fetched command. */
3025 EVT_GENERIC_T EvtError;
3026 IOMMU_UNLOCK(pDevIns);
3027 rc = iommuAmdR3ProcessCmd(pDevIns, &Cmd, GCPhysCmd, &EvtError);
3028 IOMMU_LOCK(pDevIns);
3029 if (RT_FAILURE(rc))
3030 {
3031 if ( rc == VERR_IOMMU_CMD_NOT_SUPPORTED
3032 || rc == VERR_IOMMU_CMD_INVALID_FORMAT)
3033 {
3034 Assert(EvtError.n.u4EvtCode == IOMMU_EVT_ILLEGAL_CMD_ERROR);
3035 iommuAmdRaiseIllegalCmdEvent(pDevIns, (PCEVT_ILLEGAL_CMD_ERR_T)&EvtError);
3036 }
3037 else if (rc == VERR_IOMMU_CMD_HW_ERROR)
3038 {
3039 Assert(EvtError.n.u4EvtCode == IOMMU_EVT_COMMAND_HW_ERROR);
3040 iommuAmdRaiseCmdHwErrorEvent(pDevIns, (PCEVT_CMD_HW_ERR_T)&EvtError);
3041 }
3042 break;
3043 }
3044 }
3045 else
3046 {
3047 EVT_CMD_HW_ERR_T EvtCmdHwErr;
3048 iommuAmdInitCmdHwErrorEvent(GCPhysCmd, &EvtCmdHwErr);
3049 iommuAmdRaiseCmdHwErrorEvent(pDevIns, &EvtCmdHwErr);
3050 break;
3051 }
3052 }
3053 }
3054
3055 IOMMU_UNLOCK(pDevIns);
3056 }
3057
3058 LogFlowFunc(("Command thread terminating\n"));
3059 return VINF_SUCCESS;
3060}
3061
3062
3063/**
3064 * Wakes up the command thread so it can respond to a state change.
3065 *
3066 * @returns VBox status code.
3067 * @param pDevIns The IOMMU device instance.
3068 * @param pThread The command thread.
3069 */
3070static DECLCALLBACK(int) iommuAmdR3CmdThreadWakeUp(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
3071{
3072 RT_NOREF(pThread);
3073 LogFlowFunc(("\n"));
3074 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3075 return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtCmdThread);
3076}
3077
3078
3079/**
3080 * @callback_method_impl{FNPCICONFIGREAD}
3081 */
3082static DECLCALLBACK(VBOXSTRICTRC) iommuAmdR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t uAddress,
3083 unsigned cb, uint32_t *pu32Value)
3084{
3085 /** @todo IOMMU: PCI config read stat counter. */
3086 VBOXSTRICTRC rcStrict = PDMDevHlpPCIConfigRead(pDevIns, pPciDev, uAddress, cb, pu32Value);
3087 Log3Func(("Reading PCI config register %#x (cb=%u) -> %#x %Rrc\n", uAddress, cb, *pu32Value,
3088 VBOXSTRICTRC_VAL(rcStrict)));
3089 return rcStrict;
3090}
3091
3092
3093/**
3094 * @callback_method_impl{FNPCICONFIGWRITE}
3095 */
3096static DECLCALLBACK(VBOXSTRICTRC) iommuAmdR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t uAddress,
3097 unsigned cb, uint32_t u32Value)
3098{
3099 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3100
3101 /*
3102 * Discard writes to read-only registers that are specific to the IOMMU.
3103 * Other common PCI registers are handled by the generic code, see devpciR3IsConfigByteWritable().
3104 * See PCI spec. 6.1. "Configuration Space Organization".
3105 */
3106 switch (uAddress)
3107 {
3108 case IOMMU_PCI_OFF_CAP_HDR: /* All bits are read-only. */
3109 case IOMMU_PCI_OFF_RANGE_REG: /* We don't have any devices integrated with the IOMMU. */
3110 case IOMMU_PCI_OFF_MISCINFO_REG_0: /* We don't support MSI-X. */
3111 case IOMMU_PCI_OFF_MISCINFO_REG_1: /* We don't support guest-address translation. */
3112 {
3113 LogFunc(("PCI config write (%#RX32) to read-only register %#x -> Ignored\n", u32Value, uAddress));
3114 return VINF_SUCCESS;
3115 }
3116 }
3117
3118 IOMMU_LOCK(pDevIns);
3119
3120 VBOXSTRICTRC rcStrict = VERR_INVALID_FUNCTION;
3121 switch (uAddress)
3122 {
3123 case IOMMU_PCI_OFF_BASE_ADDR_REG_LO:
3124 {
3125 if (pThis->IommuBar.n.u1Enable)
3126 {
3127 rcStrict = VINF_SUCCESS;
3128 LogFunc(("Writing Base Address (Lo) when it's already enabled -> Ignored\n"));
3129 break;
3130 }
3131
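            /* The guest normally writes the high dword first, then this low dword with the
               Enable bit (bit 0) set, at which point the MMIO region gets mapped below
               (sequence per my reading of the AMD IOMMU spec's BAR usage). */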
3132 pThis->IommuBar.au32[0] = u32Value & IOMMU_BAR_VALID_MASK;
 rcStrict = VINF_SUCCESS;
3133 if (pThis->IommuBar.n.u1Enable)
3134 {
3135 Assert(pThis->hMmio != NIL_IOMMMIOHANDLE);
3136 Assert(PDMDevHlpMmioGetMappingAddress(pDevIns, pThis->hMmio) == NIL_RTGCPHYS);
3137 Assert(!pThis->ExtFeat.n.u1PerfCounterSup); /* Base is 16K aligned when performance counters aren't supported. */
3138 RTGCPHYS const GCPhysMmioBase = RT_MAKE_U64(pThis->IommuBar.au32[0] & 0xffffc000, pThis->IommuBar.au32[1]);
3139 rcStrict = PDMDevHlpMmioMap(pDevIns, pThis->hMmio, GCPhysMmioBase);
3140 if (RT_FAILURE(rcStrict))
3141 LogFunc(("Failed to map IOMMU MMIO region at %#RGp. rc=%Rrc\n", GCPhysMmioBase, rcStrict));
3142 }
3143 break;
3144 }
3145
3146 case IOMMU_PCI_OFF_BASE_ADDR_REG_HI:
3147 {
3148 if (!pThis->IommuBar.n.u1Enable)
3149 {
 pThis->IommuBar.au32[1] = u32Value;
 rcStrict = VINF_SUCCESS;
 }
3150 else
3151 {
3152 rcStrict = VINF_SUCCESS;
3153 LogFunc(("Writing Base Address (Hi) when it's already enabled -> Ignored\n"));
3154 }
3155 break;
3156 }
3157
3158 case IOMMU_PCI_OFF_MSI_CAP_HDR:
3159 {
3160 u32Value |= RT_BIT(23); /* 64-bit MSI addresses must always be enabled for the IOMMU. */
3161 RT_FALL_THRU();
3162 }
3163 default:
3164 {
3165 rcStrict = PDMDevHlpPCIConfigWrite(pDevIns, pPciDev, uAddress, cb, u32Value);
3166 break;
3167 }
3168 }
3169
3170 IOMMU_UNLOCK(pDevIns);
3171
3172 Log3Func(("PCI config write: %#x -> To %#x (%u) %Rrc\n", u32Value, uAddress, cb, VBOXSTRICTRC_VAL(rcStrict)));
3173 return rcStrict;
3174}
3175
3176
3177/**
3178 * @callback_method_impl{FNDBGFHANDLERDEV}
3179 */
3180static DECLCALLBACK(void) iommuAmdR3DbgInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
3181{
3182 PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3183 PCPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
3184 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
3185
3186 LogFlowFunc(("pThis=%p pszArgs=%s\n", pThis, pszArgs));
3187 bool fVerbose;
3188 if ( pszArgs
3189 && !strncmp(pszArgs, RT_STR_TUPLE("verbose")))
3190 fVerbose = true;
3191 else
3192 fVerbose = false;
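    /* For example, this handler can be reached from the host via the debug facility:
           VBoxManage debugvm <vmname> info iommu verbose
       (illustrative invocation; frontends may differ). */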
3193
3194 pHlp->pfnPrintf(pHlp, "AMD-IOMMU:\n");
3195 /* Device Table Base Addresses (all segments). */
3196 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aDevTabBaseAddrs); i++)
3197 {
3198 DEV_TAB_BAR_T const DevTabBar = pThis->aDevTabBaseAddrs[i];
3199 pHlp->pfnPrintf(pHlp, " Device Table BAR %u = %#RX64\n", i, DevTabBar.u64);
3200 if (fVerbose)
3201 {
3202 pHlp->pfnPrintf(pHlp, " Size = %#x (%u bytes)\n", DevTabBar.n.u9Size,
3203 IOMMU_GET_DEV_TAB_SIZE(DevTabBar.n.u9Size));
3204 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", DevTabBar.n.u40Base << X86_PAGE_4K_SHIFT);
3205 }
3206 }
3207 /* Command Buffer Base Address Register. */
3208 {
3209 CMD_BUF_BAR_T const CmdBufBar = pThis->CmdBufBaseAddr;
3210 uint8_t const uEncodedLen = CmdBufBar.n.u4Len;
3211 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3212 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3213 pHlp->pfnPrintf(pHlp, " Command Buffer BAR = %#RX64\n", CmdBufBar.u64);
3214 if (fVerbose)
3215 {
3216 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", CmdBufBar.n.u40Base << X86_PAGE_4K_SHIFT);
3217 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3218 cEntries, cbBuffer);
3219 }
3220 }
3221 /* Event Log Base Address Register. */
3222 {
3223 EVT_LOG_BAR_T const EvtLogBar = pThis->EvtLogBaseAddr;
3224 uint8_t const uEncodedLen = EvtLogBar.n.u4Len;
3225 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3226 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3227 pHlp->pfnPrintf(pHlp, " Event Log BAR = %#RX64\n", EvtLogBar.u64);
3228 if (fVerbose)
3229 {
3230 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", EvtLogBar.n.u40Base << X86_PAGE_4K_SHIFT);
3231 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3232 cEntries, cbBuffer);
3233 }
3234 }
3235 /* IOMMU Control Register. */
3236 {
3237 IOMMU_CTRL_T const Ctrl = pThis->Ctrl;
3238 pHlp->pfnPrintf(pHlp, " Control = %#RX64\n", Ctrl.u64);
3239 if (fVerbose)
3240 {
3241 pHlp->pfnPrintf(pHlp, " IOMMU enable = %RTbool\n", Ctrl.n.u1IommuEn);
3242 pHlp->pfnPrintf(pHlp, " HT Tunnel translation enable = %RTbool\n", Ctrl.n.u1HtTunEn);
3243 pHlp->pfnPrintf(pHlp, " Event log enable = %RTbool\n", Ctrl.n.u1EvtLogEn);
3244 pHlp->pfnPrintf(pHlp, " Event log interrupt enable = %RTbool\n", Ctrl.n.u1EvtIntrEn);
3245 pHlp->pfnPrintf(pHlp, " Completion wait interrupt enable = %RTbool\n", Ctrl.n.u1CompWaitIntrEn);
3246 pHlp->pfnPrintf(pHlp, " Invalidation timeout = %u\n", Ctrl.n.u3InvTimeOut);
3247 pHlp->pfnPrintf(pHlp, " Pass posted write = %RTbool\n", Ctrl.n.u1PassPW);
3248 pHlp->pfnPrintf(pHlp, " Response Pass posted write = %RTbool\n", Ctrl.n.u1ResPassPW);
3249 pHlp->pfnPrintf(pHlp, " Coherent = %RTbool\n", Ctrl.n.u1Coherent);
3250 pHlp->pfnPrintf(pHlp, " Isochronous = %RTbool\n", Ctrl.n.u1Isoc);
3251 pHlp->pfnPrintf(pHlp, " Command buffer enable = %RTbool\n", Ctrl.n.u1CmdBufEn);
3252 pHlp->pfnPrintf(pHlp, " PPR log enable = %RTbool\n", Ctrl.n.u1PprLogEn);
3253 pHlp->pfnPrintf(pHlp, " PPR interrupt enable = %RTbool\n", Ctrl.n.u1PprIntrEn);
3254 pHlp->pfnPrintf(pHlp, " PPR enable = %RTbool\n", Ctrl.n.u1PprEn);
3255 pHlp->pfnPrintf(pHlp, " Guest translation enable = %RTbool\n", Ctrl.n.u1GstTranslateEn);
3256 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC enable = %RTbool\n", Ctrl.n.u1GstVirtApicEn);
3257 pHlp->pfnPrintf(pHlp, " CRW = %#x\n", Ctrl.n.u4Crw);
3258 pHlp->pfnPrintf(pHlp, " SMI filter enable = %RTbool\n", Ctrl.n.u1SmiFilterEn);
3259 pHlp->pfnPrintf(pHlp, " Self-writeback disable = %RTbool\n", Ctrl.n.u1SelfWriteBackDis);
3260 pHlp->pfnPrintf(pHlp, " SMI filter log enable = %RTbool\n", Ctrl.n.u1SmiFilterLogEn);
3261 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC mode enable = %#x\n", Ctrl.n.u3GstVirtApicModeEn);
3262 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC GA log enable = %RTbool\n", Ctrl.n.u1GstLogEn);
3263 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC interrupt enable = %RTbool\n", Ctrl.n.u1GstIntrEn);
3264 pHlp->pfnPrintf(pHlp, " Dual PPR log enable = %#x\n", Ctrl.n.u2DualPprLogEn);
3265 pHlp->pfnPrintf(pHlp, " Dual event log enable = %#x\n", Ctrl.n.u2DualEvtLogEn);
3266 pHlp->pfnPrintf(pHlp, " Device table segmentation enable = %#x\n", Ctrl.n.u3DevTabSegEn);
3267 pHlp->pfnPrintf(pHlp, " Privilege abort enable = %#x\n", Ctrl.n.u2PrivAbortEn);
3268 pHlp->pfnPrintf(pHlp, " PPR auto response enable = %RTbool\n", Ctrl.n.u1PprAutoRespEn);
3269 pHlp->pfnPrintf(pHlp, " MARC enable = %RTbool\n", Ctrl.n.u1MarcEn);
3270 pHlp->pfnPrintf(pHlp, " Block StopMark enable = %RTbool\n", Ctrl.n.u1BlockStopMarkEn);
3271 pHlp->pfnPrintf(pHlp, " PPR auto response always-on enable = %RTbool\n", Ctrl.n.u1PprAutoRespAlwaysOnEn);
3272 pHlp->pfnPrintf(pHlp, " Domain IDPNE = %RTbool\n", Ctrl.n.u1DomainIDPNE);
3273 pHlp->pfnPrintf(pHlp, " Enhanced PPR handling = %RTbool\n", Ctrl.n.u1EnhancedPpr);
3274 pHlp->pfnPrintf(pHlp, " Host page table access/dirty bit update = %#x\n", Ctrl.n.u2HstAccDirtyBitUpdate);
3275 pHlp->pfnPrintf(pHlp, " Guest page table dirty bit disable = %RTbool\n", Ctrl.n.u1GstDirtyUpdateDis);
3276 pHlp->pfnPrintf(pHlp, " x2APIC enable = %RTbool\n", Ctrl.n.u1X2ApicEn);
3277 pHlp->pfnPrintf(pHlp, " x2APIC interrupt enable = %RTbool\n", Ctrl.n.u1X2ApicIntrGenEn);
3278 pHlp->pfnPrintf(pHlp, " Guest page table access bit update disable = %RTbool\n", Ctrl.n.u1GstAccessUpdateDis);
3279 }
3280 }
3281 /* Exclusion Base Address Register. */
3282 {
3283 IOMMU_EXCL_RANGE_BAR_T const ExclRangeBar = pThis->ExclRangeBaseAddr;
3284 pHlp->pfnPrintf(pHlp, " Exclusion BAR = %#RX64\n", ExclRangeBar.u64);
3285 if (fVerbose)
3286 {
3287 pHlp->pfnPrintf(pHlp, " Exclusion enable = %RTbool\n", ExclRangeBar.n.u1ExclEnable);
3288 pHlp->pfnPrintf(pHlp, " Allow all devices = %RTbool\n", ExclRangeBar.n.u1AllowAll);
3289 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n",
3290 ExclRangeBar.n.u40ExclRangeBase << X86_PAGE_4K_SHIFT);
3291 }
3292 }
3293 /* Exclusion Range Limit Register. */
3294 {
3295 IOMMU_EXCL_RANGE_LIMIT_T const ExclRangeLimit = pThis->ExclRangeLimit;
3296 pHlp->pfnPrintf(pHlp, " Exclusion Range Limit = %#RX64\n", ExclRangeLimit.u64);
3297 if (fVerbose)
3298 pHlp->pfnPrintf(pHlp, " Range limit = %#RX64\n", ExclRangeLimit.n.u52ExclLimit);
3299 }
3300 /* Extended Feature Register. */
3301 {
3302 IOMMU_EXT_FEAT_T ExtFeat = pThis->ExtFeat;
3303 pHlp->pfnPrintf(pHlp, " Extended Feature Register = %#RX64\n", ExtFeat.u64);
3304 if (fVerbose)
3305 {
3306 pHlp->pfnPrintf(pHlp, " Prefetch support = %RTbool\n", ExtFeat.n.u1PrefetchSup);
3307 pHlp->pfnPrintf(pHlp, " PPR support = %RTbool\n", ExtFeat.n.u1PprSup);
3308 pHlp->pfnPrintf(pHlp, " x2APIC support = %RTbool\n", ExtFeat.n.u1X2ApicSup);
3309 pHlp->pfnPrintf(pHlp, " NX and privilege level support = %RTbool\n", ExtFeat.n.u1NoExecuteSup);
3310 pHlp->pfnPrintf(pHlp, " Guest translation support = %RTbool\n", ExtFeat.n.u1GstTranslateSup);
3311 pHlp->pfnPrintf(pHlp, " Invalidate-All command support = %RTbool\n", ExtFeat.n.u1InvAllSup);
3312 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC support = %RTbool\n", ExtFeat.n.u1GstVirtApicSup);
3313 pHlp->pfnPrintf(pHlp, " Hardware error register support = %RTbool\n", ExtFeat.n.u1HwErrorSup);
3314 pHlp->pfnPrintf(pHlp, " Performance counters support = %RTbool\n", ExtFeat.n.u1PerfCounterSup);
3315 pHlp->pfnPrintf(pHlp, " Host address translation size = %#x\n", ExtFeat.n.u2HostAddrTranslateSize);
3316 pHlp->pfnPrintf(pHlp, " Guest address translation size = %#x\n", ExtFeat.n.u2GstAddrTranslateSize);
3317 pHlp->pfnPrintf(pHlp, " Guest CR3 root table level support = %#x\n", ExtFeat.n.u2GstCr3RootTblLevel);
3318 pHlp->pfnPrintf(pHlp, " SMI filter register support = %#x\n", ExtFeat.n.u2SmiFilterSup);
3319 pHlp->pfnPrintf(pHlp, " SMI filter register count = %#x\n", ExtFeat.n.u3SmiFilterCount);
3320 pHlp->pfnPrintf(pHlp, " Guest virtual-APIC modes support = %#x\n", ExtFeat.n.u3GstVirtApicModeSup);
3321 pHlp->pfnPrintf(pHlp, " Dual PPR log support = %#x\n", ExtFeat.n.u2DualPprLogSup);
3322 pHlp->pfnPrintf(pHlp, " Dual event log support = %#x\n", ExtFeat.n.u2DualEvtLogSup);
3323 pHlp->pfnPrintf(pHlp, " Maximum PASID = %#x\n", ExtFeat.n.u5MaxPasidSup);
3324 pHlp->pfnPrintf(pHlp, " User/supervisor page protection support = %RTbool\n", ExtFeat.n.u1UserSupervisorSup);
3325 pHlp->pfnPrintf(pHlp, " Device table segments supported = %#x (%u)\n", ExtFeat.n.u2DevTabSegSup,
3326 g_acDevTabSegs[ExtFeat.n.u2DevTabSegSup]);
3327 pHlp->pfnPrintf(pHlp, " PPR log overflow early warning support = %RTbool\n", ExtFeat.n.u1PprLogOverflowWarn);
3328 pHlp->pfnPrintf(pHlp, " PPR auto response support = %RTbool\n", ExtFeat.n.u1PprAutoRespSup);
3329 pHlp->pfnPrintf(pHlp, " MARC support = %#x\n", ExtFeat.n.u2MarcSup);
3330 pHlp->pfnPrintf(pHlp, " Block StopMark message support = %RTbool\n", ExtFeat.n.u1BlockStopMarkSup);
3331 pHlp->pfnPrintf(pHlp, " Performance optimization support = %RTbool\n", ExtFeat.n.u1PerfOptSup);
3332 pHlp->pfnPrintf(pHlp, " MSI capability MMIO access support = %RTbool\n", ExtFeat.n.u1MsiCapMmioSup);
3333 pHlp->pfnPrintf(pHlp, " Guest I/O protection support = %RTbool\n", ExtFeat.n.u1GstIoSup);
3334 pHlp->pfnPrintf(pHlp, " Host access support = %RTbool\n", ExtFeat.n.u1HostAccessSup);
3335 pHlp->pfnPrintf(pHlp, " Enhanced PPR handling support = %RTbool\n", ExtFeat.n.u1EnhancedPprSup);
3336 pHlp->pfnPrintf(pHlp, " Attribute forward supported = %RTbool\n", ExtFeat.n.u1AttrForwardSup);
3337 pHlp->pfnPrintf(pHlp, " Host dirty support = %RTbool\n", ExtFeat.n.u1HostDirtySup);
3338 pHlp->pfnPrintf(pHlp, " Invalidate IOTLB type support = %RTbool\n", ExtFeat.n.u1InvIoTlbTypeSup);
3339 pHlp->pfnPrintf(pHlp, " Guest page table access bit hw disable = %RTbool\n", ExtFeat.n.u1GstUpdateDisSup);
3340 pHlp->pfnPrintf(pHlp, " Force physical dest for remapped intr. = %RTbool\n", ExtFeat.n.u1ForcePhysDstSup);
3341 }
3342 }
3343 /* PPR Log Base Address Register. */
3344 {
3345 PPR_LOG_BAR_T PprLogBar = pThis->PprLogBaseAddr;
3346 uint8_t const uEncodedLen = PprLogBar.n.u4Len;
3347 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3348 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3349 pHlp->pfnPrintf(pHlp, " PPR Log BAR = %#RX64\n", PprLogBar.u64);
3350 if (fVerbose)
3351 {
3352 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", PprLogBar.n.u40Base << X86_PAGE_4K_SHIFT);
3353 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3354 cEntries, cbBuffer);
3355 }
3356 }
3357 /* Hardware Event (Hi) Register. */
3358 {
3359 IOMMU_HW_EVT_HI_T HwEvtHi = pThis->HwEvtHi;
3360 pHlp->pfnPrintf(pHlp, " Hardware Event (Hi) = %#RX64\n", HwEvtHi.u64);
3361 if (fVerbose)
3362 {
3363 pHlp->pfnPrintf(pHlp, " First operand = %#RX64\n", HwEvtHi.n.u60FirstOperand);
3364 pHlp->pfnPrintf(pHlp, " Event code = %#RX8\n", HwEvtHi.n.u4EvtCode);
3365 }
3366 }
3367 /* Hardware Event (Lo) Register. */
3368 pHlp->pfnPrintf(pHlp, " Hardware Event (Lo) = %#RX64\n", pThis->HwEvtLo);
3369 /* Hardware Event Status. */
3370 {
3371 IOMMU_HW_EVT_STATUS_T HwEvtStatus = pThis->HwEvtStatus;
3372 pHlp->pfnPrintf(pHlp, " Hardware Event Status = %#RX64\n", HwEvtStatus.u64);
3373 if (fVerbose)
3374 {
3375 pHlp->pfnPrintf(pHlp, " Valid = %RTbool\n", HwEvtStatus.n.u1Valid);
3376 pHlp->pfnPrintf(pHlp, " Overflow = %RTbool\n", HwEvtStatus.n.u1Overflow);
3377 }
3378 }
3379 /* Guest Virtual-APIC Log Base Address Register. */
3380 {
3381 GALOG_BAR_T const GALogBar = pThis->GALogBaseAddr;
3382 uint8_t const uEncodedLen = GALogBar.n.u4Len;
3383 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3384 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3385 pHlp->pfnPrintf(pHlp, " Guest Log BAR = %#RX64\n", GALogBar.u64);
3386 if (fVerbose)
3387 {
3388 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", GALogBar.n.u40Base << X86_PAGE_4K_SHIFT);
3389 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3390 cEntries, cbBuffer);
3391 }
3392 }
3393 /* Guest Virtual-APIC Log Tail Address Register. */
3394 {
3395 GALOG_TAIL_ADDR_T GALogTail = pThis->GALogTailAddr;
3396 pHlp->pfnPrintf(pHlp, " Guest Log Tail Address = %#RX64\n", GALogTail.u64);
3397 if (fVerbose)
3398 pHlp->pfnPrintf(pHlp, " Tail address = %#RX64\n", GALogTail.n.u40GALogTailAddr);
3399 }
3400 /* PPR Log B Base Address Register. */
3401 {
3402 PPR_LOG_B_BAR_T PprLogBBar = pThis->PprLogBBaseAddr;
3403 uint8_t const uEncodedLen = PprLogBBar.n.u4Len;
3404 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3405 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3406 pHlp->pfnPrintf(pHlp, " PPR Log B BAR = %#RX64\n", PprLogBBar.u64);
3407 if (fVerbose)
3408 {
3409 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", PprLogBBar.n.u40Base << X86_PAGE_4K_SHIFT);
3410 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3411 cEntries, cbBuffer);
3412 }
3413 }
3414 /* Event Log B Base Address Register. */
3415 {
3416 EVT_LOG_B_BAR_T EvtLogBBar = pThis->EvtLogBBaseAddr;
3417 uint8_t const uEncodedLen = EvtLogBBar.n.u4Len;
3418 uint32_t const cEntries = iommuAmdGetBufMaxEntries(uEncodedLen);
3419 uint32_t const cbBuffer = iommuAmdGetTotalBufLength(uEncodedLen);
3420 pHlp->pfnPrintf(pHlp, " Event Log B BAR = %#RX64\n", EvtLogBBar.u64);
3421 if (fVerbose)
3422 {
3423 pHlp->pfnPrintf(pHlp, " Base address = %#RX64\n", EvtLogBBar.n.u40Base << X86_PAGE_4K_SHIFT);
3424 pHlp->pfnPrintf(pHlp, " Length = %u (%u entries, %u bytes)\n", uEncodedLen,
3425 cEntries, cbBuffer);
3426 }
3427 }
3428 /* Device-Specific Feature Extension Register. */
3429 {
3430 DEV_SPECIFIC_FEAT_T const DevSpecificFeat = pThis->DevSpecificFeat;
3431 pHlp->pfnPrintf(pHlp, " Device-specific Feature = %#RX64\n", DevSpecificFeat.u64);
3432 if (fVerbose)
3433 {
3434 pHlp->pfnPrintf(pHlp, " Feature = %#RX32\n", DevSpecificFeat.n.u24DevSpecFeat);
3435 pHlp->pfnPrintf(pHlp, " Minor revision ID = %#x\n", DevSpecificFeat.n.u4RevMinor);
3436 pHlp->pfnPrintf(pHlp, " Major revision ID = %#x\n", DevSpecificFeat.n.u4RevMajor);
3437 }
3438 }
3439 /* Device-Specific Control Extension Register. */
3440 {
3441 DEV_SPECIFIC_CTRL_T const DevSpecificCtrl = pThis->DevSpecificCtrl;
3442 pHlp->pfnPrintf(pHlp, " Device-specific Control = %#RX64\n", DevSpecificCtrl.u64);
3443 if (fVerbose)
3444 {
3445 pHlp->pfnPrintf(pHlp, " Control = %#RX32\n", DevSpecificCtrl.n.u24DevSpecCtrl);
3446 pHlp->pfnPrintf(pHlp, " Minor revision ID = %#x\n", DevSpecificCtrl.n.u4RevMinor);
3447 pHlp->pfnPrintf(pHlp, " Major revision ID = %#x\n", DevSpecificCtrl.n.u4RevMajor);
3448 }
3449 }
3450 /* Device-Specific Status Extension Register. */
3451 {
3452 DEV_SPECIFIC_STATUS_T const DevSpecificStatus = pThis->DevSpecificStatus;
3453 pHlp->pfnPrintf(pHlp, " Device-specific Status = %#RX64\n", DevSpecificStatus.u64);
3454 if (fVerbose)
3455 {
3456 pHlp->pfnPrintf(pHlp, " Status = %#RX32\n", DevSpecificStatus.n.u24DevSpecStatus);
3457 pHlp->pfnPrintf(pHlp, " Minor revision ID = %#x\n", DevSpecificStatus.n.u4RevMinor);
3458 pHlp->pfnPrintf(pHlp, " Major revision ID = %#x\n", DevSpecificStatus.n.u4RevMajor);
3459 }
3460 }
3461 /* Miscellaneous Information Register (Lo and Hi). */
3462 {
3463 MSI_MISC_INFO_T const MiscInfo = pThis->MiscInfo;
3464 pHlp->pfnPrintf(pHlp, " Misc. Info. Register = %#RX64\n", MiscInfo.u64);
3465 if (fVerbose)
3466 {
3467 pHlp->pfnPrintf(pHlp, " Event Log MSI number = %#x\n", MiscInfo.n.u5MsiNumEvtLog);
3468 pHlp->pfnPrintf(pHlp, " Guest Virtual-Address Size = %#x\n", MiscInfo.n.u3GstVirtAddrSize);
3469 pHlp->pfnPrintf(pHlp, " Physical Address Size = %#x\n", MiscInfo.n.u7PhysAddrSize);
3470 pHlp->pfnPrintf(pHlp, " Virtual-Address Size = %#x\n", MiscInfo.n.u7VirtAddrSize);
3471 pHlp->pfnPrintf(pHlp, " HT Transport ATS Range Reserved = %RTbool\n", MiscInfo.n.u1HtAtsResv);
3472 pHlp->pfnPrintf(pHlp, " PPR MSI number = %#x\n", MiscInfo.n.u5MsiNumPpr);
3473 pHlp->pfnPrintf(pHlp, " GA Log MSI number = %#x\n", MiscInfo.n.u5MsiNumGa);
3474 }
3475 }
3476 /* MSI Capability Header. */
3477 {
3478 MSI_CAP_HDR_T MsiCapHdr;
3479 MsiCapHdr.u32 = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_CAP_HDR);
3480 pHlp->pfnPrintf(pHlp, " MSI Capability Header = %#RX32\n", MsiCapHdr.u32);
3481 if (fVerbose)
3482 {
3483 pHlp->pfnPrintf(pHlp, " Capability ID = %#x\n", MsiCapHdr.n.u8MsiCapId);
3484 pHlp->pfnPrintf(pHlp, " Capability Ptr (PCI config offset) = %#x\n", MsiCapHdr.n.u8MsiCapPtr);
3485 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", MsiCapHdr.n.u1MsiEnable);
3486 pHlp->pfnPrintf(pHlp, " Multi-message capability = %#x\n", MsiCapHdr.n.u3MsiMultiMessCap);
3487 pHlp->pfnPrintf(pHlp, " Multi-message enable = %#x\n", MsiCapHdr.n.u3MsiMultiMessEn);
3488 }
3489 }
3490 /* MSI Address Register (Lo and Hi). */
3491 {
3492 uint32_t const uMsiAddrLo = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO);
3493 uint32_t const uMsiAddrHi = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI);
3494 MSIADDR MsiAddr;
3495 MsiAddr.u64 = RT_MAKE_U64(uMsiAddrLo, uMsiAddrHi);
3496 pHlp->pfnPrintf(pHlp, " MSI Address = %#RX64\n", MsiAddr.u64);
3497 if (fVerbose)
3498 {
3499 pHlp->pfnPrintf(pHlp, " Destination mode = %#x\n", MsiAddr.n.u1DestMode);
3500 pHlp->pfnPrintf(pHlp, " Redirection hint = %#x\n", MsiAddr.n.u1RedirHint);
3501 pHlp->pfnPrintf(pHlp, " Destination Id = %#x\n", MsiAddr.n.u8DestId);
3502 pHlp->pfnPrintf(pHlp, " Address = %#RX32\n", MsiAddr.n.u12Addr);
3503 pHlp->pfnPrintf(pHlp, " Address (Hi) / Rsvd? = %#RX32\n", MsiAddr.n.u32Rsvd0);
3504 }
3505 }
3506 /* MSI Data. */
3507 {
3508 MSIDATA MsiData;
3509 MsiData.u32 = PDMPciDevGetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA);
3510 pHlp->pfnPrintf(pHlp, " MSI Data = %#RX32\n", MsiData.u32);
3511 if (fVerbose)
3512 {
3513 pHlp->pfnPrintf(pHlp, " Vector = %#x (%u)\n", MsiData.n.u8Vector,
3514 MsiData.n.u8Vector);
3515 pHlp->pfnPrintf(pHlp, " Delivery mode = %#x\n", MsiData.n.u3DeliveryMode);
3516 pHlp->pfnPrintf(pHlp, " Level = %#x\n", MsiData.n.u1Level);
3517 pHlp->pfnPrintf(pHlp, " Trigger mode = %s\n", MsiData.n.u1TriggerMode ?
3518 "level" : "edge");
3519 }
3520 }
3521 /* MSI Mapping Capability Header (HyperTransport, reporting all 0s currently). */
3522 {
3523 MSI_MAP_CAP_HDR_T MsiMapCapHdr;
3524 MsiMapCapHdr.u32 = 0;
3525 pHlp->pfnPrintf(pHlp, " MSI Mapping Capability Header = %#RX32\n", MsiMapCapHdr.u32);
3526 if (fVerbose)
3527 {
3528 pHlp->pfnPrintf(pHlp, " Capability ID = %#x\n", MsiMapCapHdr.n.u8MsiMapCapId);
3529 pHlp->pfnPrintf(pHlp, " Map enable = %RTbool\n", MsiMapCapHdr.n.u1MsiMapEn);
3530 pHlp->pfnPrintf(pHlp, " Map fixed = %RTbool\n", MsiMapCapHdr.n.u1MsiMapFixed);
3531 pHlp->pfnPrintf(pHlp, " Map capability type = %#x\n", MsiMapCapHdr.n.u5MapCapType);
3532 }
3533 }
3534 /* Performance Optimization Control Register. */
3535 {
3536 IOMMU_PERF_OPT_CTRL_T const PerfOptCtrl = pThis->PerfOptCtrl;
3537 pHlp->pfnPrintf(pHlp, " Performance Optimization Control = %#RX32\n", PerfOptCtrl.u32);
3538 if (fVerbose)
3539 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", PerfOptCtrl.n.u1PerfOptEn);
3540 }
3541 /* XT (x2APIC) General Interrupt Control Register. */
3542 {
3543 IOMMU_XT_GEN_INTR_CTRL_T const XtGenIntrCtrl = pThis->XtGenIntrCtrl;
3544 pHlp->pfnPrintf(pHlp, " XT General Interrupt Control = %#RX64\n", XtGenIntrCtrl.u64);
3545 if (fVerbose)
3546 {
3547 pHlp->pfnPrintf(pHlp, " Interrupt destination mode = %s\n",
3548 !XtGenIntrCtrl.n.u1X2ApicIntrDstMode ? "physical" : "logical");
3549 pHlp->pfnPrintf(pHlp, " Interrupt destination = %#RX64\n",
3550 RT_MAKE_U64(XtGenIntrCtrl.n.u24X2ApicIntrDstLo, XtGenIntrCtrl.n.u7X2ApicIntrDstHi));
3551 pHlp->pfnPrintf(pHlp, " Interrupt vector = %#x\n", XtGenIntrCtrl.n.u8X2ApicIntrVector);
3552 pHlp->pfnPrintf(pHlp, " Interrupt delivery mode = %s\n",
3553 !XtGenIntrCtrl.n.u8X2ApicIntrVector ? "fixed" : "arbitrated");
3554 }
3555 }
3556 /* XT (x2APIC) PPR Interrupt Control Register. */
3557 {
3558 IOMMU_XT_PPR_INTR_CTRL_T const XtPprIntrCtrl = pThis->XtPprIntrCtrl;
3559 pHlp->pfnPrintf(pHlp, " XT PPR Interrupt Control = %#RX64\n", XtPprIntrCtrl.u64);
3560 if (fVerbose)
3561 {
3562 pHlp->pfnPrintf(pHlp, " Interrupt destination mode = %s\n",
3563 !XtPprIntrCtrl.n.u1X2ApicIntrDstMode ? "physical" : "logical");
3564 pHlp->pfnPrintf(pHlp, " Interrupt destination = %#RX64\n",
3565 RT_MAKE_U64(XtPprIntrCtrl.n.u24X2ApicIntrDstLo, XtPprIntrCtrl.n.u7X2ApicIntrDstHi));
3566 pHlp->pfnPrintf(pHlp, " Interrupt vector = %#x\n", XtPprIntrCtrl.n.u8X2ApicIntrVector);
3567 pHlp->pfnPrintf(pHlp, " Interrupt delivery mode = %s\n",
3568 !XtPprIntrCtrl.n.u8X2ApicIntrVector ? "fixed" : "arbitrated");
3569 }
3570 }
3571 /* XT (X2APIC) GA Log Interrupt Control Register. */
3572 {
3573 IOMMU_XT_GALOG_INTR_CTRL_T const XtGALogIntrCtrl = pThis->XtGALogIntrCtrl;
3574 pHlp->pfnPrintf(pHlp, " XT GA Log Interrupt Control = %#RX64\n", XtGALogIntrCtrl.u64);
3575 if (fVerbose)
3576 {
3577 pHlp->pfnPrintf(pHlp, " Interrupt destination mode = %s\n",
3578 !XtGALogIntrCtrl.n.u1X2ApicIntrDstMode ? "physical" : "logical");
3579 pHlp->pfnPrintf(pHlp, " Interrupt destination = %#RX64\n",
3580 RT_MAKE_U64(XtGALogIntrCtrl.n.u24X2ApicIntrDstLo, XtGALogIntrCtrl.n.u7X2ApicIntrDstHi));
3581 pHlp->pfnPrintf(pHlp, " Interrupt vector = %#x\n", XtGALogIntrCtrl.n.u8X2ApicIntrVector);
3582 pHlp->pfnPrintf(pHlp, " Interrupt delivery mode = %s\n",
3583 !XtGALogIntrCtrl.n.u8X2ApicIntrVector ? "fixed" : "arbitrated");
3584 }
3585 }
3586 /* MARC Registers. */
3587 {
3588 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aMarcApers); i++)
3589 {
3590 pHlp->pfnPrintf(pHlp, " MARC Aperture %u:\n", i);
3591 MARC_APER_BAR_T const MarcAperBar = pThis->aMarcApers[i].Base;
3592 pHlp->pfnPrintf(pHlp, " Base = %#RX64\n", MarcAperBar.n.u40MarcBaseAddr << X86_PAGE_4K_SHIFT);
3593
3594 MARC_APER_RELOC_T const MarcAperReloc = pThis->aMarcApers[i].Reloc;
3595 pHlp->pfnPrintf(pHlp, " Reloc = %#RX64 (addr: %#RX64, read-only: %RTbool, enable: %RTbool)\n",
3596 MarcAperReloc.u64, MarcAperReloc.n.u40MarcRelocAddr << X86_PAGE_4K_SHIFT,
3597 MarcAperReloc.n.u1ReadOnly, MarcAperReloc.n.u1RelocEn);
3598
3599 MARC_APER_LEN_T const MarcAperLen = pThis->aMarcApers[i].Length;
3600 pHlp->pfnPrintf(pHlp, " Length = %RU64 pages\n", MarcAperLen.n.u40MarcLength);
3601 }
3602 }
3603 /* Reserved Register. */
3604 pHlp->pfnPrintf(pHlp, " Reserved Register = %#RX64\n", pThis->RsvdReg);
3605 /* Command Buffer Head Pointer Register. */
3606 {
3607 CMD_BUF_HEAD_PTR_T const CmdBufHeadPtr = pThis->CmdBufHeadPtr;
3608 pHlp->pfnPrintf(pHlp, " Command Buffer Head Pointer = %#RX64 (off: %#x)\n", CmdBufHeadPtr.u64,
3609 CmdBufHeadPtr.n.off);
3610 }
3611 /* Command Buffer Tail Pointer Register. */
3612 {
3613 CMD_BUF_HEAD_PTR_T const CmdBufTailPtr = pThis->CmdBufTailPtr;
3614 pHlp->pfnPrintf(pHlp, " Command Buffer Tail Pointer = %#RX64 (off: %#x)\n", CmdBufTailPtr.u64,
3615 CmdBufTailPtr.n.off);
3616 }
3617 /* Event Log Head Pointer Register. */
3618 {
3619 EVT_LOG_HEAD_PTR_T const EvtLogHeadPtr = pThis->EvtLogHeadPtr;
3620 pHlp->pfnPrintf(pHlp, " Event Log Head Pointer = %#RX64 (off: %#x)\n", EvtLogHeadPtr.u64,
3621 EvtLogHeadPtr.n.off);
3622 }
3623 /* Event Log Tail Pointer Register. */
3624 {
3625 EVT_LOG_TAIL_PTR_T const EvtLogTailPtr = pThis->EvtLogTailPtr;
3626 pHlp->pfnPrintf(pHlp, " Event Log Tail Pointer = %#RX64 (off: %#x)\n", EvtLogTailPtr.u64,
3627 EvtLogTailPtr.n.off);
3628 }
3629 /* Status Register. */
3630 {
3631 IOMMU_STATUS_T const Status = pThis->Status;
3632 pHlp->pfnPrintf(pHlp, " Status Register = %#RX64\n", Status.u64);
3633 if (fVerbose)
3634 {
3635 pHlp->pfnPrintf(pHlp, " Event log overflow = %RTbool\n", Status.n.u1EvtOverflow);
3636 pHlp->pfnPrintf(pHlp, " Event log interrupt = %RTbool\n", Status.n.u1EvtLogIntr);
3637 pHlp->pfnPrintf(pHlp, " Completion wait interrupt = %RTbool\n", Status.n.u1CompWaitIntr);
3638 pHlp->pfnPrintf(pHlp, " Event log running = %RTbool\n", Status.n.u1EvtLogRunning);
3639 pHlp->pfnPrintf(pHlp, " Command buffer running = %RTbool\n", Status.n.u1CmdBufRunning);
3640 pHlp->pfnPrintf(pHlp, " PPR overflow = %RTbool\n", Status.n.u1PprOverflow);
3641 pHlp->pfnPrintf(pHlp, " PPR interrupt = %RTbool\n", Status.n.u1PprIntr);
3642 pHlp->pfnPrintf(pHlp, " PPR log running = %RTbool\n", Status.n.u1PprLogRunning);
3643 pHlp->pfnPrintf(pHlp, " Guest log running = %RTbool\n", Status.n.u1GstLogRunning);
3644 pHlp->pfnPrintf(pHlp, " Guest log interrupt = %RTbool\n", Status.n.u1GstLogIntr);
3645 pHlp->pfnPrintf(pHlp, " PPR log B overflow = %RTbool\n", Status.n.u1PprOverflowB);
3646 pHlp->pfnPrintf(pHlp, " PPR log active = %RTbool\n", Status.n.u1PprLogActive);
3647 pHlp->pfnPrintf(pHlp, " Event log B overflow = %RTbool\n", Status.n.u1EvtOverflowB);
3648 pHlp->pfnPrintf(pHlp, " Event log active = %RTbool\n", Status.n.u1EvtLogActive);
3649 pHlp->pfnPrintf(pHlp, " PPR log B overflow early warning = %RTbool\n", Status.n.u1PprOverflowEarlyB);
3650 pHlp->pfnPrintf(pHlp, " PPR log overflow early warning = %RTbool\n", Status.n.u1PprOverflowEarly);
3651 }
3652 }
3653 /* PPR Log Head Pointer. */
3654 {
3655 PPR_LOG_HEAD_PTR_T const PprLogHeadPtr = pThis->PprLogHeadPtr;
3656 pHlp->pfnPrintf(pHlp, " PPR Log Head Pointer = %#RX64 (off: %#x)\n", PprLogHeadPtr.u64,
3657 PprLogHeadPtr.n.off);
3658 }
3659 /* PPR Log Tail Pointer. */
3660 {
3661 PPR_LOG_TAIL_PTR_T const PprLogTailPtr = pThis->PprLogTailPtr;
3662 pHlp->pfnPrintf(pHlp, " PPR Log Tail Pointer = %#RX64 (off: %#x)\n", PprLogTailPtr.u64,
3663 PprLogTailPtr.n.off);
3664 }
3665 /* Guest Virtual-APIC Log Head Pointer. */
3666 {
3667 GALOG_HEAD_PTR_T const GALogHeadPtr = pThis->GALogHeadPtr;
3668 pHlp->pfnPrintf(pHlp, " Guest Virtual-APIC Log Head Pointer = %#RX64 (off: %#x)\n", GALogHeadPtr.u64,
3669 GALogHeadPtr.n.u12GALogPtr);
3670 }
3671 /* Guest Virtual-APIC Log Tail Pointer. */
3672 {
3673 GALOG_HEAD_PTR_T const GALogTailPtr = pThis->GALogTailPtr;
3674 pHlp->pfnPrintf(pHlp, " Guest Virtual-APIC Log Tail Pointer = %#RX64 (off: %#x)\n", GALogTailPtr.u64,
3675 GALogTailPtr.n.u12GALogPtr);
3676 }
3677 /* PPR Log B Head Pointer. */
3678 {
3679 PPR_LOG_B_HEAD_PTR_T const PprLogBHeadPtr = pThis->PprLogBHeadPtr;
3680 pHlp->pfnPrintf(pHlp, " PPR Log B Head Pointer = %#RX64 (off: %#x)\n", PprLogBHeadPtr.u64,
3681 PprLogBHeadPtr.n.off);
3682 }
3683 /* PPR Log B Tail Pointer. */
3684 {
3685 PPR_LOG_B_TAIL_PTR_T const PprLogBTailPtr = pThis->PprLogBTailPtr;
3686 pHlp->pfnPrintf(pHlp, " PPR Log B Tail Pointer = %#RX64 (off: %#x)\n", PprLogBTailPtr.u64,
3687 PprLogBTailPtr.n.off);
3688 }
3689 /* Event Log B Head Pointer. */
3690 {
3691 EVT_LOG_B_HEAD_PTR_T const EvtLogBHeadPtr = pThis->EvtLogBHeadPtr;
3692 pHlp->pfnPrintf(pHlp, " Event Log B Head Pointer = %#RX64 (off: %#x)\n", EvtLogBHeadPtr.u64,
3693 EvtLogBHeadPtr.n.off);
3694 }
3695 /* Event Log B Tail Pointer. */
3696 {
3697 EVT_LOG_B_TAIL_PTR_T const EvtLogBTailPtr = pThis->EvtLogBTailPtr;
3698 pHlp->pfnPrintf(pHlp, " Event Log B Tail Pointer = %#RX64 (off: %#x)\n", EvtLogBTailPtr.u64,
3699 EvtLogBTailPtr.n.off);
3700 }
3701 /* PPR Log Auto Response Register. */
3702 {
3703 PPR_LOG_AUTO_RESP_T const PprLogAutoResp = pThis->PprLogAutoResp;
3704 pHlp->pfnPrintf(pHlp, " PPR Log Auto Response Register = %#RX64\n", PprLogAutoResp.u64);
3705 if (fVerbose)
3706 {
3707 pHlp->pfnPrintf(pHlp, " Code = %#x\n", PprLogAutoResp.n.u4AutoRespCode);
3708 pHlp->pfnPrintf(pHlp, " Mask Gen. = %RTbool\n", PprLogAutoResp.n.u1AutoRespMaskGen);
3709 }
3710 }
3711 /* PPR Log Overflow Early Warning Indicator Register. */
3712 {
3713 PPR_LOG_OVERFLOW_EARLY_T const PprLogOverflowEarly = pThis->PprLogOverflowEarly;
3714 pHlp->pfnPrintf(pHlp, " PPR Log overflow early warning = %#RX64\n", PprLogOverflowEarly.u64);
3715 if (fVerbose)
3716 {
3717 pHlp->pfnPrintf(pHlp, " Threshold = %#x\n", PprLogOverflowEarly.n.u15Threshold);
3718 pHlp->pfnPrintf(pHlp, " Interrupt enable = %RTbool\n", PprLogOverflowEarly.n.u1IntrEn);
3719 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", PprLogOverflowEarly.n.u1Enable);
3720 }
3721 }
3722 /* PPR Log B Overflow Early Warning Indicator Register. */
3723 {
3724 PPR_LOG_OVERFLOW_EARLY_T const PprLogBOverflowEarly = pThis->PprLogBOverflowEarly;
3725 pHlp->pfnPrintf(pHlp, " PPR Log B overflow early warning = %#RX64\n", PprLogBOverflowEarly.u64);
3726 if (fVerbose)
3727 {
3728 pHlp->pfnPrintf(pHlp, " Threshold = %#x\n", PprLogBOverflowEarly.n.u15Threshold);
3729 pHlp->pfnPrintf(pHlp, " Interrupt enable = %RTbool\n", PprLogBOverflowEarly.n.u1IntrEn);
3730 pHlp->pfnPrintf(pHlp, " Enable = %RTbool\n", PprLogBOverflowEarly.n.u1Enable);
3731 }
3732 }
3733}
3734
3735
3736/**
3737 * @callback_method_impl{FNSSMDEVSAVEEXEC}
3738 */
3739static DECLCALLBACK(int) iommuAmdR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
3740{
3741 /** @todo IOMMU: Save state. */
3742 RT_NOREF2(pDevIns, pSSM);
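#if 0
 /* A hedged sketch of where the eventual implementation might start (the
 register selection and ordering are assumptions, not a final format): */
 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
 pHlp->pfnSSMPutU64(pSSM, pThis->Ctrl.u64);
 pHlp->pfnSSMPutU64(pSSM, pThis->Status.u64);
 pHlp->pfnSSMPutU64(pSSM, pThis->CmdBufBaseAddr.u64);
 /* ... remaining registers, then return VINF_SUCCESS ... */
#endif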
3743 LogFlowFunc(("\n"));
3744 return VERR_NOT_IMPLEMENTED;
3745}
3746
3747
3748/**
3749 * @callback_method_impl{FNSSMDEVLOADEXEC}
3750 */
3751static DECLCALLBACK(int) iommuAmdR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3752{
3753 /** @todo IOMMU: Load state. */
3754 RT_NOREF4(pDevIns, pSSM, uVersion, uPass);
3755 LogFlowFunc(("\n"));
3756 return VERR_NOT_IMPLEMENTED;
3757}
3758
3759
3760/**
3761 * @interface_method_impl{PDMDEVREG,pfnReset}
3762 */
3763static DECLCALLBACK(void) iommuAmdR3Reset(PPDMDEVINS pDevIns)
3764{
3765 /*
3766 * Resets read-write portion of the IOMMU state.
3767 *
3768 * State data not initialized here is expected to be initialized during
3769 * device construction and remain read-only through the lifetime of the VM.
3770 */
3771 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3772 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
3773 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
3774
3775 LogFlowFunc(("\n"));
3776
3777 memset(&pThis->aDevTabBaseAddrs[0], 0, sizeof(pThis->aDevTabBaseAddrs));
3778
3779 pThis->CmdBufBaseAddr.u64 = 0;
3780 pThis->CmdBufBaseAddr.n.u4Len = 8;
3781
3782 pThis->EvtLogBaseAddr.u64 = 0;
3783 pThis->EvtLogBaseAddr.n.u4Len = 8;
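    /* Note: an encoded length of n corresponds to 2^n entries, so the default of 8
       above gives 256 entries, i.e. 4K with 16-byte command/event entries (my
       reading of the buffer length encoding in the AMD spec). */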
3784
3785 pThis->Ctrl.u64 = 0;
3786 pThis->Ctrl.n.u1Coherent = 1;
3787 Assert(!pThis->ExtFeat.n.u1BlockStopMarkSup);
3788
3789 pThis->ExclRangeBaseAddr.u64 = 0;
3790 pThis->ExclRangeLimit.u64 = 0;
3791
3792 pThis->PprLogBaseAddr.u64 = 0;
3793 pThis->PprLogBaseAddr.n.u4Len = 8;
3794
3795 pThis->HwEvtHi.u64 = 0;
3796 pThis->HwEvtLo = 0;
3797 pThis->HwEvtStatus.u64 = 0;
3798
3799 pThis->GALogBaseAddr.u64 = 0;
3800 pThis->GALogBaseAddr.n.u4Len = 8;
3801 pThis->GALogTailAddr.u64 = 0;
3802
3803 pThis->PprLogBBaseAddr.u64 = 0;
3804 pThis->PprLogBBaseAddr.n.u4Len = 8;
3805
3806 pThis->EvtLogBBaseAddr.u64 = 0;
3807 pThis->EvtLogBBaseAddr.n.u4Len = 8;
3808
3809 pThis->PerfOptCtrl.u32 = 0;
3810
3811 pThis->XtGenIntrCtrl.u64 = 0;
3812 pThis->XtPprIntrCtrl.u64 = 0;
3813 pThis->XtGALogIntrCtrl.u64 = 0;
3814
3815 memset(&pThis->aMarcApers[0], 0, sizeof(pThis->aMarcApers));
3816
3817 pThis->CmdBufHeadPtr.u64 = 0;
3818 pThis->CmdBufTailPtr.u64 = 0;
3819 pThis->EvtLogHeadPtr.u64 = 0;
3820 pThis->EvtLogTailPtr.u64 = 0;
3821
3822 pThis->Status.u64 = 0;
3823
3824 pThis->PprLogHeadPtr.u64 = 0;
3825 pThis->PprLogTailPtr.u64 = 0;
3826
3827 pThis->GALogHeadPtr.u64 = 0;
3828 pThis->GALogTailPtr.u64 = 0;
3829
3830 pThis->PprLogBHeadPtr.u64 = 0;
3831 pThis->PprLogBTailPtr.u64 = 0;
3832
3833 pThis->EvtLogBHeadPtr.u64 = 0;
3834 pThis->EvtLogBTailPtr.u64 = 0;
3835
3836 pThis->PprLogAutoResp.u64 = 0;
3837 pThis->PprLogOverflowEarly.u64 = 0;
3838 pThis->PprLogBOverflowEarly.u64 = 0;
3839
3840 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_LO, 0);
3841 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_HI, 0);
3842
3843 /*
3844 * I ASSUME all MMIO regions mapped by a PDM device are automatically unmapped
3845 * on VM reset. If not, we need to enable the following...
3846 */
3847#if 0
3848 /* Unmap the MMIO region on reset if it has been mapped previously. */
3849 Assert(pThis->hMmio != NIL_IOMMMIOHANDLE);
3850 if (PDMDevHlpMmioGetMappingAddress(pDevIns, pThis->hMmio) != NIL_RTGCPHYS)
3851 PDMDevHlpMmioUnmap(pDevIns, pThis->hMmio);
3852#endif
3853}
3854
3855
3856/**
3857 * @interface_method_impl{PDMDEVREG,pfnDestruct}
3858 */
3859static DECLCALLBACK(int) iommuAmdR3Destruct(PPDMDEVINS pDevIns)
3860{
3861 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
3862 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3863 LogFlowFunc(("\n"));
3864
3865 /* Close the command thread semaphore. */
3866 if (pThis->hEvtCmdThread != NIL_SUPSEMEVENT)
3867 {
3868 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEvtCmdThread);
3869 pThis->hEvtCmdThread = NIL_SUPSEMEVENT;
3870 }
3871 return VINF_SUCCESS;
3872}
3873
3874
3875/**
3876 * @interface_method_impl{PDMDEVREG,pfnConstruct}
3877 */
3878static DECLCALLBACK(int) iommuAmdR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
3879{
3880 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
3881 RT_NOREF(pCfg);
3882
3883 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
3884 PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
3885 pThisCC->pDevInsR3 = pDevIns;
3886
3887 LogFlowFunc(("iInstance=%d\n", iInstance));
3888
3889 /*
3890 * Register the IOMMU with PDM.
3891 */
3892 PDMIOMMUREGR3 IommuReg;
3893 RT_ZERO(IommuReg);
3894 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
3895 IommuReg.pfnMemRead = iommuAmdDeviceMemRead;
3896 IommuReg.pfnMemWrite = iommuAmdDeviceMemWrite;
3897 IommuReg.pfnMsiRemap = iommuAmdDeviceMsiRemap;
3898 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
3899 int rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
3900 if (RT_FAILURE(rc))
3901 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to register ourselves as an IOMMU device"));
3902 if (pThisCC->CTX_SUFF(pIommuHlp)->u32Version != PDM_IOMMUHLPR3_VERSION)
3903 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
3904 N_("IOMMU helper version mismatch; got %#x expected %#x"),
3905 pThisCC->CTX_SUFF(pIommuHlp)->u32Version, PDM_IOMMUHLPR3_VERSION);
3906 if (pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd != PDM_IOMMUHLPR3_VERSION)
3907 return PDMDevHlpVMSetError(pDevIns, VERR_VERSION_MISMATCH, RT_SRC_POS,
3908 N_("IOMMU helper end-version mismatch; got %#x expected %#x"),
3909 pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd, PDM_IOMMUHLPR3_VERSION);
3910
3911 /*
3912 * Initialize read-only PCI configuration space.
3913 */
3914 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
3915 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
3916
3917 /* Header. */
3918 PDMPciDevSetVendorId(pPciDev, IOMMU_PCI_VENDOR_ID); /* AMD */
3919 PDMPciDevSetDeviceId(pPciDev, IOMMU_PCI_DEVICE_ID); /* VirtualBox IOMMU device */
3920 PDMPciDevSetCommand(pPciDev, VBOX_PCI_COMMAND_MASTER); /* Enable bus master (as we write to main memory) */
3921 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST); /* Capability list supported */
3922 PDMPciDevSetRevisionId(pPciDev, IOMMU_PCI_REVISION_ID); /* VirtualBox specific device implementation revision */
3923 PDMPciDevSetClassBase(pPciDev, VBOX_PCI_CLASS_SYSTEM); /* System Base Peripheral */
3924 PDMPciDevSetClassSub(pPciDev, VBOX_PCI_SUB_SYSTEM_IOMMU); /* IOMMU */
3925 PDMPciDevSetClassProg(pPciDev, 0x0); /* IOMMU Programming interface */
3926 PDMPciDevSetHeaderType(pPciDev, 0x0); /* Single function, type 0 */
3927 PDMPciDevSetSubSystemId(pPciDev, IOMMU_PCI_DEVICE_ID); /* VirtualBox IOMMU device */
3928 PDMPciDevSetSubSystemVendorId(pPciDev, IOMMU_PCI_VENDOR_ID); /* AMD */
3929 PDMPciDevSetCapabilityList(pPciDev, IOMMU_PCI_OFF_CAP_HDR); /* Offset into capability registers */
3930 PDMPciDevSetInterruptPin(pPciDev, 0x1); /* INTA#. */
3931 PDMPciDevSetInterruptLine(pPciDev, 0x0); /* For software compatibility; no effect on hardware */
3932
3933 /* Capability Header. */
3934 /* NOTE! Fields (e.g. EFR) must match what we expose in the ACPI tables. */
3935 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_CAP_HDR,
3936 RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_ID, 0xf) /* RO - Secure Device capability block */
3937 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_PTR, IOMMU_PCI_OFF_MSI_CAP_HDR) /* RO - Offset to next capability */
3938 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_TYPE, 0x3) /* RO - IOMMU capability block */
3939 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_REV, 0x1) /* RO - IOMMU interface revision */
3940 | RT_BF_MAKE(IOMMU_BF_CAPHDR_IOTLB_SUP, 0x0) /* RO - Remote IOTLB support */
3941 | RT_BF_MAKE(IOMMU_BF_CAPHDR_HT_TUNNEL, 0x0) /* RO - HyperTransport Tunnel support */
3942 | RT_BF_MAKE(IOMMU_BF_CAPHDR_NP_CACHE, 0x0) /* RO - Cache NP page table entries */
3943 | RT_BF_MAKE(IOMMU_BF_CAPHDR_EFR_SUP, 0x1) /* RO - Extended Feature Register support */
3944 | RT_BF_MAKE(IOMMU_BF_CAPHDR_CAP_EXT, 0x1)); /* RO - Misc. Information Register support */
3945
3946 /* Base Address Register. */
3947 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_LO, 0x0); /* RW - Base address (Lo) and enable bit */
3948 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_BASE_ADDR_REG_HI, 0x0); /* RW - Base address (Hi) */
3949
3950 /* IOMMU Range Register. */
3951 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_RANGE_REG, 0x0); /* RW - Range register (implemented as RO by us) */
3952
3953 /* Misc. Information Register. */
3954 /* NOTE! Fields (e.g. GVA size) must match what we expose in the ACPI tables. */
3955 uint32_t const uMiscInfoReg0 = RT_BF_MAKE(IOMMU_BF_MISCINFO_0_MSI_NUM, 0) /* RO - MSI number */
3956 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_GVA_SIZE, 2) /* RO - Guest Virt. Addr size (2=48 bits) */
3957 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_PA_SIZE, 48) /* RO - Physical Addr size (48 bits) */
3958 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_VA_SIZE, 64) /* RO - Virt. Addr size (64 bits) */
3959 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_HT_ATS_RESV, 0) /* RW - HT ATS reserved */
3960 | RT_BF_MAKE(IOMMU_BF_MISCINFO_0_MSI_NUM_PPR, 0); /* RW - PPR interrupt number */
3961 uint32_t const uMiscInfoReg1 = 0;
3962 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MISCINFO_REG_0, uMiscInfoReg0);
3963 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MISCINFO_REG_1, uMiscInfoReg1);
3964
3965 /* MSI Capability Header register. */
3966 PDMMSIREG MsiReg;
3967 RT_ZERO(MsiReg);
3968 MsiReg.cMsiVectors = 1;
3969 MsiReg.iMsiCapOffset = IOMMU_PCI_OFF_MSI_CAP_HDR;
3970 MsiReg.iMsiNextOffset = 0; /* IOMMU_PCI_OFF_MSI_MAP_CAP_HDR */
3971 MsiReg.fMsi64bit = 1; /* 64-bit addressing support is mandatory; See AMD spec. 2.8 "IOMMU Interrupt Support". */
3972
3973 /* MSI Address (Lo, Hi) and MSI data are read-write PCI config registers handled by our generic PCI config space code. */
3974#if 0
3975 /* MSI Address Lo. */
3976 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_LO, 0); /* RW - MSI message address (Lo) */
3977 /* MSI Address Hi. */
3978 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_ADDR_HI, 0); /* RW - MSI message address (Hi) */
3979 /* MSI Data. */
3980 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_DATA, 0); /* RW - MSI data */
3981#endif
3982
3983#if 0
3984 /** @todo IOMMU: I don't know if we need to support this, enable later if
3985 * required. */
3986 /* MSI Mapping Capability Header register. */
3987 PDMPciDevSetDWord(pPciDev, IOMMU_PCI_OFF_MSI_MAP_CAP_HDR,
3988 RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_CAP_ID, 0x8) /* RO - Capability ID */
3989 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_CAP_PTR, 0x0) /* RO - Offset to next capability (NULL) */
3990 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_EN, 0x1) /* RO - MSI mapping capability enable */
3991 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_FIXED, 0x1) /* RO - MSI mapping range is fixed */
3992 | RT_BF_MAKE(IOMMU_BF_MSI_MAP_CAPHDR_CAP_TYPE, 0x15)); /* RO - MSI mapping capability */
3993 /* When implementing don't forget to copy this to its MMIO shadow register (MsiMapCapHdr) in iommuAmdR3Init. */
3994#endif
3995
3996 /*
3997 * Register the PCI function with PDM.
3998 */
3999 rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
4000 AssertLogRelRCReturn(rc, rc);
4001
4002 /*
4003 * Register MSI support for the PCI device.
4004 * This must be done -after- registering it as a PCI device!
4005 */
4006 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
4007 AssertRCReturn(rc, rc);
4008
4009 /*
4010 * Intercept PCI config. space accesses.
4011 */
4012 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, iommuAmdR3PciConfigRead, iommuAmdR3PciConfigWrite);
4013 AssertLogRelRCReturn(rc, rc);
4014
4015 /*
4016 * Create the MMIO region.
4017 * Mapping of the region is done when software configures it via PCI config space.
4018 */
4019 rc = PDMDevHlpMmioCreate(pDevIns, IOMMU_MMIO_REGION_SIZE, pPciDev, 0 /* iPciRegion */, iommuAmdMmioWrite, iommuAmdMmioRead,
4020 NULL /* pvUser */, IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_DWORD_QWORD_ZEROED,
4021 "AMD-IOMMU", &pThis->hMmio);
4022 AssertLogRelRCReturn(rc, rc);
4023
4024 /*
4025 * Register saved state.
4026 */
4027 rc = PDMDevHlpSSMRegisterEx(pDevIns, IOMMU_SAVED_STATE_VERSION, sizeof(IOMMU), NULL,
4028 NULL, NULL, NULL,
4029 NULL, iommuAmdR3SaveExec, NULL,
4030 NULL, iommuAmdR3LoadExec, NULL);
4031 AssertLogRelRCReturn(rc, rc);
4032
4033 /*
4034 * Register debugger info item.
4035 */
4036 rc = PDMDevHlpDBGFInfoRegister(pDevIns, "iommu", "Display IOMMU state.", iommuAmdR3DbgInfo);
4037 AssertLogRelRCReturn(rc, rc);
4038
4039# ifdef VBOX_WITH_STATISTICS
4040 /*
4041 * Statistics.
4042 */
4043 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadR3, STAMTYPE_COUNTER, "R3/MmioReadR3", STAMUNIT_OCCURENCES, "Number of MMIO reads in R3");
4044 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioReadRZ, STAMTYPE_COUNTER, "RZ/MmioReadRZ", STAMUNIT_OCCURENCES, "Number of MMIO reads in RZ.");
4045
4046 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteR3, STAMTYPE_COUNTER, "R3/MmioWriteR3", STAMUNIT_OCCURENCES, "Number of MMIO writes in R3.");
4047 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMmioWriteRZ, STAMTYPE_COUNTER, "RZ/MmioWriteRZ", STAMUNIT_OCCURENCES, "Number of MMIO writes in RZ.");
4048
4049 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapR3, STAMTYPE_COUNTER, "R3/MsiRemapR3", STAMUNIT_OCCURENCES, "Number of interrupt remap requests in R3.");
4050 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMsiRemapRZ, STAMTYPE_COUNTER, "RZ/MsiRemapRZ", STAMUNIT_OCCURENCES, "Number of interrupt remap requests in RZ.");
4051
4052 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmd, STAMTYPE_COUNTER, "R3/Commands", STAMUNIT_OCCURENCES, "Number of commands processed (total).");
4053 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdCompWait, STAMTYPE_COUNTER, "R3/Commands/CompWait", STAMUNIT_OCCURENCES, "Number of Completion Wait commands processed.");
4054 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvDte, STAMTYPE_COUNTER, "R3/Commands/InvDte", STAMUNIT_OCCURENCES, "Number of Invalidate DTE commands processed.");
4055 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvIommuPages, STAMTYPE_COUNTER, "R3/Commands/InvIommuPages", STAMUNIT_OCCURENCES, "Number of Invalidate IOMMU Pages commands processed.");
4056 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvIotlbPages, STAMTYPE_COUNTER, "R3/Commands/InvIotlbPages", STAMUNIT_OCCURENCES, "Number of Invalidate IOTLB Pages commands processed.");
4057 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvIntrTable, STAMTYPE_COUNTER, "R3/Commands/InvIntrTable", STAMUNIT_OCCURENCES, "Number of Invalidate Interrupt Table commands processed.");
4058 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdPrefIommuPages, STAMTYPE_COUNTER, "R3/Commands/PrefIommuPages", STAMUNIT_OCCURENCES, "Number of Prefetch IOMMU Pages commands processed.");
4059 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdCompletePprReq, STAMTYPE_COUNTER, "R3/Commands/CompletePprReq", STAMUNIT_OCCURENCES, "Number of Complete PPR Requests commands processed.");
4060 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvIommuAll, STAMTYPE_COUNTER, "R3/Commands/InvIommuAll", STAMUNIT_OCCURENCES, "Number of Invalidate IOMMU All commands processed.");
4061# endif
4062
4063 /*
4064 * Create the command thread and its event semaphore.
4065 */
4066 char szDevIommu[64];
4067 RT_ZERO(szDevIommu);
4068 RTStrPrintf(szDevIommu, sizeof(szDevIommu), "IOMMU-%u", iInstance);
4069 rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->pCmdThread, pThis, iommuAmdR3CmdThread, iommuAmdR3CmdThreadWakeUp,
4070 0 /* cbStack */, RTTHREADTYPE_IO, szDevIommu);
4071 AssertLogRelRCReturn(rc, rc);
4072
4073 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEvtCmdThread);
4074 AssertLogRelRCReturn(rc, rc);
4075
4076 /*
4077 * Initialize read-only registers.
4078 * NOTE! Fields here must match their corresponding field in the ACPI tables.
4079 */
4080 /** @todo Don't remove the =0 assignment for now. It's just there so it's easier
4081 * for me to see existing features that we might want to implement. Do it
4082 * later. */
4083 pThis->ExtFeat.u64 = 0;
4084 pThis->ExtFeat.n.u1PrefetchSup = 0;
4085 pThis->ExtFeat.n.u1PprSup = 0;
4086 pThis->ExtFeat.n.u1X2ApicSup = 0;
4087 pThis->ExtFeat.n.u1NoExecuteSup = 0;
4088 pThis->ExtFeat.n.u1GstTranslateSup = 0;
4089 pThis->ExtFeat.n.u1InvAllSup = 1;
4090 pThis->ExtFeat.n.u1GstVirtApicSup = 0;
4091 pThis->ExtFeat.n.u1HwErrorSup = 1;
4092 pThis->ExtFeat.n.u1PerfCounterSup = 0;
4093 AssertCompile((IOMMU_MAX_HOST_PT_LEVEL & 0x3) < 3);
4094 pThis->ExtFeat.n.u2HostAddrTranslateSize = (IOMMU_MAX_HOST_PT_LEVEL & 0x3);
4095 pThis->ExtFeat.n.u2GstAddrTranslateSize = 0; /* Requires GstTranslateSup */
4096 pThis->ExtFeat.n.u2GstCr3RootTblLevel = 0; /* Requires GstTranslateSup */
4097 pThis->ExtFeat.n.u2SmiFilterSup = 0;
4098 pThis->ExtFeat.n.u3SmiFilterCount = 0;
4099 pThis->ExtFeat.n.u3GstVirtApicModeSup = 0; /* Requires GstVirtApicSup */
4100 pThis->ExtFeat.n.u2DualPprLogSup = 0;
4101 pThis->ExtFeat.n.u2DualEvtLogSup = 0;
4102 pThis->ExtFeat.n.u5MaxPasidSup = 0; /* Requires GstTranslateSup */
4103 pThis->ExtFeat.n.u1UserSupervisorSup = 0;
4104 AssertCompile(IOMMU_MAX_DEV_TAB_SEGMENTS <= 3);
4105 pThis->ExtFeat.n.u2DevTabSegSup = IOMMU_MAX_DEV_TAB_SEGMENTS;
4106 pThis->ExtFeat.n.u1PprLogOverflowWarn = 0;
4107 pThis->ExtFeat.n.u1PprAutoRespSup = 0;
4108 pThis->ExtFeat.n.u2MarcSup = 0;
4109 pThis->ExtFeat.n.u1BlockStopMarkSup = 0;
4110 pThis->ExtFeat.n.u1PerfOptSup = 0;
4111 pThis->ExtFeat.n.u1MsiCapMmioSup = 1;
4112 pThis->ExtFeat.n.u1GstIoSup = 0;
4113 pThis->ExtFeat.n.u1HostAccessSup = 0;
4114 pThis->ExtFeat.n.u1EnhancedPprSup = 0;
4115 pThis->ExtFeat.n.u1AttrForwardSup = 0;
4116 pThis->ExtFeat.n.u1HostDirtySup = 0;
4117 pThis->ExtFeat.n.u1InvIoTlbTypeSup = 0;
4118 pThis->ExtFeat.n.u1GstUpdateDisSup = 0;
4119 pThis->ExtFeat.n.u1ForcePhysDstSup = 0;
4120
4121 pThis->RsvdReg = 0;
4122
4123 pThis->DevSpecificFeat.u64 = 0;
4124 pThis->DevSpecificFeat.n.u4RevMajor = IOMMU_DEVSPEC_FEAT_MAJOR_VERSION;
4125 pThis->DevSpecificFeat.n.u4RevMinor = IOMMU_DEVSPEC_FEAT_MINOR_VERSION;
4126
4127 pThis->DevSpecificCtrl.u64 = 0;
4128 pThis->DevSpecificCtrl.n.u4RevMajor = IOMMU_DEVSPEC_CTRL_MAJOR_VERSION;
4129 pThis->DevSpecificCtrl.n.u4RevMinor = IOMMU_DEVSPEC_CTRL_MINOR_VERSION;
4130
4131 pThis->DevSpecificStatus.u64 = 0;
4132 pThis->DevSpecificStatus.n.u4RevMajor = IOMMU_DEVSPEC_STATUS_MAJOR_VERSION;
4133 pThis->DevSpecificStatus.n.u4RevMinor = IOMMU_DEVSPEC_STATUS_MINOR_VERSION;
4134
4135 pThis->MiscInfo.u64 = RT_MAKE_U64(uMiscInfoReg0, uMiscInfoReg1);
4136
4137 /*
4138 * Initialize parts of the IOMMU state as it would during reset.
4139 * Must be called -after- initializing PCI config. space registers.
4140 */
4141 iommuAmdR3Reset(pDevIns);
4142
4143 return VINF_SUCCESS;
4144}
4145
4146# else /* !IN_RING3 */
4147
4148/**
4149 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
4150 */
4151static DECLCALLBACK(int) iommuAmdRZConstruct(PPDMDEVINS pDevIns)
4152{
4153 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
4154 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
4155 PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
4156
4157 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
4158
4159 /* Set up the MMIO RZ handlers. */
4160 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, iommuAmdMmioWrite, iommuAmdMmioRead, NULL /* pvUser */);
4161 AssertRCReturn(rc, rc);
4162
4163 /* Set up the IOMMU RZ callbacks. */
4164 PDMIOMMUREGCC IommuReg;
4165 RT_ZERO(IommuReg);
4166 IommuReg.u32Version = PDM_IOMMUREGCC_VERSION;
4167 IommuReg.idxIommu = pThis->idxIommu;
4168 IommuReg.pfnMemRead = iommuAmdDeviceMemRead;
4169 IommuReg.pfnMemWrite = iommuAmdDeviceMemWrite;
4170 IommuReg.pfnMsiRemap = iommuAmdDeviceMsiRemap;
4171 IommuReg.u32TheEnd = PDM_IOMMUREGCC_VERSION;
4172 rc = PDMDevHlpIommuSetUpContext(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp));
4173 AssertRCReturn(rc, rc);
4174
4175 return VINF_SUCCESS;
4176}
4177
4178# endif /* !IN_RING3 */
4179
4180/**
4181 * The device registration structure.
4182 */
4183const PDMDEVREG g_DeviceIommuAmd =
4184{
4185 /* .u32Version = */ PDM_DEVREG_VERSION,
4186 /* .uReserved0 = */ 0,
4187 /* .szName = */ "iommu-amd",
4188 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
4189 /* .fClass = */ PDM_DEVREG_CLASS_PCI_BUILTIN,
4190 /* .cMaxInstances = */ ~0U,
4191 /* .uSharedVersion = */ 42,
4192 /* .cbInstanceShared = */ sizeof(IOMMU),
4193 /* .cbInstanceCC = */ sizeof(IOMMUCC),
4194 /* .cbInstanceRC = */ sizeof(IOMMURC),
4195 /* .cMaxPciDevices = */ 1,
4196 /* .cMaxMsixVectors = */ 0,
4197 /* .pszDescription = */ "IOMMU (AMD)",
4198#if defined(IN_RING3)
4199 /* .pszRCMod = */ "VBoxDDRC.rc",
4200 /* .pszR0Mod = */ "VBoxDDR0.r0",
4201 /* .pfnConstruct = */ iommuAmdR3Construct,
4202 /* .pfnDestruct = */ iommuAmdR3Destruct,
4203 /* .pfnRelocate = */ NULL,
4204 /* .pfnMemSetup = */ NULL,
4205 /* .pfnPowerOn = */ NULL,
4206 /* .pfnReset = */ iommuAmdR3Reset,
4207 /* .pfnSuspend = */ NULL,
4208 /* .pfnResume = */ NULL,
4209 /* .pfnAttach = */ NULL,
4210 /* .pfnDetach = */ NULL,
4211 /* .pfnQueryInterface = */ NULL,
4212 /* .pfnInitComplete = */ NULL,
4213 /* .pfnPowerOff = */ NULL,
4214 /* .pfnSoftReset = */ NULL,
4215 /* .pfnReserved0 = */ NULL,
4216 /* .pfnReserved1 = */ NULL,
4217 /* .pfnReserved2 = */ NULL,
4218 /* .pfnReserved3 = */ NULL,
4219 /* .pfnReserved4 = */ NULL,
4220 /* .pfnReserved5 = */ NULL,
4221 /* .pfnReserved6 = */ NULL,
4222 /* .pfnReserved7 = */ NULL,
4223#elif defined(IN_RING0)
4224 /* .pfnEarlyConstruct = */ NULL,
4225 /* .pfnConstruct = */ iommuAmdRZConstruct,
4226 /* .pfnDestruct = */ NULL,
4227 /* .pfnFinalDestruct = */ NULL,
4228 /* .pfnRequest = */ NULL,
4229 /* .pfnReserved0 = */ NULL,
4230 /* .pfnReserved1 = */ NULL,
4231 /* .pfnReserved2 = */ NULL,
4232 /* .pfnReserved3 = */ NULL,
4233 /* .pfnReserved4 = */ NULL,
4234 /* .pfnReserved5 = */ NULL,
4235 /* .pfnReserved6 = */ NULL,
4236 /* .pfnReserved7 = */ NULL,
4237#elif defined(IN_RC)
4238 /* .pfnConstruct = */ iommuAmdRZConstruct,
4239 /* .pfnReserved0 = */ NULL,
4240 /* .pfnReserved1 = */ NULL,
4241 /* .pfnReserved2 = */ NULL,
4242 /* .pfnReserved3 = */ NULL,
4243 /* .pfnReserved4 = */ NULL,
4244 /* .pfnReserved5 = */ NULL,
4245 /* .pfnReserved6 = */ NULL,
4246 /* .pfnReserved7 = */ NULL,
4247#else
4248# error "Not in IN_RING3, IN_RING0 or IN_RC!"
4249#endif
4250 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
4251};
4252
4253#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
4254