VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllIommu.cpp @ 87626

Last change on this file since 87626 was 87494, checked in by vboxsync, 4 years ago

AMD IOMMU: bugref:9654 PDM IOMMU code de-duplication and cleanup, part 3.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.8 KB
/* $Id: PDMAllIommu.cpp 87494 2021-02-01 05:47:40Z vboxsync $ */
/** @file
 * PDM IOMMU - All Contexts.
 */

/*
 * Copyright (C) 2021 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM
#define PDMPCIDEV_INCLUDE_PRIVATE  /* Hack to get pdmpcidevint.h included at the right point. */
#include "PDMInternal.h"

#include <VBox/vmm/vmcc.h>
#ifdef IN_RING3
# include <iprt/mem.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/**
 * Gets the PDM IOMMU for the current context from the PDM device instance.
 */
#ifdef IN_RING0
#define PDMDEVINS_TO_IOMMU(a_pDevIns)   &(a_pDevIns)->Internal.s.pGVM->pdmr0.s.aIommus[0]
#else
#define PDMDEVINS_TO_IOMMU(a_pDevIns)   &(a_pDevIns)->Internal.s.pVMR3->pdm.s.aIommus[0]
#endif


/**
 * Gets the PCI device ID (Bus:Dev:Fn) for the given PCI device.
 *
 * @returns PCI device ID.
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device structure. Cannot be NULL.
 */
DECL_FORCE_INLINE(uint16_t) pdmIommuGetPciDeviceId(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev)
{
    uint8_t const idxBus = pPciDev->Int.s.idxPdmBus;
#if defined(IN_RING0)
    PGVM pGVM = pDevIns->Internal.s.pGVM;
    Assert(idxBus < RT_ELEMENTS(pGVM->pdmr0.s.aPciBuses));
    PCPDMPCIBUSR0 pBus = &pGVM->pdmr0.s.aPciBuses[idxBus];
#elif defined(IN_RING3)
    PVM pVM = pDevIns->Internal.s.pVMR3;
    Assert(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses));
    PCPDMPCIBUS pBus = &pVM->pdm.s.aPciBuses[idxBus];
#endif
    return PCIBDF_MAKE(pBus->iBus, pPciDev->uDevFn);
}
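
/*
 * Worked example (illustrative sketch; the exact PCIBDF_MAKE packing is an assumption based
 * on the conventional Bus:Dev:Fn layout of bus in bits 15:8, device in bits 7:3 and function
 * in bits 2:0): a device at bus 0, device 3, function 0 would resolve as follows.
 *
 *   uint8_t const  uBus   = 0;
 *   uint8_t const  uDevFn = (3 << 3) | 0;               // device 3, function 0 -> 0x18
 *   uint16_t const uDevId = PCIBDF_MAKE(uBus, uDevFn);  // assumed (uBus << 8) | uDevFn = 0x0018
 */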


/** @copydoc PDMIOMMUREGR3::pfnMsiRemap */
int pdmIommuMsiRemap(PPDMDEVINS pDevIns, uint16_t uDeviceId, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (   pDevInsIommu
        && pDevInsIommu != pDevIns)
    {
        int rc = pIommu->pfnMsiRemap(pDevInsIommu, uDeviceId, pMsiIn, pMsiOut);
        if (RT_FAILURE(rc))
        {
            LogFunc(("MSI remap failed. uDeviceId=%#x pMsiIn=(%#RX64, %#RU32) rc=%Rrc\n", uDeviceId, pMsiIn->Addr.u64,
                     pMsiIn->Data.u32, rc));
        }
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
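
/*
 * Illustrative call site (sketch; pDevIns/pPciDev stand for a valid PCI device instance and
 * the address/data values below are made up): remap an MSI through the IOMMU before delivery,
 * falling back to the untranslated message when no IOMMU is configured.
 *
 *   MSIMSG MsiIn;
 *   MsiIn.Addr.u64 = UINT64_C(0xfee00000);   // typical x86 MSI address window
 *   MsiIn.Data.u32 = 0x0041;                 // made-up vector/delivery-mode encoding
 *
 *   MSIMSG MsiOut;
 *   int rc = pdmIommuMsiRemap(pDevIns, pdmIommuGetPciDeviceId(pDevIns, pPciDev), &MsiIn, &MsiOut);
 *   if (rc == VERR_IOMMU_NOT_PRESENT)
 *       MsiOut = MsiIn;                      // no IOMMU present: deliver the original message
 */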


/**
 * Bus master physical memory read after translating the physical address using the
 * IOMMU.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device. Cannot be NULL.
 * @param   GCPhys      The guest-physical address to read.
 * @param   pvBuf       Where to put the data read.
 * @param   cbRead      How many bytes to read.
 * @param   fFlags      Combination of PDM_DEVHLP_PHYS_RW_F_XXX.
 *
 * @thread  Any.
 */
int pdmIommuMemAccessRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (   pDevInsIommu
        && pDevInsIommu != pDevIns)
    {
        uint16_t const uDeviceId = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        int rc = VINF_SUCCESS;
        while (cbRead > 0)
        {
            RTGCPHYS GCPhysOut;
            size_t   cbContig;
            rc = pIommu->pfnMemAccess(pDevInsIommu, uDeviceId, GCPhys, cbRead, PDMIOMMU_MEM_F_READ, &GCPhysOut, &cbContig);
            if (RT_SUCCESS(rc))
            {
                /** @todo Handle strict return codes from PGMPhysRead. */
                rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysRead(pDevIns, GCPhysOut, pvBuf, cbContig, fFlags);
                if (RT_SUCCESS(rc))
                {
                    cbRead -= cbContig;
                    pvBuf   = (void *)((uintptr_t)pvBuf + cbContig);
                    GCPhys += cbContig;
                }
                else
                    break;
            }
            else
            {
                LogFunc(("IOMMU memory read failed. uDeviceId=%#x GCPhys=%#RGp cb=%zu rc=%Rrc\n", uDeviceId, GCPhys, cbRead, rc));
                break;
            }
        }
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
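
/*
 * Illustrative call site (sketch; the device instance, PCI device, descriptor address and the
 * zero fFlags value are assumptions): a bus-master device fetching a 512-byte descriptor
 * through the IOMMU, with a direct physical read as the fallback when no IOMMU is present.
 *
 *   uint8_t abDesc[512];
 *   int rc = pdmIommuMemAccessRead(pDevIns, pPciDev, GCPhysDesc, &abDesc[0], sizeof(abDesc), 0);  // fFlags: 0 as placeholder
 *   if (rc == VERR_IOMMU_NOT_PRESENT)
 *       rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysRead(pDevIns, GCPhysDesc, &abDesc[0], sizeof(abDesc), 0);
 *   AssertRC(rc);
 */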


/**
 * Bus master physical memory write after translating the physical address using the
 * IOMMU.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device structure. Cannot be NULL.
 * @param   GCPhys      The guest-physical address to write.
 * @param   pvBuf       The data to write.
 * @param   cbWrite     How many bytes to write.
 * @param   fFlags      Combination of PDM_DEVHLP_PHYS_RW_F_XXX.
 *
 * @thread  Any.
 */
int pdmIommuMemAccessWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite,
                           uint32_t fFlags)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (   pDevInsIommu
        && pDevInsIommu != pDevIns)
    {
        uint16_t const uDeviceId = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        int rc = VINF_SUCCESS;
        while (cbWrite > 0)
        {
            RTGCPHYS GCPhysOut;
            size_t   cbContig;
            rc = pIommu->pfnMemAccess(pDevInsIommu, uDeviceId, GCPhys, cbWrite, PDMIOMMU_MEM_F_WRITE, &GCPhysOut, &cbContig);
            if (RT_SUCCESS(rc))
            {
                /** @todo Handle strict return codes from PGMPhysWrite. */
                rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysWrite(pDevIns, GCPhysOut, pvBuf, cbContig, fFlags);
                if (RT_SUCCESS(rc))
                {
                    cbWrite -= cbContig;
                    pvBuf    = (const void *)((uintptr_t)pvBuf + cbContig);
                    GCPhys  += cbContig;
                }
                else
                    break;
            }
            else
            {
                LogFunc(("IOMMU memory write failed. uDeviceId=%#x GCPhys=%#RGp cb=%zu rc=%Rrc\n", uDeviceId, GCPhys, cbWrite,
                         rc));
                break;
            }
        }
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}


#ifdef IN_RING3
/**
 * Requests the mapping of a guest page into ring-3 in preparation for a bus master
 * physical memory read operation.
 *
 * Refer to pfnPhysGCPhys2CCPtrReadOnly() for further details.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device structure. Cannot be NULL.
 * @param   GCPhys      The guest physical address of the page that should be
 *                      mapped.
 * @param   fFlags      Flags reserved for future use, MBZ.
 * @param   ppv         Where to store the address corresponding to GCPhys.
 * @param   pLock       Where to store the lock information that
 *                      pfnPhysReleasePageMappingLock needs.
 */
int pdmR3IommuMemAccessReadCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, uint32_t fFlags, void const **ppv,
                                 PPGMPAGEMAPLOCK pLock)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (   pDevInsIommu
        && pDevInsIommu != pDevIns)
    {
        uint16_t const uDeviceId = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        size_t   cbContig  = 0;
        RTGCPHYS GCPhysOut = NIL_RTGCPHYS;
        int rc = pIommu->pfnMemAccess(pDevInsIommu, uDeviceId, GCPhys & X86_PAGE_BASE_MASK, X86_PAGE_SIZE, PDMIOMMU_MEM_F_READ,
                                      &GCPhysOut, &cbContig);
        if (RT_SUCCESS(rc))
        {
            Assert(GCPhysOut != NIL_RTGCPHYS);
            Assert(cbContig == X86_PAGE_SIZE);
            return pDevIns->pHlpR3->pfnPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysOut, fFlags, ppv, pLock);
        }

        LogFunc(("IOMMU memory read for pointer access failed. uDeviceId=%#x GCPhys=%#RGp rc=%Rrc\n", uDeviceId, GCPhys, rc));
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
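
/*
 * Illustrative ring-3 call site (sketch; the device/PCI instance and the page address are
 * assumptions): map one guest page read-only through the IOMMU, use it, then release the
 * mapping lock with the helper named in the doc comment above.
 *
 *   void const    *pvPage = NULL;
 *   PGMPAGEMAPLOCK Lock;
 *   int rc = pdmR3IommuMemAccessReadCCPtr(pDevIns, pPciDev, GCPhysPage, 0, &pvPage, &Lock);  // fFlags is MBZ
 *   if (RT_SUCCESS(rc))
 *   {
 *       // ... read from pvPage ...
 *       pDevIns->pHlpR3->pfnPhysReleasePageMappingLock(pDevIns, &Lock);
 *   }
 */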


/**
 * Requests the mapping of a guest page into ring-3 in preparation for a bus master
 * physical memory write operation.
 *
 * Refer to pfnPhysGCPhys2CCPtr() for further details.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device structure. Cannot be NULL.
 * @param   GCPhys      The guest physical address of the page that should be
 *                      mapped.
 * @param   fFlags      Flags reserved for future use, MBZ.
 * @param   ppv         Where to store the address corresponding to GCPhys.
 * @param   pLock       Where to store the lock information that
 *                      pfnPhysReleasePageMappingLock needs.
 */
int pdmR3IommuMemAccessWriteCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, uint32_t fFlags, void **ppv,
                                  PPGMPAGEMAPLOCK pLock)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (   pDevInsIommu
        && pDevInsIommu != pDevIns)
    {
        uint16_t const uDeviceId = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        size_t   cbContig  = 0;
        RTGCPHYS GCPhysOut = NIL_RTGCPHYS;
        int rc = pIommu->pfnMemAccess(pDevInsIommu, uDeviceId, GCPhys & X86_PAGE_BASE_MASK, X86_PAGE_SIZE, PDMIOMMU_MEM_F_WRITE,
                                      &GCPhysOut, &cbContig);
        if (RT_SUCCESS(rc))
        {
            Assert(GCPhysOut != NIL_RTGCPHYS);
            Assert(cbContig == X86_PAGE_SIZE);
            return pDevIns->pHlpR3->pfnPhysGCPhys2CCPtr(pDevIns, GCPhysOut, fFlags, ppv, pLock);
        }

        LogFunc(("IOMMU memory write for pointer access failed. uDeviceId=%#x GCPhys=%#RGp rc=%Rrc\n", uDeviceId, GCPhys, rc));
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}


/**
 * Requests the mapping of multiple guest pages into ring-3 in preparation for a bus
 * master physical memory read operation.
 *
 * Refer to pfnPhysBulkGCPhys2CCPtrReadOnly() for further details.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns         The device instance.
 * @param   pPciDev         The PCI device structure. Cannot be NULL.
 * @param   cPages          Number of pages to lock.
 * @param   paGCPhysPages   The guest physical address of the pages that
 *                          should be mapped (@a cPages entries).
 * @param   fFlags          Flags reserved for future use, MBZ.
 * @param   papvPages       Where to store the ring-3 mapping addresses
 *                          corresponding to @a paGCPhysPages.
 * @param   paLocks         Where to store the locking information that
 *                          pfnPhysBulkReleasePageMappingLock needs (@a cPages
 *                          in length).
 */
int pdmR3IommuMemAccessBulkReadCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
                                     uint32_t fFlags, const void **papvPages, PPGMPAGEMAPLOCK paLocks)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (   pDevInsIommu
        && pDevInsIommu != pDevIns)
    {
        /* Allocate space for translated addresses. */
        size_t const cbIovas     = cPages * sizeof(uint64_t);
        PRTGCPHYS    paGCPhysOut = (PRTGCPHYS)RTMemAllocZ(cbIovas);
        if (paGCPhysOut)
        { /* likely */ }
        else
        {
            LogFunc(("caller='%s'/%d: returns %Rrc - Failed to alloc %zu bytes for IOVA addresses\n",
                     pDevIns->pReg->szName, pDevIns->iInstance, VERR_NO_MEMORY, cbIovas));
            return VERR_NO_MEMORY;
        }

        /* Ask the IOMMU for the corresponding translated physical addresses. */
        uint16_t const uDeviceId = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        AssertCompile(sizeof(RTGCPHYS) == sizeof(uint64_t));
        int rc = pIommu->pfnMemBulkAccess(pDevInsIommu, uDeviceId, cPages, (uint64_t const *)paGCPhysPages, PDMIOMMU_MEM_F_READ,
                                          paGCPhysOut);
        if (RT_SUCCESS(rc))
        {
            /* Perform the bulk mapping but with the translated addresses. */
            rc = pDevIns->pHlpR3->pfnPhysBulkGCPhys2CCPtrReadOnly(pDevIns, cPages, paGCPhysOut, fFlags, papvPages, paLocks);
            if (RT_FAILURE(rc))
                LogFunc(("Bulk mapping for read access failed. cPages=%u fFlags=%#x rc=%Rrc\n", cPages, fFlags, rc));
        }
        else
            LogFunc(("Bulk translation for read access failed. uDeviceId=%#x cPages=%u rc=%Rrc\n", uDeviceId, cPages, rc));

        RTMemFree(paGCPhysOut);
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
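
/*
 * Illustrative ring-3 call site (sketch; the device/PCI instance and page addresses are
 * assumptions, and the release call's signature is assumed to be (pDevIns, cPages, paLocks)):
 * bulk-map two guest pages read-only and release them with the bulk unlock helper named in
 * the doc comment above.
 *
 *   RTGCPHYS const aGCPhysPages[2] = { GCPhysRing, GCPhysRing + X86_PAGE_SIZE };
 *   void const    *apvPages[2];
 *   PGMPAGEMAPLOCK aLocks[2];
 *   int rc = pdmR3IommuMemAccessBulkReadCCPtr(pDevIns, pPciDev, RT_ELEMENTS(aGCPhysPages), aGCPhysPages,
 *                                             0, apvPages, aLocks);  // fFlags is MBZ
 *   if (RT_SUCCESS(rc))
 *   {
 *       // ... read from apvPages[0] and apvPages[1] ...
 *       pDevIns->pHlpR3->pfnPhysBulkReleasePageMappingLock(pDevIns, RT_ELEMENTS(aLocks), aLocks);
 *   }
 */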


/**
 * Requests the mapping of multiple guest pages into ring-3 in preparation for a bus
 * master physical memory write operation.
 *
 * Refer to pfnPhysBulkGCPhys2CCPtr() for further details.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns         The device instance.
 * @param   pPciDev         The PCI device structure. Cannot be NULL.
 * @param   cPages          Number of pages to lock.
 * @param   paGCPhysPages   The guest physical address of the pages that
 *                          should be mapped (@a cPages entries).
 * @param   fFlags          Flags reserved for future use, MBZ.
 * @param   papvPages       Where to store the ring-3 mapping addresses
 *                          corresponding to @a paGCPhysPages.
 * @param   paLocks         Where to store the locking information that
 *                          pfnPhysBulkReleasePageMappingLock needs (@a cPages
 *                          in length).
 */
int pdmR3IommuMemAccessBulkWriteCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
                                      uint32_t fFlags, void **papvPages, PPGMPAGEMAPLOCK paLocks)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (   pDevInsIommu
        && pDevInsIommu != pDevIns)
    {
        /* Allocate space for translated addresses. */
        size_t const cbIovas     = cPages * sizeof(uint64_t);
        PRTGCPHYS    paGCPhysOut = (PRTGCPHYS)RTMemAllocZ(cbIovas);
        if (paGCPhysOut)
        { /* likely */ }
        else
        {
            LogFunc(("caller='%s'/%d: returns %Rrc - Failed to alloc %zu bytes for IOVA addresses\n",
                     pDevIns->pReg->szName, pDevIns->iInstance, VERR_NO_MEMORY, cbIovas));
            return VERR_NO_MEMORY;
        }

        /* Ask the IOMMU for the corresponding translated physical addresses. */
        uint16_t const uDeviceId = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        AssertCompile(sizeof(RTGCPHYS) == sizeof(uint64_t));
        int rc = pIommu->pfnMemBulkAccess(pDevInsIommu, uDeviceId, cPages, (uint64_t const *)paGCPhysPages, PDMIOMMU_MEM_F_WRITE,
                                          paGCPhysOut);
        if (RT_SUCCESS(rc))
        {
            /* Perform the bulk mapping but with the translated addresses. */
            rc = pDevIns->pHlpR3->pfnPhysBulkGCPhys2CCPtr(pDevIns, cPages, paGCPhysOut, fFlags, papvPages, paLocks);
            if (RT_FAILURE(rc))
                LogFunc(("Bulk mapping of addresses failed. cPages=%u fFlags=%#x rc=%Rrc\n", cPages, fFlags, rc));
        }
        else
            LogFunc(("IOMMU bulk translation failed. uDeviceId=%#x cPages=%u rc=%Rrc\n", uDeviceId, cPages, rc));

        RTMemFree(paGCPhysOut);
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
#endif /* IN_RING3 */