source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllIommu.cpp@102020

Last change on this file since 102020 was 98103, checked in by vboxsync, 23 months ago: Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 18.2 KB
/* $Id: PDMAllIommu.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * PDM IOMMU - All Contexts.
 */

/*
 * Copyright (C) 2021-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM
#define PDMPCIDEV_INCLUDE_PRIVATE  /* Hack to get pdmpcidevint.h included at the right point. */
#include "PDMInternal.h"

#include <VBox/vmm/vmcc.h>
#include <iprt/string.h>
#ifdef IN_RING3
# include <iprt/mem.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/**
 * Gets the PDM IOMMU for the current context from the PDM device instance.
 */
#ifdef IN_RING0
#define PDMDEVINS_TO_IOMMU(a_pDevIns)   &(a_pDevIns)->Internal.s.pGVM->pdmr0.s.aIommus[0];
#else
#define PDMDEVINS_TO_IOMMU(a_pDevIns)   &(a_pDevIns)->Internal.s.pVMR3->pdm.s.aIommus[0];
#endif


/**
 * Gets the PCI device ID (Bus:Dev:Fn) for the given PCI device.
 *
 * @returns PCI device ID.
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device structure. Cannot be NULL.
 */
DECL_FORCE_INLINE(uint16_t) pdmIommuGetPciDeviceId(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev)
{
    uint8_t const idxBus = pPciDev->Int.s.idxPdmBus;
#if defined(IN_RING0)
    PGVM          pGVM = pDevIns->Internal.s.pGVM;
    Assert(idxBus < RT_ELEMENTS(pGVM->pdmr0.s.aPciBuses));
    PCPDMPCIBUSR0 pBus = &pGVM->pdmr0.s.aPciBuses[idxBus];
#elif defined(IN_RING3)
    PVM           pVM  = pDevIns->Internal.s.pVMR3;
    Assert(idxBus < RT_ELEMENTS(pVM->pdm.s.aPciBuses));
    PCPDMPCIBUS   pBus = &pVM->pdm.s.aPciBuses[idxBus];
#endif
    return PCIBDF_MAKE(pBus->iBus, pPciDev->uDevFn);
}
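
/*
 * Illustration (added for this write-up, not part of the original file): PCIBDF_MAKE
 * packs the bus number into the high byte and the device/function byte into the low
 * byte, so a device at 00:02.0 (uDevFn = (2 << 3) | 0 = 0x10) on bus 0 yields the
 * device ID 0x0010.  A minimal standalone sketch of that packing, assuming the usual
 * 8/5/3-bit bus/device/function split:
 *
 * @code
 *     static uint16_t exampleMakeBdf(uint8_t uBus, uint8_t uDevFn)
 *     {
 *         return (uint16_t)(((uint16_t)uBus << 8) | uDevFn);  // bus[15:8], dev[7:3], fn[2:0]
 *     }
 * @endcode
 */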


/**
 * Returns whether an IOMMU instance is present.
 *
 * @returns @c true if an IOMMU is present, @c false otherwise.
 * @param   pDevIns     The device instance.
 */
bool pdmIommuIsPresent(PPDMDEVINS pDevIns)
{
#ifdef IN_RING0
    PCPDMIOMMUR3 pIommuR3 = &pDevIns->Internal.s.pGVM->pdm.s.aIommus[0];
#else
    PCPDMIOMMUR3 pIommuR3 = &pDevIns->Internal.s.pVMR3->pdm.s.aIommus[0];
#endif
    return pIommuR3->pDevInsR3 != NULL;
}


/** @copydoc PDMIOMMUREGR3::pfnMsiRemap */
int pdmIommuMsiRemap(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    Assert(pDevInsIommu);
    if (pDevInsIommu != pDevIns)
        return pIommu->pfnMsiRemap(pDevInsIommu, idDevice, pMsiIn, pMsiOut);
    return VERR_IOMMU_CANNOT_CALL_SELF;
}
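
/*
 * Usage sketch (illustrative only; the MSIMSG field names are assumed from VBox/msi.h
 * and the helper below is hypothetical): an interrupt delivery path would remap the
 * MSI through the IOMMU before sending the translated message onwards.
 *
 * @code
 *     static void exampleDeliverMsi(PPDMDEVINS pDevIns, uint16_t idDevice, PCMSIMSG pMsiIn)
 *     {
 *         MSIMSG MsiOut;
 *         int rc = pdmIommuMsiRemap(pDevIns, idDevice, pMsiIn, &MsiOut);
 *         if (RT_SUCCESS(rc))
 *             LogFunc(("Remapped MSI: addr=%#RX64 data=%#RX32\n", MsiOut.Addr.u64, MsiOut.Data.u32));
 *         else
 *             LogFunc(("MSI remapping failed. rc=%Rrc\n", rc));
 *     }
 * @endcode
 */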


/**
 * Bus master physical memory read after translating the physical address using the
 * IOMMU.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device. Cannot be NULL.
 * @param   GCPhys      The guest-physical address to read.
 * @param   pvBuf       Where to put the data read.
 * @param   cbRead      How many bytes to read.
 * @param   fFlags      Combination of PDM_DEVHLP_PHYS_RW_F_XXX.
 *
 * @thread  Any.
 */
int pdmIommuMemAccessRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, uint32_t fFlags)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (pDevInsIommu)
    {
        if (pDevInsIommu != pDevIns)
        { /* likely */ }
        else
            return VERR_IOMMU_CANNOT_CALL_SELF;

        uint16_t const idDevice = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        int rc = VINF_SUCCESS;
        while (cbRead > 0)
        {
            RTGCPHYS GCPhysOut;
            size_t   cbContig;
            rc = pIommu->pfnMemAccess(pDevInsIommu, idDevice, GCPhys, cbRead, PDMIOMMU_MEM_F_READ, &GCPhysOut, &cbContig);
            if (RT_SUCCESS(rc))
            {
                Assert(cbContig > 0 && cbContig <= cbRead);
                /** @todo Handle strict return codes from PGMPhysRead. */
                rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysRead(pDevIns, GCPhysOut, pvBuf, cbContig, fFlags);
                if (RT_SUCCESS(rc))
                {
                    cbRead -= cbContig;
                    pvBuf   = (void *)((uintptr_t)pvBuf + cbContig);
                    GCPhys += cbContig;
                }
                else
                    break;
            }
            else
            {
                LogFunc(("IOMMU memory read failed. idDevice=%#x GCPhys=%#RGp cb=%zu rc=%Rrc\n", idDevice, GCPhys, cbRead, rc));

                /*
                 * We should initialize the read buffer on failure for devices that don't check
                 * return codes (but would verify the data). But we still want to propagate the
                 * error code from the IOMMU to the device, see @bugref{9936#c3}.
                 */
                memset(pvBuf, 0xff, cbRead);
                break;
            }
        }
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
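
/*
 * Caller-side sketch (illustrative, not from this file): a bus master read of a
 * hypothetical 256-byte descriptor, falling back to an untranslated device-helper
 * read when no IOMMU is configured.  GCPhysDesc is a placeholder address.
 *
 * @code
 *     uint8_t abDesc[256];
 *     int rc = pdmIommuMemAccessRead(pDevIns, pPciDev, GCPhysDesc, &abDesc[0], sizeof(abDesc),
 *                                    PDM_DEVHLP_PHYS_RW_F_DEFAULT);
 *     if (   rc == VERR_IOMMU_NOT_PRESENT
 *         || rc == VERR_IOMMU_CANNOT_CALL_SELF)
 *         rc = PDMDevHlpPhysRead(pDevIns, GCPhysDesc, &abDesc[0], sizeof(abDesc));
 * @endcode
 */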


/**
 * Bus master physical memory write after translating the physical address using the
 * IOMMU.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device structure. Cannot be NULL.
 * @param   GCPhys      The guest-physical address to write.
 * @param   pvBuf       The data to write.
 * @param   cbWrite     How many bytes to write.
 * @param   fFlags      Combination of PDM_DEVHLP_PHYS_RW_F_XXX.
 *
 * @thread  Any.
 */
int pdmIommuMemAccessWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite,
                           uint32_t fFlags)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (pDevInsIommu)
    {
        if (pDevInsIommu != pDevIns)
        { /* likely */ }
        else
            return VERR_IOMMU_CANNOT_CALL_SELF;

        uint16_t const idDevice = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        int rc = VINF_SUCCESS;
        while (cbWrite > 0)
        {
            RTGCPHYS GCPhysOut;
            size_t   cbContig;
            rc = pIommu->pfnMemAccess(pDevInsIommu, idDevice, GCPhys, cbWrite, PDMIOMMU_MEM_F_WRITE, &GCPhysOut, &cbContig);
            if (RT_SUCCESS(rc))
            {
                Assert(cbContig > 0 && cbContig <= cbWrite);
                /** @todo Handle strict return codes from PGMPhysWrite. */
                rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysWrite(pDevIns, GCPhysOut, pvBuf, cbContig, fFlags);
                if (RT_SUCCESS(rc))
                {
                    cbWrite -= cbContig;
                    pvBuf    = (const void *)((uintptr_t)pvBuf + cbContig);
                    GCPhys  += cbContig;
                }
                else
                    break;
            }
            else
            {
                LogFunc(("IOMMU memory write failed. idDevice=%#x GCPhys=%#RGp cb=%zu rc=%Rrc\n", idDevice, GCPhys, cbWrite, rc));
                break;
            }
        }
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
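
/*
 * Caller-side sketch (illustrative, not from this file): posting a 4-byte completion
 * status back to guest memory, with the same fallback pattern as the read case.
 * GCPhysStatus is a placeholder address.
 *
 * @code
 *     uint32_t const uStatus = UINT32_C(1);
 *     int rc = pdmIommuMemAccessWrite(pDevIns, pPciDev, GCPhysStatus, &uStatus, sizeof(uStatus),
 *                                     PDM_DEVHLP_PHYS_RW_F_DEFAULT);
 *     if (   rc == VERR_IOMMU_NOT_PRESENT
 *         || rc == VERR_IOMMU_CANNOT_CALL_SELF)
 *         rc = PDMDevHlpPhysWrite(pDevIns, GCPhysStatus, &uStatus, sizeof(uStatus));
 * @endcode
 */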


#ifdef IN_RING3
/**
 * Requests the mapping of a guest page into ring-3 in preparation for a bus master
 * physical memory read operation.
 *
 * Refer to pfnPhysGCPhys2CCPtrReadOnly() for further details.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device structure. Cannot be NULL.
 * @param   GCPhys      The guest physical address of the page that should be
 *                      mapped.
 * @param   fFlags      Flags reserved for future use, MBZ.
 * @param   ppv         Where to store the address corresponding to GCPhys.
 * @param   pLock       Where to store the lock information that
 *                      pfnPhysReleasePageMappingLock needs.
 */
int pdmR3IommuMemAccessReadCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, uint32_t fFlags, void const **ppv,
                                 PPGMPAGEMAPLOCK pLock)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (pDevInsIommu)
    {
        if (pDevInsIommu != pDevIns)
        { /* likely */ }
        else
            return VERR_IOMMU_CANNOT_CALL_SELF;

        uint16_t const idDevice  = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        size_t         cbContig  = 0;
        RTGCPHYS       GCPhysOut = NIL_RTGCPHYS;
        int rc = pIommu->pfnMemAccess(pDevInsIommu, idDevice, GCPhys & X86_PAGE_BASE_MASK, X86_PAGE_SIZE, PDMIOMMU_MEM_F_READ,
                                      &GCPhysOut, &cbContig);
        if (RT_SUCCESS(rc))
        {
            Assert(GCPhysOut != NIL_RTGCPHYS);
            Assert(cbContig == X86_PAGE_SIZE);
            return pDevIns->pHlpR3->pfnPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysOut, fFlags, ppv, pLock);
        }

        LogFunc(("IOMMU memory read for pointer access failed. idDevice=%#x GCPhys=%#RGp rc=%Rrc\n", idDevice, GCPhys, rc));
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
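
/*
 * Ring-3 usage sketch (illustrative, not from this file): map a guest page read-only,
 * peek at it, then release the mapping lock.  GCPhysPage is a placeholder and is
 * assumed to be page aligned.
 *
 * @code
 *     void const    *pvPage = NULL;
 *     PGMPAGEMAPLOCK Lock;
 *     int rc = pdmR3IommuMemAccessReadCCPtr(pDevIns, pPciDev, GCPhysPage, 0 /*fFlags*/, &pvPage, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         uint32_t const uFirstDword = *(uint32_t const *)pvPage;
 *         LogFunc(("First dword of mapped page: %#RX32\n", uFirstDword));
 *         PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
 *     }
 * @endcode
 */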


/**
 * Requests the mapping of a guest page into ring-3 in preparation for a bus master
 * physical memory write operation.
 *
 * Refer to pfnPhysGCPhys2CCPtr() for further details.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns     The device instance.
 * @param   pPciDev     The PCI device structure. Cannot be NULL.
 * @param   GCPhys      The guest physical address of the page that should be
 *                      mapped.
 * @param   fFlags      Flags reserved for future use, MBZ.
 * @param   ppv         Where to store the address corresponding to GCPhys.
 * @param   pLock       Where to store the lock information that
 *                      pfnPhysReleasePageMappingLock needs.
 */
int pdmR3IommuMemAccessWriteCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, RTGCPHYS GCPhys, uint32_t fFlags, void **ppv,
                                  PPGMPAGEMAPLOCK pLock)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (pDevInsIommu)
    {
        if (pDevInsIommu != pDevIns)
        { /* likely */ }
        else
            return VERR_IOMMU_CANNOT_CALL_SELF;

        uint16_t const idDevice  = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        size_t         cbContig  = 0;
        RTGCPHYS       GCPhysOut = NIL_RTGCPHYS;
        int rc = pIommu->pfnMemAccess(pDevInsIommu, idDevice, GCPhys & X86_PAGE_BASE_MASK, X86_PAGE_SIZE, PDMIOMMU_MEM_F_WRITE,
                                      &GCPhysOut, &cbContig);
        if (RT_SUCCESS(rc))
        {
            Assert(GCPhysOut != NIL_RTGCPHYS);
            Assert(cbContig == X86_PAGE_SIZE);
            return pDevIns->pHlpR3->pfnPhysGCPhys2CCPtr(pDevIns, GCPhysOut, fFlags, ppv, pLock);
        }

        LogFunc(("IOMMU memory write for pointer access failed. idDevice=%#x GCPhys=%#RGp rc=%Rrc\n", idDevice, GCPhys, rc));
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}


/**
 * Requests the mapping of multiple guest pages into ring-3 in preparation for a bus
 * master physical memory read operation.
 *
 * Refer to pfnPhysBulkGCPhys2CCPtrReadOnly() for further details.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns         The device instance.
 * @param   pPciDev         The PCI device structure. Cannot be NULL.
 * @param   cPages          Number of pages to lock.
 * @param   paGCPhysPages   The guest physical addresses of the pages that
 *                          should be mapped (@a cPages entries).
 * @param   fFlags          Flags reserved for future use, MBZ.
 * @param   papvPages       Where to store the ring-3 mapping addresses
 *                          corresponding to @a paGCPhysPages.
 * @param   paLocks         Where to store the locking information that
 *                          pfnPhysBulkReleasePageMappingLock needs (@a cPages
 *                          in length).
 */
int pdmR3IommuMemAccessBulkReadCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
                                     uint32_t fFlags, const void **papvPages, PPGMPAGEMAPLOCK paLocks)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (pDevInsIommu)
    {
        if (pDevInsIommu != pDevIns)
        { /* likely */ }
        else
            return VERR_IOMMU_CANNOT_CALL_SELF;

        /* Allocate space for translated addresses. */
        size_t const cbIovas     = cPages * sizeof(uint64_t);
        PRTGCPHYS    paGCPhysOut = (PRTGCPHYS)RTMemAllocZ(cbIovas);
        if (paGCPhysOut)
        { /* likely */ }
        else
        {
            LogFunc(("caller='%s'/%d: returns %Rrc - Failed to alloc %zu bytes for IOVA addresses\n",
                     pDevIns->pReg->szName, pDevIns->iInstance, VERR_NO_MEMORY, cbIovas));
            return VERR_NO_MEMORY;
        }

        /* Ask the IOMMU for corresponding translated physical addresses. */
        uint16_t const idDevice = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        AssertCompile(sizeof(RTGCPHYS) == sizeof(uint64_t));
        int rc = pIommu->pfnMemBulkAccess(pDevInsIommu, idDevice, cPages, (uint64_t const *)paGCPhysPages, PDMIOMMU_MEM_F_READ,
                                          paGCPhysOut);
        if (RT_SUCCESS(rc))
        {
            /* Perform the bulk mapping but with the translated addresses. */
            rc = pDevIns->pHlpR3->pfnPhysBulkGCPhys2CCPtrReadOnly(pDevIns, cPages, paGCPhysOut, fFlags, papvPages, paLocks);
            if (RT_FAILURE(rc))
                LogFunc(("Bulk mapping for read access failed. cPages=%u fFlags=%#x rc=%Rrc\n", cPages, fFlags, rc));
        }
        else
            LogFunc(("Bulk translation for read access failed. idDevice=%#x cPages=%u rc=%Rrc\n", idDevice, cPages, rc));

        RTMemFree(paGCPhysOut);
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
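
/*
 * Ring-3 usage sketch (illustrative, not from this file; the bulk release helper name
 * is assumed from pdmdev.h): map two scatter/gather pages for reading and release the
 * locks afterwards.  GCPhysSg0 and GCPhysSg1 are placeholder page-aligned addresses.
 *
 * @code
 *     RTGCPHYS const aGCPhysPages[2] = { GCPhysSg0, GCPhysSg1 };
 *     void const    *apvPages[2];
 *     PGMPAGEMAPLOCK aLocks[2];
 *     int rc = pdmR3IommuMemAccessBulkReadCCPtr(pDevIns, pPciDev, RT_ELEMENTS(aGCPhysPages), aGCPhysPages,
 *                                               0 /*fFlags*/, apvPages, aLocks);
 *     if (RT_SUCCESS(rc))
 *     {
 *         LogFunc(("Mapped both pages: %p, %p\n", apvPages[0], apvPages[1]));
 *         PDMDevHlpPhysBulkReleasePageMappingLocks(pDevIns, RT_ELEMENTS(aLocks), aLocks);
 *     }
 * @endcode
 */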


/**
 * Requests the mapping of multiple guest pages into ring-3 in preparation for a bus
 * master physical memory write operation.
 *
 * Refer to pfnPhysBulkGCPhys2CCPtr() for further details.
 *
 * @returns VBox status code.
 * @retval  VERR_IOMMU_NOT_PRESENT if an IOMMU is not present.
 *
 * @param   pDevIns         The device instance.
 * @param   pPciDev         The PCI device structure. Cannot be NULL.
 * @param   cPages          Number of pages to lock.
 * @param   paGCPhysPages   The guest physical addresses of the pages that
 *                          should be mapped (@a cPages entries).
 * @param   fFlags          Flags reserved for future use, MBZ.
 * @param   papvPages       Where to store the ring-3 mapping addresses
 *                          corresponding to @a paGCPhysPages.
 * @param   paLocks         Where to store the locking information that
 *                          pfnPhysBulkReleasePageMappingLock needs (@a cPages
 *                          in length).
 */
int pdmR3IommuMemAccessBulkWriteCCPtr(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
                                      uint32_t fFlags, void **papvPages, PPGMPAGEMAPLOCK paLocks)
{
    PPDMIOMMU  pIommu       = PDMDEVINS_TO_IOMMU(pDevIns);
    PPDMDEVINS pDevInsIommu = pIommu->CTX_SUFF(pDevIns);
    if (pDevInsIommu)
    {
        if (pDevInsIommu != pDevIns)
        { /* likely */ }
        else
            return VERR_IOMMU_CANNOT_CALL_SELF;

        /* Allocate space for translated addresses. */
        size_t const cbIovas     = cPages * sizeof(uint64_t);
        PRTGCPHYS    paGCPhysOut = (PRTGCPHYS)RTMemAllocZ(cbIovas);
        if (paGCPhysOut)
        { /* likely */ }
        else
        {
            LogFunc(("caller='%s'/%d: returns %Rrc - Failed to alloc %zu bytes for IOVA addresses\n",
                     pDevIns->pReg->szName, pDevIns->iInstance, VERR_NO_MEMORY, cbIovas));
            return VERR_NO_MEMORY;
        }

        /* Ask the IOMMU for corresponding translated physical addresses. */
        uint16_t const idDevice = pdmIommuGetPciDeviceId(pDevIns, pPciDev);
        AssertCompile(sizeof(RTGCPHYS) == sizeof(uint64_t));
        int rc = pIommu->pfnMemBulkAccess(pDevInsIommu, idDevice, cPages, (uint64_t const *)paGCPhysPages, PDMIOMMU_MEM_F_WRITE,
                                          paGCPhysOut);
        if (RT_SUCCESS(rc))
        {
            /* Perform the bulk mapping but with the translated addresses. */
            rc = pDevIns->pHlpR3->pfnPhysBulkGCPhys2CCPtr(pDevIns, cPages, paGCPhysOut, fFlags, papvPages, paLocks);
            if (RT_FAILURE(rc))
                LogFunc(("Bulk mapping of addresses failed. cPages=%u fFlags=%#x rc=%Rrc\n", cPages, fFlags, rc));
        }
        else
            LogFunc(("IOMMU bulk translation failed. idDevice=%#x cPages=%u rc=%Rrc\n", idDevice, cPages, rc));

        RTMemFree(paGCPhysOut);
        return rc;
    }
    return VERR_IOMMU_NOT_PRESENT;
}
#endif /* IN_RING3 */