VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp@ 95638

Last change on this file since 95638 was 93635, checked in by vboxsync, 3 years ago

VMM/PGM,VMM/PDM,VGA: Consolidate the user parameters of the physical access handlers into a single uint64_t value that shouldn't be a pointer, at least not for ring-0 callbacks. Special hack for devices where it's translated from a ring-0 device instance index into a current context PPDMDEVINS (not really tested yet). bugref:10094

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 27.8 KB
Line 
1/* $Id: IOMR3Mmio.cpp 93635 2022-02-07 10:43:45Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor, MMIO related APIs.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM_MMIO
23#include <VBox/vmm/iom.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/stam.h>
27#include <VBox/vmm/dbgf.h>
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/pdmdev.h>
30#include "IOMInternal.h"
31#include <VBox/vmm/vm.h>
32
33#include <VBox/param.h>
34#include <iprt/assert.h>
35#include <iprt/mem.h>
36#include <iprt/string.h>
37#include <VBox/log.h>
38#include <VBox/err.h>
39
40#include "IOMInline.h"
41
42
43#ifdef VBOX_WITH_STATISTICS
44
45/**
46 * Register statistics for a MMIO entry.
47 */
48void iomR3MmioRegStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry)
49{
50 bool const fDoRZ = pRegEntry->fRing0 || pRegEntry->fRawMode;
51 PIOMMMIOSTATSENTRY pStats = &pVM->iom.s.paMmioStats[pRegEntry->idxStats];
52
53 /* Format the prefix: */
54 char szName[80];
55 size_t cchPrefix = RTStrPrintf(szName, sizeof(szName), "/IOM/MmioRegions/%RGp-%RGp",
56 pRegEntry->GCPhysMapping, pRegEntry->GCPhysMapping + pRegEntry->cbRegion - 1);
57
58 /* Mangle the description if this isn't the first device instance: */
59 const char *pszDesc = pRegEntry->pszDesc;
60 char *pszFreeDesc = NULL;
61 if (pRegEntry->pDevIns && pRegEntry->pDevIns->iInstance > 0 && pszDesc)
62 pszDesc = pszFreeDesc = RTStrAPrintf2("%u / %s", pRegEntry->pDevIns->iInstance, pszDesc);
63
64 /* Register statistics: */
65 int rc = STAMR3Register(pVM, &pRegEntry->idxSelf, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, pszDesc); AssertRC(rc);
66 RTStrFree(pszFreeDesc);
67
68# define SET_NM_SUFFIX(a_sz) memcpy(&szName[cchPrefix], a_sz, sizeof(a_sz))
69 SET_NM_SUFFIX("/Read-Complicated");
70 rc = STAMR3Register(pVM, &pStats->ComplicatedReads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
71 SET_NM_SUFFIX("/Read-FFor00");
72 rc = STAMR3Register(pVM, &pStats->FFor00Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
73 SET_NM_SUFFIX("/Read-R3");
74 rc = STAMR3Register(pVM, &pStats->ProfReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
75 if (fDoRZ)
76 {
77 SET_NM_SUFFIX("/Read-RZ");
78 rc = STAMR3Register(pVM, &pStats->ProfReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
79 SET_NM_SUFFIX("/Read-RZtoR3");
80 rc = STAMR3Register(pVM, &pStats->ReadRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
81 }
82 SET_NM_SUFFIX("/Read-Total");
83 rc = STAMR3Register(pVM, &pStats->Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
84
85 SET_NM_SUFFIX("/Write-Complicated");
86 rc = STAMR3Register(pVM, &pStats->ComplicatedWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
87 SET_NM_SUFFIX("/Write-R3");
88 rc = STAMR3Register(pVM, &pStats->ProfWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
89 if (fDoRZ)
90 {
91 SET_NM_SUFFIX("/Write-RZ");
92 rc = STAMR3Register(pVM, &pStats->ProfWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
93 SET_NM_SUFFIX("/Write-RZtoR3");
94 rc = STAMR3Register(pVM, &pStats->WriteRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
95 SET_NM_SUFFIX("/Write-RZtoR3-Commit");
96 rc = STAMR3Register(pVM, &pStats->CommitRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
97 }
98 SET_NM_SUFFIX("/Write-Total");
99 rc = STAMR3Register(pVM, &pStats->Writes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
100}
101
102
103/**
104 * Deregister statistics for a MMIO entry.
105 */
106static void iomR3MmioDeregStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry, RTGCPHYS GCPhys)
107{
108 char szPrefix[80];
109 RTStrPrintf(szPrefix, sizeof(szPrefix), "/IOM/MmioRegions/%RGp-%RGp", GCPhys, GCPhys + pRegEntry->cbRegion - 1);
110 STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix);
111}
112
113
/**
 * Grows the statistics table.
 *
 * @returns VBox status code.
 * @param   pVM          The cross context VM structure.
 * @param   cNewEntries  The minimum number of new entries.
 * @see     IOMR0IoPortGrowStatisticsTable
 */
static int iomR3MmioGrowStatisticsTable(PVM pVM, uint32_t cNewEntries)
{
    AssertReturn(cNewEntries <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);

    int rc;
    if (!SUPR3IsDriverless())
    {
        /* With a support driver the table lives in ring-0 memory, so ask ring-0 to grow it. */
        rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_STATS, cNewEntries, NULL);
        AssertLogRelRCReturn(rc, rc);
        AssertReturn(cNewEntries <= pVM->iom.s.cMmioStatsAllocation, VERR_IOM_MMIO_IPE_2);
    }
    else
    {
        /*
         * Driverless mode: grow the table ourselves here in ring-3.
         *
         * Validate input and state.
         */
        uint32_t const cOldEntries = pVM->iom.s.cMmioStatsAllocation;
        AssertReturn(cNewEntries > cOldEntries, VERR_IOM_MMIO_IPE_1);
        AssertReturn(pVM->iom.s.cMmioStats <= cOldEntries, VERR_IOM_MMIO_IPE_2);

        /*
         * Calc size and allocate a new table.  The allocation is rounded up to
         * whole host pages, so recompute the entry count to use all the space.
         */
        uint32_t const cbNew = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE);
        cNewEntries = cbNew / sizeof(IOMMMIOSTATSENTRY);

        PIOMMMIOSTATSENTRY const paMmioStats = (PIOMMMIOSTATSENTRY)RTMemPageAllocZ(cbNew);
        if (paMmioStats)
        {
            /*
             * Anything to copy over, update and free the old one.
             */
            PIOMMMIOSTATSENTRY const pOldMmioStats = pVM->iom.s.paMmioStats;
            if (pOldMmioStats)
                memcpy(paMmioStats, pOldMmioStats, cOldEntries * sizeof(IOMMMIOSTATSENTRY));

            pVM->iom.s.paMmioStats          = paMmioStats;
            pVM->iom.s.cMmioStatsAllocation = cNewEntries;

            RTMemPageFree(pOldMmioStats, RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE));

            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NO_PAGE_MEMORY;
    }

    return rc;
}
171
172#endif /* VBOX_WITH_STATISTICS */
173
/**
 * Grows the MMIO registration and lookup tables.
 *
 * (The old doc comment said "I/O port registration statistics table", which was
 * a copy-paste slip: this grows paMmioRegs and paMmioLookup.)
 *
 * @returns VBox status code.
 * @param   pVM          The cross context VM structure.
 * @param   cNewEntries  The minimum number of new entries.
 * @see     IOMR0MmioGrowRegistrationTables
 */
static int iomR3MmioGrowTable(PVM pVM, uint32_t cNewEntries)
{
    AssertReturn(cNewEntries <= _4K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);

    int rc;
    if (!SUPR3IsDriverless())
    {
        /* With a support driver the tables live in ring-0 memory, so ask ring-0 to grow them. */
        rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_REGS, cNewEntries, NULL);
        AssertLogRelRCReturn(rc, rc);
        AssertReturn(cNewEntries <= pVM->iom.s.cMmioAlloc, VERR_IOM_MMIO_IPE_2);
    }
    else
    {
        /*
         * Validate input and state.
         */
        uint32_t const cOldEntries = pVM->iom.s.cMmioAlloc;
        AssertReturn(cNewEntries >= cOldEntries, VERR_IOM_MMIO_IPE_1);

        /*
         * Allocate the new tables.  We use a single allocation for the three tables (ring-0,
         * ring-3, lookup) and does a partial mapping of the result to ring-3.
         */
        uint32_t const cbRing3  = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR3),      HOST_PAGE_SIZE);
        uint32_t const cbShared = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE);
        uint32_t const cbNew    = cbRing3 + cbShared;

        /* Use the rounded up space as best we can. */
        cNewEntries = RT_MIN(cbRing3 / sizeof(IOMMMIOENTRYR3), cbShared / sizeof(IOMMMIOLOOKUPENTRY));

        PIOMMMIOENTRYR3 const paRing3 = (PIOMMMIOENTRYR3)RTMemPageAllocZ(cbNew);
        if (paRing3)
        {
            /* The lookup table lives immediately after the ring-3 entry table in the same allocation. */
            PIOMMMIOLOOKUPENTRY const paLookup = (PIOMMMIOLOOKUPENTRY)((uintptr_t)paRing3 + cbRing3);

            /*
             * Copy over the old info and initialize the idxSelf and idxStats members.
             */
            if (pVM->iom.s.paMmioRegs != NULL)
            {
                memcpy(paRing3,  pVM->iom.s.paMmioRegs,   sizeof(paRing3[0])  * cOldEntries);
                memcpy(paLookup, pVM->iom.s.paMmioLookup, sizeof(paLookup[0]) * cOldEntries);
            }

            /* Initialize the entries beyond the copied ones (including any rounding slack). */
            size_t i = cbRing3 / sizeof(*paRing3);
            while (i-- > cOldEntries)
            {
                paRing3[i].idxSelf  = (uint16_t)i;
                paRing3[i].idxStats = UINT16_MAX;
            }

            /*
             * Update the variables and free the old memory.
             */
            void * const pvFree = pVM->iom.s.paMmioRegs;

            pVM->iom.s.paMmioRegs   = paRing3;
            pVM->iom.s.paMmioLookup = paLookup;
            pVM->iom.s.cMmioAlloc   = cNewEntries;

            RTMemPageFree(pvFree,
                          RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOENTRYR3), HOST_PAGE_SIZE)
                          + RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE));

            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NO_PAGE_MEMORY;
    }
    return rc;
}
253
254
/**
 * Worker for PDMDEVHLPR3::pfnMmioCreateEx.
 *
 * Registers a new (unmapped) MMIO region and returns its handle.  Must be
 * called on EMT(0) while the VM is in the CREATING state.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance registering the region.
 * @param   cbRegion    Region size; non-zero, guest-page aligned, and no
 *                      larger than MM_MMIO_64_MAX.
 * @param   fFlags      IOMMMIO_FLAGS_XXX.
 * @param   pPciDev     Associated PCI device, optional.
 * @param   iPciRegion  PCI region number (used with @a pPciDev).
 * @param   pfnWrite    Write callback; optional if @a pfnRead is given.
 * @param   pfnRead     Read callback; optional if @a pfnWrite is given.
 * @param   pfnFill     Fill callback, optional.
 * @param   pvUser      User argument for the callbacks.
 * @param   pszDesc     Description (under 128 chars); the pointer is stored
 *                      as-is, so it must stay valid for the VM's lifetime.
 * @param   phRegion    Where to return the region handle.
 */
VMMR3_INT_DECL(int) IOMR3MmioCreate(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS cbRegion, uint32_t fFlags, PPDMPCIDEV pPciDev,
                                    uint32_t iPciRegion, PFNIOMMMIONEWWRITE pfnWrite, PFNIOMMMIONEWREAD pfnRead,
                                    PFNIOMMMIONEWFILL pfnFill, void *pvUser, const char *pszDesc, PIOMMMIOHANDLE phRegion)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
    *phRegion = UINT32_MAX;
    /* Creation is restricted to EMT(0) during VM construction, so no locking is needed below. */
    VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
    AssertReturn(!pVM->iom.s.fMmioFrozen, VERR_WRONG_ORDER);

    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);

    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%#RGp (max %#RGp)\n", cbRegion, MM_MMIO_64_MAX),
                    VERR_OUT_OF_RANGE);
    AssertMsgReturn(!(cbRegion & GUEST_PAGE_OFFSET_MASK), ("cbRegion=%#RGp\n", cbRegion), VERR_UNSUPPORTED_ALIGNMENT);

    AssertMsgReturn(   !(fFlags & ~IOMMMIO_FLAGS_VALID_MASK)
                    && (fFlags & IOMMMIO_FLAGS_READ_MODE)  <= IOMMMIO_FLAGS_READ_DWORD_QWORD
                    && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
                    ("%#x\n", fFlags),
                    VERR_INVALID_FLAGS);

    /* At least one of the read/write callbacks must be present. */
    AssertReturn(pfnWrite || pfnRead, VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(pfnWrite, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnRead, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnFill, VERR_INVALID_POINTER);

    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc != '\0', VERR_INVALID_POINTER);
    AssertReturn(strlen(pszDesc) < 128, VERR_INVALID_POINTER);

    /*
     * Ensure that we've got table space for it.
     */
#ifndef VBOX_WITH_STATISTICS
    uint16_t const idxStats        = UINT16_MAX;    /* No stats: sentinel index. */
#else
    uint32_t const idxStats        = pVM->iom.s.cMmioStats;
    uint32_t const cNewMmioStats   = idxStats + 1;
    AssertReturn(cNewMmioStats <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
    if (cNewMmioStats > pVM->iom.s.cMmioStatsAllocation)
    {
        int rc = iomR3MmioGrowStatisticsTable(pVM, cNewMmioStats);
        AssertRCReturn(rc, rc);
        /* Growing must not have changed the entry count behind our back. */
        AssertReturn(idxStats == pVM->iom.s.cMmioStats, VERR_IOM_MMIO_IPE_1);
    }
#endif

    uint32_t idx = pVM->iom.s.cMmioRegs;
    if (idx >= pVM->iom.s.cMmioAlloc)
    {
        int rc = iomR3MmioGrowTable(pVM, pVM->iom.s.cMmioAlloc + 1);
        AssertRCReturn(rc, rc);
        AssertReturn(idx == pVM->iom.s.cMmioRegs, VERR_IOM_MMIO_IPE_1);
    }

    /*
     * Enter it.
     */
    pVM->iom.s.paMmioRegs[idx].cbRegion         = cbRegion;
    pVM->iom.s.paMmioRegs[idx].GCPhysMapping    = NIL_RTGCPHYS;   /* Not mapped yet. */
    pVM->iom.s.paMmioRegs[idx].pvUser           = pvUser;
    pVM->iom.s.paMmioRegs[idx].pDevIns          = pDevIns;
    pVM->iom.s.paMmioRegs[idx].pfnWriteCallback = pfnWrite;
    pVM->iom.s.paMmioRegs[idx].pfnReadCallback  = pfnRead;
    pVM->iom.s.paMmioRegs[idx].pfnFillCallback  = pfnFill;
    pVM->iom.s.paMmioRegs[idx].pszDesc          = pszDesc;
    pVM->iom.s.paMmioRegs[idx].pPciDev          = pPciDev;
    pVM->iom.s.paMmioRegs[idx].iPciRegion       = iPciRegion;
    pVM->iom.s.paMmioRegs[idx].idxStats         = (uint16_t)idxStats;
    pVM->iom.s.paMmioRegs[idx].fMapped          = false;
    pVM->iom.s.paMmioRegs[idx].fFlags           = fFlags;
    pVM->iom.s.paMmioRegs[idx].idxSelf          = idx;

    pVM->iom.s.cMmioRegs = idx + 1;
#ifdef VBOX_WITH_STATISTICS
    pVM->iom.s.cMmioStats = cNewMmioStats;
#endif
    /* The handle is simply the registration table index. */
    *phRegion = idx;
    return VINF_SUCCESS;
}
342
343
/**
 * Worker for PDMDEVHLPR3::pfnMmioMap.
 *
 * Maps a previously created MMIO region at @a GCPhys: registers the range with
 * PGM and inserts it into the sorted lookup table (binary search + memmove).
 *
 * @returns VBox status code.
 * @param   pVM      The cross context VM structure.
 * @param   pDevIns  The device instance which must own @a hRegion.
 * @param   hRegion  Handle returned by IOMR3MmioCreate.
 * @param   GCPhys   Guest-physical base address; must be guest-page aligned.
 */
VMMR3_INT_DECL(int) IOMR3MmioMap(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS GCPhys)
{
    /*
     * Validate input and state.
     */
    AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
    AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
    PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
    AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);

    RTGCPHYS const cbRegion = pRegEntry->cbRegion;
    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%RGp\n", cbRegion), VERR_IOM_MMIO_IPE_1);
    RTGCPHYS const GCPhysLast = GCPhys + cbRegion - 1;

    AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK),
                          ("Misaligned! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);
    AssertLogRelMsgReturn(GCPhysLast > GCPhys,
                          ("Wrapped! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);

    /*
     * Do the mapping.
     */
    int rc = VINF_SUCCESS;
    IOM_LOCK_EXCL(pVM);

    if (!pRegEntry->fMapped)
    {
        uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
        Assert(pVM->iom.s.cMmioLookupEntries == cEntries);

        PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
        PIOMMMIOLOOKUPENTRY pEntry;
        if (cEntries > 0)
        {
            /* Binary search for the insertion point; the lookup table is kept
               sorted by GCPhysFirst with no overlaps. */
            uint32_t iFirst = 0;
            uint32_t iEnd   = cEntries;
            uint32_t i      = cEntries / 2;
            for (;;)
            {
                pEntry = &paEntries[i];
                if (pEntry->GCPhysLast < GCPhys)
                {
                    /* New range lies entirely above entry i: search the upper half. */
                    i += 1;
                    if (i < iEnd)
                        iFirst = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,
                                                   hRegion, pRegEntry->pszDesc);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert after the entry we just considered: */
                        pEntry += 1;
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else if (pEntry->GCPhysFirst > GCPhysLast)
                {
                    /* New range lies entirely below entry i: search the lower half. */
                    if (i > iFirst)
                        iEnd = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,
                                                   hRegion, pRegEntry->pszDesc);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert at the entry we just considered: */
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else
                {
                    /* Oops! We've got a conflict. */
                    AssertLogRelMsgFailed(("%RGp..%RGp (%s) conflicts with existing mapping %RGp..%RGp (%s)\n",
                                           GCPhys, GCPhysLast, pRegEntry->pszDesc,
                                           pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
                    IOM_UNLOCK_EXCL(pVM);
                    return VERR_IOM_MMIO_RANGE_CONFLICT;
                }

                i = iFirst + (iEnd - iFirst) / 2;
            }
        }
        else
        {
            /* First entry in the lookup table: */
            ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
            rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType, hRegion, pRegEntry->pszDesc);
            AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

            pEntry = paEntries;
        }

        /*
         * Fill in the entry and bump the table size.
         */
        pRegEntry->fMapped = true;
        pEntry->idx         = hRegion;
        pEntry->GCPhysFirst = GCPhys;
        pEntry->GCPhysLast  = GCPhysLast;
        pVM->iom.s.cMmioLookupEntries = cEntries + 1;

#ifdef VBOX_WITH_STATISTICS
        /* Don't register stats here when we're creating the VM as the
           statistics table may still be reallocated. */
        if (pVM->enmVMState >= VMSTATE_CREATED)
            iomR3MmioRegStats(pVM, pRegEntry);
#endif

#ifdef VBOX_STRICT
        /*
         * Assert table sanity: entries well-formed, strictly ascending, no overlap.
         */
        AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
        AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));

        RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
        for (size_t i = 1; i <= cEntries; i++)   /* cEntries + 1 entries now, indices 0..cEntries. */
        {
            AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
            AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
            AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
            GCPhysPrev = paEntries[i].GCPhysLast;
        }
#endif
    }
    else
    {
        AssertFailed();
        rc = VERR_IOM_MMIO_REGION_ALREADY_MAPPED;
    }

    IOM_UNLOCK_EXCL(pVM);
    return rc;
}
494
495
496/**
497 * Worker for PDMDEVHLPR3::pfnMmioUnmap.
498 */
499VMMR3_INT_DECL(int) IOMR3MmioUnmap(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
500{
501 /*
502 * Validate input and state.
503 */
504 AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
505 AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
506 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
507 AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
508
509 /*
510 * Do the mapping.
511 */
512 int rc;
513 IOM_LOCK_EXCL(pVM);
514
515 if (pRegEntry->fMapped)
516 {
517 RTGCPHYS const GCPhys = pRegEntry->GCPhysMapping;
518 RTGCPHYS const GCPhysLast = GCPhys + pRegEntry->cbRegion - 1;
519 uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
520 Assert(pVM->iom.s.cMmioLookupEntries == cEntries);
521 Assert(cEntries > 0);
522
523 PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
524 uint32_t iFirst = 0;
525 uint32_t iEnd = cEntries;
526 uint32_t i = cEntries / 2;
527 for (;;)
528 {
529 PIOMMMIOLOOKUPENTRY pEntry = &paEntries[i];
530 if (pEntry->GCPhysLast < GCPhys)
531 {
532 i += 1;
533 if (i < iEnd)
534 iFirst = i;
535 else
536 {
537 rc = VERR_IOM_MMIO_IPE_1;
538 AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
539 }
540 }
541 else if (pEntry->GCPhysFirst > GCPhysLast)
542 {
543 if (i > iFirst)
544 iEnd = i;
545 else
546 {
547 rc = VERR_IOM_MMIO_IPE_1;
548 AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
549 }
550 }
551 else if (pEntry->idx == hRegion)
552 {
553 Assert(pEntry->GCPhysFirst == GCPhys);
554 Assert(pEntry->GCPhysLast == GCPhysLast);
555#ifdef VBOX_WITH_STATISTICS
556 iomR3MmioDeregStats(pVM, pRegEntry, GCPhys);
557#endif
558 if (i + 1 < cEntries)
559 memmove(pEntry, pEntry + 1, sizeof(*pEntry) * (cEntries - i - 1));
560 pVM->iom.s.cMmioLookupEntries = cEntries - 1;
561
562 rc = PGMR3PhysMMIODeregister(pVM, GCPhys, pRegEntry->cbRegion);
563 AssertRC(rc);
564
565 pRegEntry->fMapped = false;
566 ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS);
567 break;
568 }
569 else
570 {
571 AssertLogRelMsgFailed(("Lookig for %RGp..%RGp (%s), found %RGp..%RGp (%s) instead!\n",
572 GCPhys, GCPhysLast, pRegEntry->pszDesc,
573 pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
574 rc = VERR_IOM_MMIO_IPE_1;
575 break;
576 }
577
578 i = iFirst + (iEnd - iFirst) / 2;
579 }
580
581#ifdef VBOX_STRICT
582 /*
583 * Assert table sanity.
584 */
585 AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
586 AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));
587
588 RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
589 for (i = 1; i < cEntries - 1; i++)
590 {
591 AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
592 AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
593 AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
594 GCPhysPrev = paEntries[i].GCPhysLast;
595 }
596#endif
597 }
598 else
599 {
600 AssertFailed();
601 rc = VERR_IOM_MMIO_REGION_NOT_MAPPED;
602 }
603
604 IOM_UNLOCK_EXCL(pVM);
605 return rc;
606}
607
608
/**
 * Reduces the size of an MMIO region (presumably the worker for the
 * corresponding PDM device helper - TODO confirm against pdmdev.h).
 *
 * @returns VERR_NOT_IMPLEMENTED - this is a stub; nothing is done.
 */
VMMR3_INT_DECL(int) IOMR3MmioReduce(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS cbRegion)
{
    RT_NOREF(pVM, pDevIns, hRegion, cbRegion);
    return VERR_NOT_IMPLEMENTED;
}
614
615
616/**
617 * Validates @a hRegion, making sure it belongs to @a pDevIns.
618 *
619 * @returns VBox status code.
620 * @param pVM The cross context VM structure.
621 * @param pDevIns The device which allegedly owns @a hRegion.
622 * @param hRegion The handle to validate.
623 */
624VMMR3_INT_DECL(int) IOMR3MmioValidateHandle(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
625{
626 AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
627 AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), VERR_IOM_INVALID_MMIO_HANDLE);
628 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
629 AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
630 return VINF_SUCCESS;
631}
632
633
634/**
635 * Gets the mapping address of MMIO region @a hRegion.
636 *
637 * @returns Mapping address if mapped, NIL_RTGCPHYS if not mapped or invalid
638 * input.
639 * @param pVM The cross context VM structure.
640 * @param pDevIns The device which allegedly owns @a hRegion.
641 * @param hRegion The handle to validate.
642 */
643VMMR3_INT_DECL(RTGCPHYS) IOMR3MmioGetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
644{
645 AssertPtrReturn(pDevIns, NIL_RTGCPHYS);
646 AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), NIL_RTGCPHYS);
647 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
648 AssertReturn(pRegEntry->pDevIns == pDevIns, NIL_RTGCPHYS);
649 return pRegEntry->GCPhysMapping;
650}
651
652
653/**
654 * Display all registered MMIO ranges.
655 *
656 * @param pVM The cross context VM structure.
657 * @param pHlp The info helpers.
658 * @param pszArgs Arguments, ignored.
659 */
660DECLCALLBACK(void) iomR3MmioInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
661{
662 RT_NOREF(pszArgs);
663
664 /* No locking needed here as registerations are only happening during VMSTATE_CREATING. */
665 pHlp->pfnPrintf(pHlp,
666 "MMIO registrations: %u (%u allocated)\n"
667 " ## Ctx %.*s %.*s PCI Description\n",
668 pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc,
669 sizeof(RTGCPHYS) * 2, "Size",
670 sizeof(RTGCPHYS) * 2 * 2 + 1, "Mapping");
671 PIOMMMIOENTRYR3 paRegs = pVM->iom.s.paMmioRegs;
672 for (uint32_t i = 0; i < pVM->iom.s.cMmioRegs; i++)
673 {
674 const char * const pszRing = paRegs[i].fRing0 ? paRegs[i].fRawMode ? "+0+C" : "+0 "
675 : paRegs[i].fRawMode ? "+C " : " ";
676 if (paRegs[i].fMapped && paRegs[i].pPciDev)
677 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
678 paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1,
679 paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
680 else if (paRegs[i].fMapped && !paRegs[i].pPciDev)
681 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
682 paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1, paRegs[i].pszDesc);
683 else if (paRegs[i].pPciDev)
684 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
685 sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
686 else
687 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
688 sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pszDesc);
689 }
690}
691
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette