VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp@93554

Last change on this file since 93554 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898
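In practice the rename forces each call site to say which page size it means: host-side allocations and alignment use HOST_PAGE_SIZE / HOST_PAGE_OFFSET_MASK, while checks on guest-physical addresses use the GUEST_ variants, as the file below now does. A minimal standalone sketch of the distinction, with example macro values rather than the real platform-dependent definitions:

#include <assert.h>
#include <stdint.h>

#define GUEST_PAGE_SIZE         0x1000u                 /* assumed: 4 KiB x86 guest pages */
#define GUEST_PAGE_OFFSET_MASK  (GUEST_PAGE_SIZE - 1)
#define HOST_PAGE_SIZE          0x4000u                 /* assumed: e.g. 16 KiB pages on an arm64 host */

int main(void)
{
    /* Guest-physical alignment checks use the guest page size... */
    uint64_t const GCPhys = 0xfee00000;
    assert(!(GCPhys & GUEST_PAGE_OFFSET_MASK));

    /* ...while host-side allocations round up to whole host pages. */
    uint32_t cbAlloc = 5000;
    cbAlloc = (cbAlloc + HOST_PAGE_SIZE - 1) & ~(HOST_PAGE_SIZE - 1);
    assert(cbAlloc == 0x4000);
    return 0;
}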

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 28.0 KB
/* $Id: IOMR3Mmio.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor, MMIO related APIs.
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM_MMIO
#include <VBox/vmm/iom.h>
#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmdev.h>
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>

#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/err.h>

#include "IOMInline.h"


#ifdef VBOX_WITH_STATISTICS

/**
 * Register statistics for an MMIO entry.
 */
void iomR3MmioRegStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry)
{
    bool const fDoRZ = pRegEntry->fRing0 || pRegEntry->fRawMode;
    PIOMMMIOSTATSENTRY pStats = &pVM->iom.s.paMmioStats[pRegEntry->idxStats];

    /* Format the prefix: */
    char szName[80];
    size_t cchPrefix = RTStrPrintf(szName, sizeof(szName), "/IOM/MmioRegions/%RGp-%RGp",
                                   pRegEntry->GCPhysMapping, pRegEntry->GCPhysMapping + pRegEntry->cbRegion - 1);

    /* Mangle the description if this isn't the first device instance: */
    const char *pszDesc     = pRegEntry->pszDesc;
    char       *pszFreeDesc = NULL;
    if (pRegEntry->pDevIns && pRegEntry->pDevIns->iInstance > 0 && pszDesc)
        pszDesc = pszFreeDesc = RTStrAPrintf2("%u / %s", pRegEntry->pDevIns->iInstance, pszDesc);

    /* Register statistics: */
    int rc = STAMR3Register(pVM, &pRegEntry->idxSelf, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, pszDesc); AssertRC(rc);
    RTStrFree(pszFreeDesc);

# define SET_NM_SUFFIX(a_sz) memcpy(&szName[cchPrefix], a_sz, sizeof(a_sz))
    SET_NM_SUFFIX("/Read-Complicated");
    rc = STAMR3Register(pVM, &pStats->ComplicatedReads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    SET_NM_SUFFIX("/Read-FFor00");
    rc = STAMR3Register(pVM, &pStats->FFor00Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    SET_NM_SUFFIX("/Read-R3");
    rc = STAMR3Register(pVM, &pStats->ProfReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
    if (fDoRZ)
    {
        SET_NM_SUFFIX("/Read-RZ");
        rc = STAMR3Register(pVM, &pStats->ProfReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
        SET_NM_SUFFIX("/Read-RZtoR3");
        rc = STAMR3Register(pVM, &pStats->ReadRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    }
    SET_NM_SUFFIX("/Read-Total");
    rc = STAMR3Register(pVM, &pStats->Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);

    SET_NM_SUFFIX("/Write-Complicated");
    rc = STAMR3Register(pVM, &pStats->ComplicatedWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    SET_NM_SUFFIX("/Write-R3");
    rc = STAMR3Register(pVM, &pStats->ProfWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
    if (fDoRZ)
    {
        SET_NM_SUFFIX("/Write-RZ");
        rc = STAMR3Register(pVM, &pStats->ProfWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
        SET_NM_SUFFIX("/Write-RZtoR3");
        rc = STAMR3Register(pVM, &pStats->WriteRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
        SET_NM_SUFFIX("/Write-RZtoR3-Commit");
        rc = STAMR3Register(pVM, &pStats->CommitRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    }
    SET_NM_SUFFIX("/Write-Total");
    rc = STAMR3Register(pVM, &pStats->Writes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
}

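/*
 * Illustration (not from the original file): SET_NM_SUFFIX above rewrites only the
 * tail of szName instead of reformatting the whole "/IOM/MmioRegions/..." path for
 * every counter.  sizeof() on a string literal includes the terminator, so each
 * suffix cleanly replaces the previous one.  A standalone sketch of the same trick:
 */
#if 0 /* example only */
# include <stdio.h>
# include <string.h>

int main(void)
{
    char   szName[80];
    size_t cchPrefix = (size_t)snprintf(szName, sizeof(szName), "/IOM/MmioRegions/%08x-%08x", 0xfee00000u, 0xfee00fffu);
# define SET_NM_SUFFIX(a_sz) memcpy(&szName[cchPrefix], a_sz, sizeof(a_sz))
    SET_NM_SUFFIX("/Read-Total");
    puts(szName);   /* /IOM/MmioRegions/fee00000-fee00fff/Read-Total */
    SET_NM_SUFFIX("/Write-Total");
    puts(szName);   /* /IOM/MmioRegions/fee00000-fee00fff/Write-Total */
    return 0;
}
#endif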

/**
 * Deregister statistics for an MMIO entry.
 */
static void iomR3MmioDeregStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry, RTGCPHYS GCPhys)
{
    char szPrefix[80];
    RTStrPrintf(szPrefix, sizeof(szPrefix), "/IOM/MmioRegions/%RGp-%RGp", GCPhys, GCPhys + pRegEntry->cbRegion - 1);
    STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix);
}


/**
 * Grows the statistics table.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   cNewEntries     The minimum number of new entries.
 * @see     IOMR0MmioGrowStatisticsTable
 */
static int iomR3MmioGrowStatisticsTable(PVM pVM, uint32_t cNewEntries)
{
    AssertReturn(cNewEntries <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);

    int rc;
    if (!SUPR3IsDriverless())
    {
        rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_STATS, cNewEntries, NULL);
        AssertLogRelRCReturn(rc, rc);
        AssertReturn(cNewEntries <= pVM->iom.s.cMmioStatsAllocation, VERR_IOM_MMIO_IPE_2);
    }
    else
    {
        /*
         * Validate input and state.
         */
        uint32_t const cOldEntries = pVM->iom.s.cMmioStatsAllocation;
        AssertReturn(cNewEntries > cOldEntries, VERR_IOM_MMIO_IPE_1);
        AssertReturn(pVM->iom.s.cMmioStats <= cOldEntries, VERR_IOM_MMIO_IPE_2);

        /*
         * Calc size and allocate a new table.
         */
        uint32_t const cbNew = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE);
        cNewEntries = cbNew / sizeof(IOMMMIOSTATSENTRY);

        PIOMMMIOSTATSENTRY const paMmioStats = (PIOMMMIOSTATSENTRY)RTMemPageAllocZ(cbNew);
        if (paMmioStats)
        {
            /*
             * Anything to copy over, update and free the old one.
             */
            PIOMMMIOSTATSENTRY const pOldMmioStats = pVM->iom.s.paMmioStats;
            if (pOldMmioStats)
                memcpy(paMmioStats, pOldMmioStats, cOldEntries * sizeof(IOMMMIOSTATSENTRY));

            pVM->iom.s.paMmioStats          = paMmioStats;
            pVM->iom.s.cMmioStatsAllocation = cNewEntries;

            RTMemPageFree(pOldMmioStats, RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE));

            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NO_PAGE_MEMORY;
    }

    return rc;
}

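/*
 * Illustration (not from the original file): because the allocation is rounded up to
 * whole host pages, the grown table usually holds more entries than the caller asked
 * for, which is why cNewEntries is recomputed from cbNew above.  A worked example
 * with an assumed 4 KiB host page and a made-up 32-byte entry size:
 */
#if 0 /* example only */
# include <stdio.h>

int main(void)
{
    unsigned const cbEntry     = 32;        /* hypothetical sizeof(IOMMMIOSTATSENTRY) */
    unsigned       cNewEntries = 100;       /* requested minimum */
    unsigned const cbNew       = (cNewEntries * cbEntry + 4095) / 4096 * 4096;
    cNewEntries = cbNew / cbEntry;
    printf("%u bytes requested -> %u allocated -> %u entries\n", 100 * cbEntry, cbNew, cNewEntries);
    /* prints: 3200 bytes requested -> 4096 allocated -> 128 entries */
    return 0;
}
#endif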
#endif /* VBOX_WITH_STATISTICS */

/**
 * Grows the MMIO registration table.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   cNewEntries     The minimum number of new entries.
 * @see     IOMR0MmioGrowRegistrationTables
 */
static int iomR3MmioGrowTable(PVM pVM, uint32_t cNewEntries)
{
    AssertReturn(cNewEntries <= _4K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);

    int rc;
    if (!SUPR3IsDriverless())
    {
        rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_REGS, cNewEntries, NULL);
        AssertLogRelRCReturn(rc, rc);
        AssertReturn(cNewEntries <= pVM->iom.s.cMmioAlloc, VERR_IOM_MMIO_IPE_2);
    }
    else
    {
        /*
         * Validate input and state.
         */
        uint32_t const cOldEntries = pVM->iom.s.cMmioAlloc;
        AssertReturn(cNewEntries >= cOldEntries, VERR_IOM_MMIO_IPE_1);

        /*
         * Allocate the new tables.  We use a single allocation for the two tables
         * (ring-3 registration, lookup) here in driverless mode; the ring-0 code
         * does the same for three tables and maps part of the result into ring-3.
         */
        uint32_t const cbRing3  = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR3),     HOST_PAGE_SIZE);
        uint32_t const cbShared = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE);
        uint32_t const cbNew    = cbRing3 + cbShared;

        /* Use the rounded up space as best we can. */
        cNewEntries = RT_MIN(cbRing3 / sizeof(IOMMMIOENTRYR3), cbShared / sizeof(IOMMMIOLOOKUPENTRY));

        PIOMMMIOENTRYR3 const paRing3 = (PIOMMMIOENTRYR3)RTMemPageAllocZ(cbNew);
        if (paRing3)
        {
            PIOMMMIOLOOKUPENTRY const paLookup = (PIOMMMIOLOOKUPENTRY)((uintptr_t)paRing3 + cbRing3);

            /*
             * Copy over the old info and initialize the idxSelf and idxStats members.
             */
            if (pVM->iom.s.paMmioRegs != NULL)
            {
                memcpy(paRing3,  pVM->iom.s.paMmioRegs,   sizeof(paRing3[0])  * cOldEntries);
                memcpy(paLookup, pVM->iom.s.paMmioLookup, sizeof(paLookup[0]) * cOldEntries);
            }

            size_t i = cbRing3 / sizeof(*paRing3);
            while (i-- > cOldEntries)
            {
                paRing3[i].idxSelf  = (uint16_t)i;
                paRing3[i].idxStats = UINT16_MAX;
            }

            /*
             * Update the variables and free the old memory.
             */
            void * const pvFree = pVM->iom.s.paMmioRegs;

            pVM->iom.s.paMmioRegs   = paRing3;
            pVM->iom.s.paMmioLookup = paLookup;
            pVM->iom.s.cMmioAlloc   = cNewEntries;

            RTMemPageFree(pvFree,
                          RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOENTRYR3), HOST_PAGE_SIZE)
                          + RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE));

            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NO_PAGE_MEMORY;
    }
    return rc;
}

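/*
 * Illustration (not from the original file): the grow code above carves two tables
 * out of one page allocation, the lookup table starting at the page boundary after
 * the registration table.  A sketch of the layout with simplified stand-in types:
 */
#if 0 /* example only */
# include <stdint.h>
# include <stdlib.h>

typedef struct REGENTRY    { uint64_t cbRegion; uint16_t idxSelf; } REGENTRY;                /* stand-in for IOMMMIOENTRYR3 */
typedef struct LOOKUPENTRY { uint64_t GCPhysFirst, GCPhysLast; uint16_t idx; } LOOKUPENTRY;  /* stand-in for IOMMMIOLOOKUPENTRY */

# define ALIGN_UP(cb) (((cb) + 4095u) & ~(size_t)4095u)   /* assumed 4 KiB host page */

static int growTables(size_t cEntries, REGENTRY **ppaRegs, LOOKUPENTRY **ppaLookup)
{
    size_t const cbRegs = ALIGN_UP(cEntries * sizeof(REGENTRY));
    uint8_t     *pb     = (uint8_t *)calloc(1, cbRegs + ALIGN_UP(cEntries * sizeof(LOOKUPENTRY)));
    if (!pb)
        return -1;
    *ppaRegs   = (REGENTRY *)pb;                 /* table one at the front... */
    *ppaLookup = (LOOKUPENTRY *)(pb + cbRegs);   /* ...table two after its page-aligned end */
    return 0;
}
#endif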

/**
 * Worker for PDMDEVHLPR3::pfnMmioCreateEx.
 */
VMMR3_INT_DECL(int) IOMR3MmioCreate(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS cbRegion, uint32_t fFlags, PPDMPCIDEV pPciDev,
                                    uint32_t iPciRegion, PFNIOMMMIONEWWRITE pfnWrite, PFNIOMMMIONEWREAD pfnRead,
                                    PFNIOMMMIONEWFILL pfnFill, void *pvUser, const char *pszDesc, PIOMMMIOHANDLE phRegion)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
    *phRegion = UINT32_MAX;
    VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
    AssertReturn(!pVM->iom.s.fMmioFrozen, VERR_WRONG_ORDER);

    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);

    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%#RGp (max %#RGp)\n", cbRegion, MM_MMIO_64_MAX),
                    VERR_OUT_OF_RANGE);
    AssertMsgReturn(!(cbRegion & GUEST_PAGE_OFFSET_MASK), ("cbRegion=%#RGp\n", cbRegion), VERR_UNSUPPORTED_ALIGNMENT);

    AssertMsgReturn(   !(fFlags & ~IOMMMIO_FLAGS_VALID_MASK)
                    && (fFlags & IOMMMIO_FLAGS_READ_MODE)  <= IOMMMIO_FLAGS_READ_DWORD_QWORD
                    && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
                    ("%#x\n", fFlags),
                    VERR_INVALID_FLAGS);

    AssertReturn(pfnWrite || pfnRead, VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(pfnWrite, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnRead, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnFill, VERR_INVALID_POINTER);

    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc != '\0', VERR_INVALID_POINTER);
    AssertReturn(strlen(pszDesc) < 128, VERR_INVALID_POINTER);

    /*
     * Ensure that we've got table space for it.
     */
#ifndef VBOX_WITH_STATISTICS
    uint16_t const idxStats = UINT16_MAX;
#else
    uint32_t const idxStats      = pVM->iom.s.cMmioStats;
    uint32_t const cNewMmioStats = idxStats + 1;
    AssertReturn(cNewMmioStats <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
    if (cNewMmioStats > pVM->iom.s.cMmioStatsAllocation)
    {
        int rc = iomR3MmioGrowStatisticsTable(pVM, cNewMmioStats);
        AssertRCReturn(rc, rc);
        AssertReturn(idxStats == pVM->iom.s.cMmioStats, VERR_IOM_MMIO_IPE_1);
    }
#endif

    uint32_t idx = pVM->iom.s.cMmioRegs;
    if (idx >= pVM->iom.s.cMmioAlloc)
    {
        int rc = iomR3MmioGrowTable(pVM, pVM->iom.s.cMmioAlloc + 1);
        AssertRCReturn(rc, rc);
        AssertReturn(idx == pVM->iom.s.cMmioRegs, VERR_IOM_MMIO_IPE_1);
    }

    /*
     * Enter it.
     */
    pVM->iom.s.paMmioRegs[idx].cbRegion         = cbRegion;
    pVM->iom.s.paMmioRegs[idx].GCPhysMapping    = NIL_RTGCPHYS;
    pVM->iom.s.paMmioRegs[idx].pvUser           = pvUser;
    pVM->iom.s.paMmioRegs[idx].pDevIns          = pDevIns;
    pVM->iom.s.paMmioRegs[idx].pfnWriteCallback = pfnWrite;
    pVM->iom.s.paMmioRegs[idx].pfnReadCallback  = pfnRead;
    pVM->iom.s.paMmioRegs[idx].pfnFillCallback  = pfnFill;
    pVM->iom.s.paMmioRegs[idx].pszDesc          = pszDesc;
    pVM->iom.s.paMmioRegs[idx].pPciDev          = pPciDev;
    pVM->iom.s.paMmioRegs[idx].iPciRegion       = iPciRegion;
    pVM->iom.s.paMmioRegs[idx].idxStats         = (uint16_t)idxStats;
    pVM->iom.s.paMmioRegs[idx].fMapped          = false;
    pVM->iom.s.paMmioRegs[idx].fFlags           = fFlags;
    pVM->iom.s.paMmioRegs[idx].idxSelf          = idx;

    pVM->iom.s.cMmioRegs = idx + 1;
#ifdef VBOX_WITH_STATISTICS
    pVM->iom.s.cMmioStats = cNewMmioStats;
#endif
    *phRegion = idx;
    return VINF_SUCCESS;
}

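/*
 * Illustration (not from the original file): the device-side path that ends up in
 * IOMR3MmioCreate above, sketched from a device constructor.  The device name,
 * region size and address are invented, and the PDMDevHlpMmioCreateEx /
 * PDMDevHlpMmioMap helpers and callback prototypes are written as I understand
 * iom.h/pdmdev.h, so treat the exact signatures as assumptions.  Assumes
 * VBox/vmm/pdmdev.h and iprt/string.h are included.
 */
#if 0 /* example only */
static DECLCALLBACK(VBOXSTRICTRC) demoMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
{
    RT_NOREF(pDevIns, pvUser, off, pv, cb);     /* discard writes for the demo */
    return VINF_SUCCESS;
}

static DECLCALLBACK(VBOXSTRICTRC) demoMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
{
    RT_NOREF(pDevIns, pvUser, off);
    memset(pv, 0, cb);                          /* read as zeroes for the demo */
    return VINF_SUCCESS;
}

/* Called on EMT(0) while the VM state is still VMSTATE_CREATING, per the asserts above. */
static int demoRegisterMmio(PPDMDEVINS pDevIns, PIOMMMIOHANDLE phMmio)
{
    int rc = PDMDevHlpMmioCreateEx(pDevIns, GUEST_PAGE_SIZE /* cbRegion, must be page aligned */,
                                   IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
                                   NULL /*pPciDev*/, UINT32_MAX /*iPciRegion*/,
                                   demoMmioWrite, demoMmioRead, NULL /*pfnFill*/,
                                   NULL /*pvUser*/, "Demo MMIO", phMmio);
    if (RT_SUCCESS(rc))
        rc = PDMDevHlpMmioMap(pDevIns, *phMmio, 0xfee10000 /* example guest-physical address */);
    return rc;
}
#endif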

/**
 * Worker for PDMDEVHLPR3::pfnMmioMap.
 */
VMMR3_INT_DECL(int) IOMR3MmioMap(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS GCPhys)
{
    /*
     * Validate input and state.
     */
    AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
    AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
    PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
    AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);

    RTGCPHYS const cbRegion = pRegEntry->cbRegion;
    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%RGp\n", cbRegion), VERR_IOM_MMIO_IPE_1);
    RTGCPHYS const GCPhysLast = GCPhys + cbRegion - 1;

    AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK),
                          ("Misaligned! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);
    AssertLogRelMsgReturn(GCPhysLast > GCPhys,
                          ("Wrapped! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);

    /*
     * Do the mapping.
     */
    int rc = VINF_SUCCESS;
    IOM_LOCK_EXCL(pVM);

    if (!pRegEntry->fMapped)
    {
        uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
        Assert(pVM->iom.s.cMmioLookupEntries == cEntries);

        PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
        PIOMMMIOLOOKUPENTRY pEntry;
        if (cEntries > 0)
        {
            uint32_t iFirst = 0;
            uint32_t iEnd   = cEntries;
            uint32_t i      = cEntries / 2;
            for (;;)
            {
                pEntry = &paEntries[i];
                if (pEntry->GCPhysLast < GCPhys)
                {
                    i += 1;
                    if (i < iEnd)
                        iFirst = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,
                                                   (void *)(uintptr_t)hRegion, hRegion, hRegion, pRegEntry->pszDesc);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert after the entry we just considered: */
                        pEntry += 1;
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else if (pEntry->GCPhysFirst > GCPhysLast)
                {
                    if (i > iFirst)
                        iEnd = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,
                                                   (void *)(uintptr_t)hRegion, hRegion, hRegion, pRegEntry->pszDesc);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert at the entry we just considered: */
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else
                {
                    /* Oops! We've got a conflict. */
                    AssertLogRelMsgFailed(("%RGp..%RGp (%s) conflicts with existing mapping %RGp..%RGp (%s)\n",
                                           GCPhys, GCPhysLast, pRegEntry->pszDesc,
                                           pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
                    IOM_UNLOCK_EXCL(pVM);
                    return VERR_IOM_MMIO_RANGE_CONFLICT;
                }

                i = iFirst + (iEnd - iFirst) / 2;
            }
        }
        else
        {
            /* First entry in the lookup table: */
            ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
            rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,
                                       (void *)(uintptr_t)hRegion, hRegion, hRegion, pRegEntry->pszDesc);
            AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

            pEntry = paEntries;
        }

        /*
         * Fill in the entry and bump the table size.
         */
        pRegEntry->fMapped  = true;
        pEntry->idx         = hRegion;
        pEntry->GCPhysFirst = GCPhys;
        pEntry->GCPhysLast  = GCPhysLast;
        pVM->iom.s.cMmioLookupEntries = cEntries + 1;

#ifdef VBOX_WITH_STATISTICS
        /* Don't register stats here when we're creating the VM as the
           statistics table may still be reallocated. */
        if (pVM->enmVMState >= VMSTATE_CREATED)
            iomR3MmioRegStats(pVM, pRegEntry);
#endif

#ifdef VBOX_STRICT
        /*
         * Assert table sanity.
         */
        AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
        AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));

        RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
        for (size_t i = 1; i <= cEntries; i++)
        {
            AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
            AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
            AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
            GCPhysPrev = paEntries[i].GCPhysLast;
        }
#endif
    }
    else
    {
        AssertFailed();
        rc = VERR_IOM_MMIO_REGION_ALREADY_MAPPED;
    }

    IOM_UNLOCK_EXCL(pVM);
    return rc;
}

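/*
 * Illustration (not from the original file): IOMR3MmioMap above keeps paMmioLookup
 * sorted by GCPhysFirst, locating the insertion point with a binary search and
 * opening a slot with memmove.  The same pattern distilled into a standalone
 * function with simplified types, no locking and no PGM registration (the caller
 * guarantees room for one more entry):
 */
#if 0 /* example only */
# include <stdint.h>
# include <string.h>

typedef struct LOOKUP { uint64_t GCPhysFirst, GCPhysLast; uint16_t idx; } LOOKUP;

/* Returns 0 on success, -1 on overlap (the VERR_IOM_MMIO_RANGE_CONFLICT case above). */
static int insertSorted(LOOKUP *paEntries, uint32_t *pcEntries, LOOKUP const *pNew)
{
    uint32_t iFirst = 0, iEnd = *pcEntries;
    while (iFirst < iEnd)
    {
        uint32_t const i = iFirst + (iEnd - iFirst) / 2;
        if (paEntries[i].GCPhysLast < pNew->GCPhysFirst)
            iFirst = i + 1;             /* entry i lies entirely below the new range */
        else if (paEntries[i].GCPhysFirst > pNew->GCPhysLast)
            iEnd = i;                   /* entry i lies entirely above the new range */
        else
            return -1;                  /* overlap */
    }
    /* iFirst is the insertion point; shift the tail up one slot. */
    memmove(&paEntries[iFirst + 1], &paEntries[iFirst], (*pcEntries - iFirst) * sizeof(LOOKUP));
    paEntries[iFirst] = *pNew;
    *pcEntries += 1;
    return 0;
}
#endif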

/**
 * Worker for PDMDEVHLPR3::pfnMmioUnmap.
 */
VMMR3_INT_DECL(int) IOMR3MmioUnmap(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
{
    /*
     * Validate input and state.
     */
    AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
    AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
    PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
    AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);

    /*
     * Do the unmapping.
     */
    int rc;
    IOM_LOCK_EXCL(pVM);

    if (pRegEntry->fMapped)
    {
        RTGCPHYS const GCPhys     = pRegEntry->GCPhysMapping;
        RTGCPHYS const GCPhysLast = GCPhys + pRegEntry->cbRegion - 1;
        uint32_t const cEntries   = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
        Assert(pVM->iom.s.cMmioLookupEntries == cEntries);
        Assert(cEntries > 0);

        PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
        uint32_t iFirst = 0;
        uint32_t iEnd   = cEntries;
        uint32_t i      = cEntries / 2;
        for (;;)
        {
            PIOMMMIOLOOKUPENTRY pEntry = &paEntries[i];
            if (pEntry->GCPhysLast < GCPhys)
            {
                i += 1;
                if (i < iEnd)
                    iFirst = i;
                else
                {
                    rc = VERR_IOM_MMIO_IPE_1;
                    AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
                }
            }
            else if (pEntry->GCPhysFirst > GCPhysLast)
            {
                if (i > iFirst)
                    iEnd = i;
                else
                {
                    rc = VERR_IOM_MMIO_IPE_1;
                    AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
                }
            }
            else if (pEntry->idx == hRegion)
            {
                Assert(pEntry->GCPhysFirst == GCPhys);
                Assert(pEntry->GCPhysLast  == GCPhysLast);
#ifdef VBOX_WITH_STATISTICS
                iomR3MmioDeregStats(pVM, pRegEntry, GCPhys);
#endif
                if (i + 1 < cEntries)
                    memmove(pEntry, pEntry + 1, sizeof(*pEntry) * (cEntries - i - 1));
                pVM->iom.s.cMmioLookupEntries = cEntries - 1;

                rc = PGMR3PhysMMIODeregister(pVM, GCPhys, pRegEntry->cbRegion);
                AssertRC(rc);

                pRegEntry->fMapped = false;
                ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS);
                break;
            }
            else
            {
                AssertLogRelMsgFailed(("Looking for %RGp..%RGp (%s), found %RGp..%RGp (%s) instead!\n",
                                       GCPhys, GCPhysLast, pRegEntry->pszDesc,
                                       pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
                rc = VERR_IOM_MMIO_IPE_1;
                break;
            }

            i = iFirst + (iEnd - iFirst) / 2;
        }

#ifdef VBOX_STRICT
        /*
         * Assert table sanity.
         */
        AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
        AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));

        RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
        for (i = 1; i < cEntries - 1; i++)
        {
            AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
            AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
            AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
            GCPhysPrev = paEntries[i].GCPhysLast;
        }
#endif
    }
    else
    {
        AssertFailed();
        rc = VERR_IOM_MMIO_REGION_NOT_MAPPED;
    }

    IOM_UNLOCK_EXCL(pVM);
    return rc;
}


VMMR3_INT_DECL(int) IOMR3MmioReduce(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS cbRegion)
{
    RT_NOREF(pVM, pDevIns, hRegion, cbRegion);
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Validates @a hRegion, making sure it belongs to @a pDevIns.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device which allegedly owns @a hRegion.
 * @param   hRegion     The handle to validate.
 */
VMMR3_INT_DECL(int) IOMR3MmioValidateHandle(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
{
    AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
    AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), VERR_IOM_INVALID_MMIO_HANDLE);
    PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
    AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
    return VINF_SUCCESS;
}


/**
 * Gets the mapping address of MMIO region @a hRegion.
 *
 * @returns Mapping address if mapped, NIL_RTGCPHYS if not mapped or invalid
 *          input.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device which allegedly owns @a hRegion.
 * @param   hRegion     The handle to validate.
 */
VMMR3_INT_DECL(RTGCPHYS) IOMR3MmioGetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
{
    AssertPtrReturn(pDevIns, NIL_RTGCPHYS);
    AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), NIL_RTGCPHYS);
    PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
    AssertReturn(pRegEntry->pDevIns == pDevIns, NIL_RTGCPHYS);
    return pRegEntry->GCPhysMapping;
}


/**
 * Display all registered MMIO ranges.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pHlp        The info helpers.
 * @param   pszArgs     Arguments, ignored.
 */
DECLCALLBACK(void) iomR3MmioInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    RT_NOREF(pszArgs);

    /* No locking needed here as registrations only happen during VMSTATE_CREATING. */
    pHlp->pfnPrintf(pHlp,
                    "MMIO registrations: %u (%u allocated)\n"
                    " ## Ctx %.*s %.*s   PCI    Description\n",
                    pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc,
                    sizeof(RTGCPHYS) * 2, "Size",
                    sizeof(RTGCPHYS) * 2 * 2 + 1, "Mapping");
    PIOMMMIOENTRYR3 paRegs = pVM->iom.s.paMmioRegs;
    for (uint32_t i = 0; i < pVM->iom.s.cMmioRegs; i++)
    {
        const char * const pszRing = paRegs[i].fRing0 ? paRegs[i].fRawMode ? "+0+C" : "+0  "
                                   : paRegs[i].fRawMode ? "+C  " : "    ";
        if (paRegs[i].fMapped && paRegs[i].pPciDev)
            pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
                            paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1,
                            paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
        else if (paRegs[i].fMapped && !paRegs[i].pPciDev)
            pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
                            paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1, paRegs[i].pszDesc);
        else if (paRegs[i].pPciDev)
            pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
                            sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
        else
            pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
                            sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pszDesc);
    }
}