VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/IOMR0Mmio.cpp@93554

Last change on this file since 93554 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page sizes don't need to be the same any more. Some work left to do in the page pool code. bugref:9898
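In practice the split means that ring-0 allocation and mapping sizes in files like this one are expressed with the host page macros, while guest-physical quantities use the guest variants. A minimal sketch of the distinction (illustrative only; cEntries and GCPhys are stand-ins, and the macro values depend on the build target):

    /* Host side: size a ring-0 table allocation in whole host pages. */
    uint32_t const cbAlloc = RT_ALIGN_32(cEntries * sizeof(IOMMMIOENTRYR0), HOST_PAGE_SIZE);
    /* Guest side: offset of a guest-physical address within its guest page. */
    RTGCPHYS const offPage = GCPhys & GUEST_PAGE_OFFSET_MASK;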

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.3 KB
/* $Id: IOMR0Mmio.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
/** @file
 * IOM - Host Context Ring 0, MMIO.
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM_MMIO
#include <VBox/vmm/iom.h>
#include "IOMInternal.h"
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/process.h>
#include <iprt/string.h>



/**
 * Initializes the MMIO related members.
 *
 * @param   pGVM    Pointer to the global VM structure.
 */
void iomR0MmioInitPerVMData(PGVM pGVM)
{
    pGVM->iomr0.s.hMmioMapObj      = NIL_RTR0MEMOBJ;
    pGVM->iomr0.s.hMmioMemObj      = NIL_RTR0MEMOBJ;
#ifdef VBOX_WITH_STATISTICS
    pGVM->iomr0.s.hMmioStatsMapObj = NIL_RTR0MEMOBJ;
    pGVM->iomr0.s.hMmioStatsMemObj = NIL_RTR0MEMOBJ;
#endif
}


/**
 * Cleans up MMIO related resources.
 */
void iomR0MmioCleanupVM(PGVM pGVM)
{
    RTR0MemObjFree(pGVM->iomr0.s.hMmioMapObj, true /*fFreeMappings*/);
    pGVM->iomr0.s.hMmioMapObj = NIL_RTR0MEMOBJ;
    RTR0MemObjFree(pGVM->iomr0.s.hMmioMemObj, true /*fFreeMappings*/);
    pGVM->iomr0.s.hMmioMemObj = NIL_RTR0MEMOBJ;
#ifdef VBOX_WITH_STATISTICS
    RTR0MemObjFree(pGVM->iomr0.s.hMmioStatsMapObj, true /*fFreeMappings*/);
    pGVM->iomr0.s.hMmioStatsMapObj = NIL_RTR0MEMOBJ;
    RTR0MemObjFree(pGVM->iomr0.s.hMmioStatsMemObj, true /*fFreeMappings*/);
    pGVM->iomr0.s.hMmioStatsMemObj = NIL_RTR0MEMOBJ;
#endif
}


/**
 * Implements PDMDEVHLPR0::pfnMmioSetUpContext.
 *
 * @param   pGVM        The global (ring-0) VM structure.
 * @param   pDevIns     The device instance.
 * @param   hRegion     The MMIO region handle (already registered in
 *                      ring-3).
 * @param   pfnWrite    The write handler callback, optional.
 * @param   pfnRead     The read handler callback, optional.
 * @param   pfnFill     The fill handler callback, optional.
 * @param   pvUser      User argument for the callbacks.
 * @thread  EMT(0)
 * @note    Only callable at VM creation time.
 */
VMMR0_INT_DECL(int) IOMR0MmioSetUpContext(PGVM pGVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, PFNIOMMMIONEWWRITE pfnWrite,
                                          PFNIOMMMIONEWREAD pfnRead, PFNIOMMMIONEWFILL pfnFill, void *pvUser)
{
    /*
     * Validate input and state.
     */
    VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
    AssertReturn(hRegion < pGVM->iomr0.s.cMmioAlloc, VERR_IOM_INVALID_MMIO_HANDLE);
    AssertReturn(hRegion < pGVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
    AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
    AssertReturn(pDevIns->pDevInsForR3 != NIL_RTR3PTR && !(pDevIns->pDevInsForR3 & HOST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(pGVM->iomr0.s.paMmioRing3Regs[hRegion].pDevIns == pDevIns->pDevInsForR3, VERR_IOM_INVALID_MMIO_HANDLE);
    AssertReturn(pGVM->iomr0.s.paMmioRegs[hRegion].pDevIns == NULL, VERR_WRONG_ORDER);
    Assert(pGVM->iomr0.s.paMmioRegs[hRegion].idxSelf == hRegion);

    AssertReturn(pfnWrite || pfnRead || pfnFill, VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(pfnWrite, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnRead, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnFill, VERR_INVALID_POINTER);

    uint32_t const fFlags   = pGVM->iomr0.s.paMmioRing3Regs[hRegion].fFlags;
    RTGCPHYS const cbRegion = pGVM->iomr0.s.paMmioRing3Regs[hRegion].cbRegion;
    AssertMsgReturn(cbRegion > 0 && cbRegion <= _1T, ("cbRegion=%#RGp\n", cbRegion), VERR_IOM_INVALID_MMIO_HANDLE);

    /*
     * Do the job.
     */
    pGVM->iomr0.s.paMmioRegs[hRegion].cbRegion         = cbRegion;
    pGVM->iomr0.s.paMmioRegs[hRegion].pvUser           = pvUser;
    pGVM->iomr0.s.paMmioRegs[hRegion].pDevIns          = pDevIns;
    pGVM->iomr0.s.paMmioRegs[hRegion].pfnWriteCallback = pfnWrite;
    pGVM->iomr0.s.paMmioRegs[hRegion].pfnReadCallback  = pfnRead;
    pGVM->iomr0.s.paMmioRegs[hRegion].pfnFillCallback  = pfnFill;
    pGVM->iomr0.s.paMmioRegs[hRegion].fFlags           = fFlags;
#ifdef VBOX_WITH_STATISTICS
    uint16_t const idxStats = pGVM->iomr0.s.paMmioRing3Regs[hRegion].idxStats;
    pGVM->iomr0.s.paMmioRegs[hRegion].idxStats         = (uint32_t)idxStats < pGVM->iomr0.s.cMmioStatsAllocation
                                                       ? idxStats : UINT16_MAX;
#else
    pGVM->iomr0.s.paMmioRegs[hRegion].idxStats         = UINT16_MAX;
#endif

    pGVM->iomr0.s.paMmioRing3Regs[hRegion].fRing0 = true;

    return VINF_SUCCESS;
}
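
/*
 * A hypothetical usage sketch: devices reach IOMR0MmioSetUpContext through the
 * PDMDEVHLPR0::pfnMmioSetUpContext helper named in the doc comment above.  A
 * forwarded request would look roughly like this, with the region handle coming
 * from the earlier ring-3 registration, the fill callback left out (NULL), and
 * the handler / user-data names made up purely for illustration:
 *
 *      int rc = IOMR0MmioSetUpContext(pGVM, pDevIns, hMmioRegion,
 *                                     myDevMmioWrite, myDevMmioRead, NULL, pMyDevR0);
 *      AssertRCReturn(rc, rc);
 */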


/**
 * Grows the MMIO registration (all contexts) and lookup tables.
 *
 * @returns VBox status code.
 * @param   pGVM            The global (ring-0) VM structure.
 * @param   cReqMinEntries  The requested minimum number of table entries (an
 *                          absolute count, not a delta).
 * @thread  EMT(0)
 * @note    Only callable at VM creation time.
 */
VMMR0_INT_DECL(int) IOMR0MmioGrowRegistrationTables(PGVM pGVM, uint64_t cReqMinEntries)
{
    /*
     * Validate input and state.
     */
    VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
    AssertReturn(cReqMinEntries <= _4K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
    uint32_t cNewEntries = (uint32_t)cReqMinEntries;
    AssertReturn(cNewEntries >= pGVM->iom.s.cMmioAlloc, VERR_IOM_MMIO_IPE_1);
    uint32_t const cOldEntries = pGVM->iomr0.s.cMmioAlloc;
    ASMCompilerBarrier();
    AssertReturn(cNewEntries >= cOldEntries, VERR_IOM_MMIO_IPE_2);
    AssertReturn(pGVM->iom.s.cMmioRegs >= pGVM->iomr0.s.cMmioMax, VERR_IOM_MMIO_IPE_3);

    /*
     * Allocate the new tables.  We use a single allocation for the three tables (ring-0,
     * ring-3, lookup) and do a partial mapping of the result to ring-3.
     */
    uint32_t const cbRing0  = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR0),     HOST_PAGE_SIZE);
    uint32_t const cbRing3  = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR3),     HOST_PAGE_SIZE);
    uint32_t const cbShared = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE);
    uint32_t const cbNew    = cbRing0 + cbRing3 + cbShared;

    /* Use the rounded up space as best we can. */
    cNewEntries = RT_MIN(RT_MIN(cbRing0 / sizeof(IOMMMIOENTRYR0), cbRing3 / sizeof(IOMMMIOENTRYR3)),
                         cbShared / sizeof(IOMMMIOLOOKUPENTRY));

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, cbNew, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        /*
         * Zero and map it.
         */
        RT_BZERO(RTR0MemObjAddress(hMemObj), cbNew);

        RTR0MEMOBJ hMapObj;
        rc = RTR0MemObjMapUserEx(&hMapObj, hMemObj, (RTR3PTR)-1, HOST_PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                                 RTR0ProcHandleSelf(), cbRing0, cbNew - cbRing0);
        if (RT_SUCCESS(rc))
        {
            PIOMMMIOENTRYR0 const     paRing0    = (PIOMMMIOENTRYR0)RTR0MemObjAddress(hMemObj);
            PIOMMMIOENTRYR3 const     paRing3    = (PIOMMMIOENTRYR3)((uintptr_t)paRing0 + cbRing0);
            PIOMMMIOLOOKUPENTRY const paLookup   = (PIOMMMIOLOOKUPENTRY)((uintptr_t)paRing3 + cbRing3);
            RTR3UINTPTR const         uAddrRing3 = RTR0MemObjAddressR3(hMapObj);

            /*
             * Copy over the old info and initialize the idxSelf and idxStats members.
             */
            if (pGVM->iomr0.s.paMmioRegs != NULL)
            {
                memcpy(paRing0,  pGVM->iomr0.s.paMmioRegs,      sizeof(paRing0[0])  * cOldEntries);
                memcpy(paRing3,  pGVM->iomr0.s.paMmioRing3Regs, sizeof(paRing3[0])  * cOldEntries);
                memcpy(paLookup, pGVM->iomr0.s.paMmioLookup,    sizeof(paLookup[0]) * cOldEntries);
            }

            size_t i = cbRing0 / sizeof(*paRing0);
            while (i-- > cOldEntries)
            {
                paRing0[i].idxSelf  = (uint16_t)i;
                paRing0[i].idxStats = UINT16_MAX;
            }
            i = cbRing3 / sizeof(*paRing3);
            while (i-- > cOldEntries)
            {
                paRing3[i].idxSelf  = (uint16_t)i;
                paRing3[i].idxStats = UINT16_MAX;
            }

            /*
             * Switch the memory handles.
             */
            RTR0MEMOBJ hTmp = pGVM->iomr0.s.hMmioMapObj;
            pGVM->iomr0.s.hMmioMapObj = hMapObj;
            hMapObj = hTmp;

            hTmp = pGVM->iomr0.s.hMmioMemObj;
            pGVM->iomr0.s.hMmioMemObj = hMemObj;
            hMemObj = hTmp;

            /*
             * Update the variables.
             */
            pGVM->iomr0.s.paMmioRegs      = paRing0;
            pGVM->iomr0.s.paMmioRing3Regs = paRing3;
            pGVM->iomr0.s.paMmioLookup    = paLookup;
            pGVM->iom.s.paMmioRegs        = uAddrRing3;
            pGVM->iom.s.paMmioLookup      = uAddrRing3 + cbRing3;
            pGVM->iom.s.cMmioAlloc        = cNewEntries;
            pGVM->iomr0.s.cMmioAlloc      = cNewEntries;

            /*
             * Free the old allocation.
             */
            RTR0MemObjFree(hMapObj, true /*fFreeMappings*/);
        }
        RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
    }

    return rc;
}
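
/*
 * A rough sketch of the single allocation laid out above.  All three table
 * sizes are rounded up to whole host pages, and only the tail of the block
 * (the ring-3 and lookup tables) is mapped into the ring-3 process:
 *
 *      ring-0 offset 0                     IOMMMIOENTRYR0[]      ring-0 only
 *      ring-0 offset cbRing0               IOMMMIOENTRYR3[]      ring-3 address uAddrRing3
 *      ring-0 offset cbRing0 + cbRing3     IOMMMIOLOOKUPENTRY[]  ring-3 address uAddrRing3 + cbRing3
 */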


/**
 * Grows the MMIO statistics table.
 *
 * @returns VBox status code.
 * @param   pGVM            The global (ring-0) VM structure.
 * @param   cReqMinEntries  The requested minimum number of table entries (an
 *                          absolute count, not a delta).
 * @thread  EMT(0)
 * @note    Only callable at VM creation time.
 */
VMMR0_INT_DECL(int) IOMR0MmioGrowStatisticsTable(PGVM pGVM, uint64_t cReqMinEntries)
{
    /*
     * Validate input and state.
     */
    VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
    AssertReturn(cReqMinEntries <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
    uint32_t cNewEntries = (uint32_t)cReqMinEntries;
#ifdef VBOX_WITH_STATISTICS
    uint32_t const cOldEntries = pGVM->iomr0.s.cMmioStatsAllocation;
    ASMCompilerBarrier();
#else
    uint32_t const cOldEntries = 0;
#endif
    AssertReturn(cNewEntries > cOldEntries, VERR_IOM_MMIO_IPE_1);
    AssertReturn(pGVM->iom.s.cMmioStatsAllocation == cOldEntries, VERR_IOM_MMIO_IPE_1);
    AssertReturn(pGVM->iom.s.cMmioStats <= cOldEntries, VERR_IOM_MMIO_IPE_2);
#ifdef VBOX_WITH_STATISTICS
    AssertReturn(!pGVM->iomr0.s.fMmioStatsFrozen, VERR_WRONG_ORDER);
#endif

    /*
     * Allocate a new table, zero it and map it.
     */
#ifndef VBOX_WITH_STATISTICS
    AssertFailedReturn(VERR_NOT_SUPPORTED);
#else
    uint32_t const cbNew = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE);
    cNewEntries = cbNew / sizeof(IOMMMIOSTATSENTRY);

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, cbNew, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        RT_BZERO(RTR0MemObjAddress(hMemObj), cbNew);

        RTR0MEMOBJ hMapObj;
        rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, HOST_PAGE_SIZE,
                               RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
        if (RT_SUCCESS(rc))
        {
            PIOMMMIOSTATSENTRY pMmioStats = (PIOMMMIOSTATSENTRY)RTR0MemObjAddress(hMemObj);

            /*
             * Anything to copy over and free up?
             */
            if (pGVM->iomr0.s.paMmioStats)
                memcpy(pMmioStats, pGVM->iomr0.s.paMmioStats, cOldEntries * sizeof(IOMMMIOSTATSENTRY));

            /*
             * Switch the memory handles.
             */
            RTR0MEMOBJ hTmp = pGVM->iomr0.s.hMmioStatsMapObj;
            pGVM->iomr0.s.hMmioStatsMapObj = hMapObj;
            hMapObj = hTmp;

            hTmp = pGVM->iomr0.s.hMmioStatsMemObj;
            pGVM->iomr0.s.hMmioStatsMemObj = hMemObj;
            hMemObj = hTmp;

            /*
             * Update the variables.
             */
            pGVM->iomr0.s.paMmioStats = pMmioStats;
            pGVM->iom.s.paMmioStats   = RTR0MemObjAddressR3(pGVM->iomr0.s.hMmioStatsMapObj);
            pGVM->iom.s.cMmioStatsAllocation   = cNewEntries;
            pGVM->iomr0.s.cMmioStatsAllocation = cNewEntries;

            /*
             * Free the old allocation.
             */
            RTR0MemObjFree(hMapObj, true /*fFreeMappings*/);
        }
        RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
    }
    return rc;
#endif /* VBOX_WITH_STATISTICS */
}
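
/*
 * Worked example of the rounding above, purely for illustration: with a 4 KiB
 * HOST_PAGE_SIZE and a hypothetical sizeof(IOMMMIOSTATSENTRY) of 64 bytes, a
 * request for 100 entries gives cbNew = RT_ALIGN_32(100 * 64, 4096) = 8192 and
 * thus cNewEntries = 8192 / 64 = 128, so the rounded-up page space is used in
 * full rather than wasted.
 */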


/**
 * Called after all devices have been instantiated to copy over the statistics
 * indices to the ring-0 MMIO registration table.
 *
 * This simplifies keeping statistics for MMIO ranges that are ring-3 only.
 *
 * @returns VBox status code.
 * @param   pGVM    The global (ring-0) VM structure.
 * @thread  EMT(0)
 * @note    Only callable at VM creation time.
 */
VMMR0_INT_DECL(int) IOMR0MmioSyncStatisticsIndices(PGVM pGVM)
{
    VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);

#ifdef VBOX_WITH_STATISTICS
    /*
     * First, freeze the statistics array:
     */
    pGVM->iomr0.s.fMmioStatsFrozen = true;

    /*
     * Second, synchronize the indices:
     */
    uint32_t const        cRegs        = RT_MIN(pGVM->iom.s.cMmioRegs, pGVM->iomr0.s.cMmioAlloc);
    uint32_t const        cStatsAlloc  = pGVM->iomr0.s.cMmioStatsAllocation;
    PIOMMMIOENTRYR0       paMmioRegs   = pGVM->iomr0.s.paMmioRegs;
    IOMMMIOENTRYR3 const *paMmioRegsR3 = pGVM->iomr0.s.paMmioRing3Regs;
    AssertReturn((paMmioRegs && paMmioRegsR3) || cRegs == 0, VERR_IOM_MMIO_IPE_3);

    for (uint32_t i = 0; i < cRegs; i++)
    {
        uint16_t idxStats = paMmioRegsR3[i].idxStats;
        paMmioRegs[i].idxStats = idxStats < cStatsAlloc ? idxStats : UINT16_MAX;
    }

#else
    RT_NOREF(pGVM);
#endif
    return VINF_SUCCESS;
}