VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/IOMR0Mmio.cpp@ 81947

Last change on this file since 81947 was 81383, checked in by vboxsync, 5 years ago

IOM: Split up the logging into two more groups, one for I/O ports and one for MMIO. bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.2 KB
Line 
1/* $Id: IOMR0Mmio.cpp 81383 2019-10-19 23:58:44Z vboxsync $ */
2/** @file
3 * IOM - Host Context Ring 0, MMIO.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM_MMIO
23#include <VBox/vmm/iom.h>
24#include "IOMInternal.h"
25#include <VBox/vmm/pdmdev.h>
26#include <VBox/vmm/vmcc.h>
27#include <VBox/err.h>
28#include <VBox/log.h>
29#include <iprt/assert.h>
30#include <iprt/mem.h>
31#include <iprt/memobj.h>
32#include <iprt/process.h>
33#include <iprt/string.h>
34
35
36
37/**
38 * Initializes the MMIO related members.
39 *
40 * @param pGVM Pointer to the global VM structure.
41 */
42void iomR0MmioInitPerVMData(PGVM pGVM)
43{
44 pGVM->iomr0.s.hMmioMapObj = NIL_RTR0MEMOBJ;
45 pGVM->iomr0.s.hMmioMemObj = NIL_RTR0MEMOBJ;
46#ifdef VBOX_WITH_STATISTICS
47 pGVM->iomr0.s.hMmioStatsMapObj = NIL_RTR0MEMOBJ;
48 pGVM->iomr0.s.hMmioStatsMemObj = NIL_RTR0MEMOBJ;
49#endif
50}
51
52
/**
 * Cleans up MMIO related resources.
 *
 * Frees the registration/lookup table allocation and, in statistics builds,
 * the statistics table allocation, resetting all handles to NIL.
 *
 * @param   pGVM    Pointer to the global VM structure.
 */
void iomR0MmioCleanupVM(PGVM pGVM)
{
    /* Free the ring-3 mapping object before its backing memory object.  The
       handles are NIL when nothing was ever allocated (see
       iomR0MmioInitPerVMData), in which case the free calls do nothing. */
    RTR0MemObjFree(pGVM->iomr0.s.hMmioMapObj, true /*fFreeMappings*/);
    pGVM->iomr0.s.hMmioMapObj = NIL_RTR0MEMOBJ;
    RTR0MemObjFree(pGVM->iomr0.s.hMmioMemObj, true /*fFreeMappings*/);
    pGVM->iomr0.s.hMmioMemObj = NIL_RTR0MEMOBJ;
#ifdef VBOX_WITH_STATISTICS
    /* Same map-then-memory order for the statistics table. */
    RTR0MemObjFree(pGVM->iomr0.s.hMmioStatsMapObj, true /*fFreeMappings*/);
    pGVM->iomr0.s.hMmioStatsMapObj = NIL_RTR0MEMOBJ;
    RTR0MemObjFree(pGVM->iomr0.s.hMmioStatsMemObj, true /*fFreeMappings*/);
    pGVM->iomr0.s.hMmioStatsMemObj = NIL_RTR0MEMOBJ;
#endif
}
69
70
71/**
72 * Implements PDMDEVHLPR0::pfnMmioSetUpContext.
73 *
74 * @param pGVM The global (ring-0) VM structure.
75 * @param pDevIns The device instance.
76 * @param hRegion The MMIO region handle (already registered in
77 * ring-3).
78 * @param pfnWrite The write handler callback, optional.
79 * @param pfnRead The read handler callback, optional.
80 * @param pfnFill The fill handler callback, optional.
81 * @param pvUser User argument for the callbacks.
82 * @thread EMT(0)
83 * @note Only callable at VM creation time.
84 */
85VMMR0_INT_DECL(int) IOMR0MmioSetUpContext(PGVM pGVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, PFNIOMMMIONEWWRITE pfnWrite,
86 PFNIOMMMIONEWREAD pfnRead, PFNIOMMMIONEWFILL pfnFill, void *pvUser)
87{
88 /*
89 * Validate input and state.
90 */
91 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
92 VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
93 AssertReturn(hRegion < pGVM->iomr0.s.cMmioAlloc, VERR_IOM_INVALID_MMIO_HANDLE);
94 AssertReturn(hRegion < pGVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
95 AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
96 AssertReturn(pDevIns->pDevInsForR3 != NIL_RTR3PTR && !(pDevIns->pDevInsForR3 & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
97 AssertReturn(pGVM->iomr0.s.paMmioRing3Regs[hRegion].pDevIns == pDevIns->pDevInsForR3, VERR_IOM_INVALID_MMIO_HANDLE);
98 AssertReturn(pGVM->iomr0.s.paMmioRegs[hRegion].pDevIns == NULL, VERR_WRONG_ORDER);
99 Assert(pGVM->iomr0.s.paMmioRegs[hRegion].idxSelf == hRegion);
100
101 AssertReturn(pfnWrite || pfnRead || pfnFill, VERR_INVALID_PARAMETER);
102 AssertPtrNullReturn(pfnWrite, VERR_INVALID_POINTER);
103 AssertPtrNullReturn(pfnRead, VERR_INVALID_POINTER);
104 AssertPtrNullReturn(pfnFill, VERR_INVALID_POINTER);
105
106 uint32_t const fFlags = pGVM->iomr0.s.paMmioRing3Regs[hRegion].fFlags;
107 RTGCPHYS const cbRegion = pGVM->iomr0.s.paMmioRing3Regs[hRegion].cbRegion;
108 AssertMsgReturn(cbRegion > 0 && cbRegion <= _1T, ("cbRegion=%#RGp\n", cbRegion), VERR_IOM_INVALID_MMIO_HANDLE);
109
110 /*
111 * Do the job.
112 */
113 pGVM->iomr0.s.paMmioRegs[hRegion].cbRegion = cbRegion;
114 pGVM->iomr0.s.paMmioRegs[hRegion].pvUser = pvUser;
115 pGVM->iomr0.s.paMmioRegs[hRegion].pDevIns = pDevIns;
116 pGVM->iomr0.s.paMmioRegs[hRegion].pfnWriteCallback = pfnWrite;
117 pGVM->iomr0.s.paMmioRegs[hRegion].pfnReadCallback = pfnRead;
118 pGVM->iomr0.s.paMmioRegs[hRegion].pfnFillCallback = pfnFill;
119 pGVM->iomr0.s.paMmioRegs[hRegion].fFlags = fFlags;
120#ifdef VBOX_WITH_STATISTICS
121 uint16_t const idxStats = pGVM->iomr0.s.paMmioRing3Regs[hRegion].idxStats;
122 pGVM->iomr0.s.paMmioRegs[hRegion].idxStats = (uint32_t)idxStats < pGVM->iomr0.s.cMmioStatsAllocation
123 ? idxStats : UINT16_MAX;
124#else
125 pGVM->iomr0.s.paMmioRegs[hRegion].idxStats = UINT16_MAX;
126#endif
127
128 pGVM->iomr0.s.paMmioRing3Regs[hRegion].fRing0 = true;
129
130 return VINF_SUCCESS;
131}
132
133
134/**
135 * Grows the MMIO registration (all contexts) and lookup tables.
136 *
137 * @returns VBox status code.
138 * @param pGVM The global (ring-0) VM structure.
139 * @param cReqMinEntries The minimum growth (absolute).
140 * @thread EMT(0)
141 * @note Only callable at VM creation time.
142 */
143VMMR0_INT_DECL(int) IOMR0MmioGrowRegistrationTables(PGVM pGVM, uint64_t cReqMinEntries)
144{
145 /*
146 * Validate input and state.
147 */
148 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
149 VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
150 AssertReturn(cReqMinEntries <= _4K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
151 uint32_t cNewEntries = (uint32_t)cReqMinEntries;
152 AssertReturn(cNewEntries >= pGVM->iom.s.cMmioAlloc, VERR_IOM_MMIO_IPE_1);
153 uint32_t const cOldEntries = pGVM->iomr0.s.cMmioAlloc;
154 ASMCompilerBarrier();
155 AssertReturn(cNewEntries >= cOldEntries, VERR_IOM_MMIO_IPE_2);
156 AssertReturn(pGVM->iom.s.cMmioRegs >= pGVM->iomr0.s.cMmioMax, VERR_IOM_MMIO_IPE_3);
157
158 /*
159 * Allocate the new tables. We use a single allocation for the three tables (ring-0,
160 * ring-3, lookup) and does a partial mapping of the result to ring-3.
161 */
162 uint32_t const cbRing0 = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR0), PAGE_SIZE);
163 uint32_t const cbRing3 = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR3), PAGE_SIZE);
164 uint32_t const cbShared = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOLOOKUPENTRY), PAGE_SIZE);
165 uint32_t const cbNew = cbRing0 + cbRing3 + cbShared;
166
167 /* Use the rounded up space as best we can. */
168 cNewEntries = RT_MIN(RT_MIN(cbRing0 / sizeof(IOMMMIOENTRYR0), cbRing3 / sizeof(IOMMMIOENTRYR3)),
169 cbShared / sizeof(IOMMMIOLOOKUPENTRY));
170
171 RTR0MEMOBJ hMemObj;
172 int rc = RTR0MemObjAllocPage(&hMemObj, cbNew, false /*fExecutable*/);
173 if (RT_SUCCESS(rc))
174 {
175 /*
176 * Zero and map it.
177 */
178 RT_BZERO(RTR0MemObjAddress(hMemObj), cbNew);
179
180 RTR0MEMOBJ hMapObj;
181 rc = RTR0MemObjMapUserEx(&hMapObj, hMemObj, (RTR3PTR)-1, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
182 RTR0ProcHandleSelf(), cbRing0, cbNew - cbRing0);
183 if (RT_SUCCESS(rc))
184 {
185 PIOMMMIOENTRYR0 const paRing0 = (PIOMMMIOENTRYR0)RTR0MemObjAddress(hMemObj);
186 PIOMMMIOENTRYR3 const paRing3 = (PIOMMMIOENTRYR3)((uintptr_t)paRing0 + cbRing0);
187 PIOMMMIOLOOKUPENTRY const paLookup = (PIOMMMIOLOOKUPENTRY)((uintptr_t)paRing3 + cbRing3);
188 RTR3UINTPTR const uAddrRing3 = RTR0MemObjAddressR3(hMapObj);
189
190 /*
191 * Copy over the old info and initialize the idxSelf and idxStats members.
192 */
193 if (pGVM->iomr0.s.paMmioRegs != NULL)
194 {
195 memcpy(paRing0, pGVM->iomr0.s.paMmioRegs, sizeof(paRing0[0]) * cOldEntries);
196 memcpy(paRing3, pGVM->iomr0.s.paMmioRing3Regs, sizeof(paRing3[0]) * cOldEntries);
197 memcpy(paLookup, pGVM->iomr0.s.paMmioLookup, sizeof(paLookup[0]) * cOldEntries);
198 }
199
200 size_t i = cbRing0 / sizeof(*paRing0);
201 while (i-- > cOldEntries)
202 {
203 paRing0[i].idxSelf = (uint16_t)i;
204 paRing0[i].idxStats = UINT16_MAX;
205 paRing3[i].idxSelf = (uint16_t)i;
206 paRing3[i].idxStats = UINT16_MAX;
207 }
208
209 /*
210 * Switch the memory handles.
211 */
212 RTR0MEMOBJ hTmp = pGVM->iomr0.s.hMmioMapObj;
213 pGVM->iomr0.s.hMmioMapObj = hMapObj;
214 hMapObj = hTmp;
215
216 hTmp = pGVM->iomr0.s.hMmioMemObj;
217 pGVM->iomr0.s.hMmioMemObj = hMemObj;
218 hMemObj = hTmp;
219
220 /*
221 * Update the variables.
222 */
223 pGVM->iomr0.s.paMmioRegs = paRing0;
224 pGVM->iomr0.s.paMmioRing3Regs = paRing3;
225 pGVM->iomr0.s.paMmioLookup = paLookup;
226 pGVM->iom.s.paMmioRegs = uAddrRing3;
227 pGVM->iom.s.paMmioLookup = uAddrRing3 + cbRing3;
228 pGVM->iom.s.cMmioAlloc = cNewEntries;
229 pGVM->iomr0.s.cMmioAlloc = cNewEntries;
230
231 /*
232 * Free the old allocation.
233 */
234 RTR0MemObjFree(hMapObj, true /*fFreeMappings*/);
235 }
236 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
237 }
238
239 return rc;
240}
241
242
/**
 * Grows the MMIO statistics table.
 *
 * The new table is page-allocated in ring-0, the old contents copied over,
 * and the whole table mapped read/write into the ring-3 process.
 *
 * @returns VBox status code.
 * @param   pGVM            The global (ring-0) VM structure.
 * @param   cReqMinEntries  The minimum growth (absolute entry count).
 * @thread  EMT(0)
 * @note    Only callable at VM creation time.
 */
VMMR0_INT_DECL(int) IOMR0MmioGrowStatisticsTable(PGVM pGVM, uint64_t cReqMinEntries)
{
    /*
     * Validate input and state.
     */
    VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
    AssertReturn(cReqMinEntries <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
    uint32_t cNewEntries = (uint32_t)cReqMinEntries;
#ifdef VBOX_WITH_STATISTICS
    uint32_t const cOldEntries = pGVM->iomr0.s.cMmioStatsAllocation;
    ASMCompilerBarrier(); /* no re-reading of cMmioStatsAllocation after the snapshot above */
#else
    uint32_t const cOldEntries = 0;
#endif
    AssertReturn(cNewEntries > cOldEntries, VERR_IOM_MMIO_IPE_1);
    AssertReturn(pGVM->iom.s.cMmioStatsAllocation == cOldEntries, VERR_IOM_MMIO_IPE_1);
    AssertReturn(pGVM->iom.s.cMmioStats <= cOldEntries, VERR_IOM_MMIO_IPE_2);
#ifdef VBOX_WITH_STATISTICS
    /* Growing is refused once IOMR0MmioSyncStatisticsIndices() has frozen the table. */
    AssertReturn(!pGVM->iomr0.s.fMmioStatsFrozen, VERR_WRONG_ORDER);
#endif

    /*
     * Allocate a new table, zero it and map it.
     */
#ifndef VBOX_WITH_STATISTICS
    AssertFailedReturn(VERR_NOT_SUPPORTED); /* no statistics in this build - nothing to grow */
#else
    uint32_t const cbNew = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOSTATSENTRY), PAGE_SIZE);
    cNewEntries = cbNew / sizeof(IOMMMIOSTATSENTRY); /* use all of the page-rounded space */

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, cbNew, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        RT_BZERO(RTR0MemObjAddress(hMemObj), cbNew);

        /* Map the whole table read/write into the ring-3 process. */
        RTR0MEMOBJ hMapObj;
        rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
        if (RT_SUCCESS(rc))
        {
            PIOMMMIOSTATSENTRY pMmioStats = (PIOMMMIOSTATSENTRY)RTR0MemObjAddress(hMemObj);

            /*
             * Anything to copy over and free up?
             */
            if (pGVM->iomr0.s.paMmioStats)
                memcpy(pMmioStats, pGVM->iomr0.s.paMmioStats, cOldEntries * sizeof(IOMMMIOSTATSENTRY));

            /*
             * Switch the memory handles (old ones end up in the locals for freeing below).
             */
            RTR0MEMOBJ hTmp = pGVM->iomr0.s.hMmioStatsMapObj;
            pGVM->iomr0.s.hMmioStatsMapObj = hMapObj;
            hMapObj = hTmp;

            hTmp = pGVM->iomr0.s.hMmioStatsMemObj;
            pGVM->iomr0.s.hMmioStatsMemObj = hMemObj;
            hMemObj = hTmp;

            /*
             * Update the variables.
             */
            pGVM->iomr0.s.paMmioStats = pMmioStats;
            pGVM->iom.s.paMmioStats = RTR0MemObjAddressR3(pGVM->iomr0.s.hMmioStatsMapObj);
            pGVM->iom.s.cMmioStatsAllocation = cNewEntries;
            pGVM->iomr0.s.cMmioStatsAllocation = cNewEntries;

            /*
             * Free the old allocation.
             */
            RTR0MemObjFree(hMapObj, true /*fFreeMappings*/);
        }
        RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
    }
    return rc;
#endif /* VBOX_WITH_STATISTICS */
}
330
331
332/**
333 * Called after all devices has been instantiated to copy over the statistics
334 * indices to the ring-0 MMIO registration table.
335 *
336 * This simplifies keeping statistics for MMIO ranges that are ring-3 only.
337 *
338 * @returns VBox status code.
339 * @param pGVM The global (ring-0) VM structure.
340 * @thread EMT(0)
341 * @note Only callable at VM creation time.
342 */
343VMMR0_INT_DECL(int) IOMR0MmioSyncStatisticsIndices(PGVM pGVM)
344{
345 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
346 VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
347
348#ifdef VBOX_WITH_STATISTICS
349 /*
350 * First, freeze the statistics array:
351 */
352 pGVM->iomr0.s.fMmioStatsFrozen = true;
353
354 /*
355 * Second, synchronize the indices:
356 */
357 uint32_t const cRegs = RT_MIN(pGVM->iom.s.cMmioRegs, pGVM->iomr0.s.cMmioAlloc);
358 uint32_t const cStatsAlloc = pGVM->iomr0.s.cMmioStatsAllocation;
359 PIOMMMIOENTRYR0 paMmioRegs = pGVM->iomr0.s.paMmioRegs;
360 IOMMMIOENTRYR3 const *paMmioRegsR3 = pGVM->iomr0.s.paMmioRing3Regs;
361 AssertReturn((paMmioRegs && paMmioRegsR3) || cRegs == 0, VERR_IOM_MMIO_IPE_3);
362
363 for (uint32_t i = 0 ; i < cRegs; i++)
364 {
365 uint16_t idxStats = paMmioRegsR3[i].idxStats;
366 paMmioRegs[i].idxStats = idxStats < cStatsAlloc ? idxStats : UINT16_MAX;
367 }
368
369#else
370 RT_NOREF(pGVM);
371#endif
372 return VINF_SUCCESS;
373}
374
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette