VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IOM.cpp@ 82312

Last change on this file since 82312 was 82312, checked in by vboxsync, 5 years ago

IOM: Removed unused function iomR3IOPortGetStandardName(). bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 39.6 KB
Line 
1/* $Id: IOM.cpp 82312 2019-12-01 01:48:30Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iom IOM - The Input / Output Monitor
20 *
 * The input/output monitor handles I/O exceptions and routes them to the
 * appropriate device. It implements an API to register and deregister virtual
 * I/O port handlers and memory mapped I/O handlers. A handler is a PDM device
 * together with a set of callback functions.
25 *
26 * @see grp_iom
27 *
28 *
29 * @section sec_iom_rawmode Raw-Mode
30 *
31 * In raw-mode I/O port access is trapped (\#GP(0)) by ensuring that the actual
32 * IOPL is 0 regardless of what the guest IOPL is. The \#GP handler use the
33 * disassembler (DIS) to figure which instruction caused it (there are a number
34 * of instructions in addition to the I/O ones) and if it's an I/O port access
35 * it will hand it to IOMRCIOPortHandler (via EMInterpretPortIO).
36 * IOMRCIOPortHandler will lookup the port in the AVL tree of registered
37 * handlers. If found, the handler will be called otherwise default action is
38 * taken. (Default action is to write into the void and read all set bits.)
39 *
40 * Memory Mapped I/O (MMIO) is implemented as a slightly special case of PGM
41 * access handlers. An MMIO range is registered with IOM which then registers it
42 * with the PGM access handler sub-system. The access handler catches all
43 * access and will be called in the context of a \#PF handler. In RC and R0 this
44 * handler is iomMmioPfHandler while in ring-3 it's iomR3MmioHandler (although
45 * in ring-3 there can be alternative ways). iomMmioPfHandler will attempt to
46 * emulate the instruction that is doing the access and pass the corresponding
47 * reads / writes to the device.
48 *
49 * Emulating I/O port access is less complex and should be slightly faster than
50 * emulating MMIO, so in most cases we should encourage the OS to use port I/O.
51 * Devices which are frequently accessed should register GC handlers to speed up
52 * execution.
53 *
54 *
55 * @section sec_iom_hm Hardware Assisted Virtualization Mode
56 *
57 * When running in hardware assisted virtualization mode we'll be doing much the
58 * same things as in raw-mode. The main difference is that we're running in the
59 * host ring-0 context and that we don't get faults (\#GP(0) and \#PG) but
60 * exits.
61 *
62 *
63 * @section sec_iom_rem Recompiled Execution Mode
64 *
65 * When running in the recompiler things are different. I/O port access is
66 * handled by calling IOMIOPortRead and IOMIOPortWrite directly. While MMIO can
67 * be handled in one of two ways. The normal way is that we have a registered a
68 * special RAM range with the recompiler and in the three callbacks (for byte,
69 * word and dword access) we call IOMMMIORead and IOMMMIOWrite directly. The
70 * alternative ways that the physical memory access which goes via PGM will take
71 * care of it by calling iomR3MmioHandler via the PGM access handler machinery
72 * - this shouldn't happen but it is an alternative...
73 *
74 *
75 * @section sec_iom_other Other Accesses
76 *
77 * I/O ports aren't really exposed in any other way, unless you count the
78 * instruction interpreter in EM, but that's just what we're doing in the
79 * raw-mode \#GP(0) case really. Now, it's possible to call IOMIOPortRead and
80 * IOMIOPortWrite directly to talk to a device, but this is really bad behavior
81 * and should only be done as temporary hacks (the PC BIOS device used to setup
82 * the CMOS this way back in the dark ages).
83 *
84 * MMIO has similar direct routes as the I/O ports and these shouldn't be used
85 * for the same reasons and with the same restrictions. OTOH since MMIO is
86 * mapped into the physical memory address space, it can be accessed in a number
87 * of ways thru PGM.
88 *
89 *
90 * @section sec_iom_logging Logging Levels
91 *
92 * Following assignments:
 * - Level 5 is used for deferring I/O port and MMIO writes to ring-3.
94 *
95 */
96
97/** @todo MMIO - simplifying the device end.
98 * - Add a return status for doing DBGFSTOP on access where there are no known
99 * registers.
100 * -
101 *
102 * */
103
104
105/*********************************************************************************************************************************
106* Header Files *
107*********************************************************************************************************************************/
108#define LOG_GROUP LOG_GROUP_IOM
109#include <VBox/vmm/iom.h>
110#include <VBox/vmm/cpum.h>
111#include <VBox/vmm/pgm.h>
112#include <VBox/sup.h>
113#include <VBox/vmm/hm.h>
114#include <VBox/vmm/mm.h>
115#include <VBox/vmm/stam.h>
116#include <VBox/vmm/dbgf.h>
117#include <VBox/vmm/pdmapi.h>
118#include <VBox/vmm/pdmdev.h>
119#include "IOMInternal.h"
120#include <VBox/vmm/vm.h>
121
122#include <VBox/param.h>
123#include <iprt/assert.h>
124#include <iprt/alloc.h>
125#include <iprt/string.h>
126#include <VBox/log.h>
127#include <VBox/err.h>
128
129#include "IOMInline.h"
130
131
132/*********************************************************************************************************************************
133* Internal Functions *
134*********************************************************************************************************************************/
135static void iomR3FlushCache(PVM pVM);
136
137
/**
 * Initializes the IOM.
 *
 * Called once during VM construction.  Sets up the IOM lock, allocates the
 * AVL lookup trees, registers the two PGM physical access handler types
 * (old-style and new-style MMIO), and registers debugger info handlers and
 * statistics.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) IOMR3Init(PVM pVM)
{
    LogFlow(("IOMR3Init:\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, iom.s, 32);
    AssertCompile(sizeof(pVM->iom.s) <= sizeof(pVM->iom.padding));
    AssertCompileMemberAlignment(IOM, CritSect, sizeof(uintptr_t));

    /*
     * Initialize the REM critical section.
     */
#ifdef IOM_WITH_CRIT_SECT_RW
    int rc = PDMR3CritSectRwInit(pVM, &pVM->iom.s.CritSect, RT_SRC_POS, "IOM Lock");
#else
    int rc = PDMR3CritSectInit(pVM, &pVM->iom.s.CritSect, RT_SRC_POS, "IOM Lock");
#endif
    AssertRCReturn(rc, rc);

    /*
     * Allocate the trees structure.
     */
    rc = MMHyperAlloc(pVM, sizeof(*pVM->iom.s.pTreesR3), 0, MM_TAG_IOM, (void **)&pVM->iom.s.pTreesR3);
    AssertRCReturn(rc, rc);
    /* The ring-0 pointer is a different view of the very same hyper-heap allocation. */
    pVM->iom.s.pTreesR0 = MMHyperR3ToR0(pVM, pVM->iom.s.pTreesR3);

    /*
     * Register the MMIO access handler type.
     */
    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO,
                                          iomMmioHandler,
                                          NULL, "iomMmioHandler", "iomMmioPfHandler",
                                          NULL, "iomMmioHandler", "iomMmioPfHandler",
                                          "MMIO", &pVM->iom.s.hMmioHandlerType);
    AssertRCReturn(rc, rc);

    /* Second handler type for the new-style MMIO registrations (see the "New" stats below). */
    rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_MMIO,
                                          iomMmioHandlerNew,
                                          NULL, "iomMmioHandlerNew", "iomMmioPfHandlerNew",
                                          NULL, "iomMmioHandlerNew", "iomMmioPfHandlerNew",
                                          "MMIO New", &pVM->iom.s.hNewMmioHandlerType);
    AssertRCReturn(rc, rc);

    /*
     * Info.
     */
    DBGFR3InfoRegisterInternal(pVM, "ioport", "Dumps all IOPort ranges. No arguments.", &iomR3IoPortInfo);
    DBGFR3InfoRegisterInternal(pVM, "mmio", "Dumps all MMIO ranges. No arguments.", &iomR3MmioInfo);

    /*
     * Statistics.
     */
    STAM_REG(pVM, &pVM->iom.s.StatIoPortCommits, STAMTYPE_COUNTER, "/IOM/IoPortCommits", STAMUNIT_OCCURENCES, "Number of ring-3 I/O port commits.");

    STAM_REL_REG(pVM, &pVM->iom.s.StatMMIOStaleMappings, STAMTYPE_PROFILE, "/IOM/MMIOStaleMappings", STAMUNIT_TICKS_PER_CALL, "Number of times iomMmioHandlerNew got a call for a remapped range at the old mapping.");
    STAM_REG(pVM, &pVM->iom.s.StatRZMMIOHandler, STAMTYPE_PROFILE, "/IOM/RZ-MMIOHandler", STAMUNIT_TICKS_PER_CALL, "Profiling of the iomMmioPfHandler() body, only success calls.");
    STAM_REG(pVM, &pVM->iom.s.StatRZMMIOReadsToR3, STAMTYPE_COUNTER, "/IOM/RZ-MMIOHandler/ReadsToR3", STAMUNIT_OCCURENCES, "Number of read deferred to ring-3.");
    STAM_REG(pVM, &pVM->iom.s.StatRZMMIOWritesToR3, STAMTYPE_COUNTER, "/IOM/RZ-MMIOHandler/WritesToR3", STAMUNIT_OCCURENCES, "Number of writes deferred to ring-3.");
    STAM_REG(pVM, &pVM->iom.s.StatRZMMIOCommitsToR3, STAMTYPE_COUNTER, "/IOM/RZ-MMIOHandler/CommitsToR3", STAMUNIT_OCCURENCES, "Number of commits deferred to ring-3.");
    STAM_REG(pVM, &pVM->iom.s.StatRZMMIODevLockContention, STAMTYPE_COUNTER, "/IOM/RZ-MMIOHandler/DevLockContention", STAMUNIT_OCCURENCES, "Number of device lock contention force return to ring-3.");
    STAM_REG(pVM, &pVM->iom.s.StatR3MMIOHandler, STAMTYPE_COUNTER, "/IOM/R3-MMIOHandler", STAMUNIT_OCCURENCES, "Number of calls to iomMmioHandler.");

    STAM_REG(pVM, &pVM->iom.s.StatMmioHandlerR3, STAMTYPE_COUNTER, "/IOM/OldMmioHandlerR3", STAMUNIT_OCCURENCES, "Number of calls to old iomMmioHandler from ring-3.");
    STAM_REG(pVM, &pVM->iom.s.StatMmioHandlerR0, STAMTYPE_COUNTER, "/IOM/OldMmioHandlerR0", STAMUNIT_OCCURENCES, "Number of calls to old iomMmioHandler from ring-0.");

    STAM_REG(pVM, &pVM->iom.s.StatMmioHandlerNewR3, STAMTYPE_COUNTER, "/IOM/MmioHandlerNewR3", STAMUNIT_OCCURENCES, "Number of calls to iomMmioHandlerNew from ring-3.");
    STAM_REG(pVM, &pVM->iom.s.StatMmioHandlerNewR0, STAMTYPE_COUNTER, "/IOM/MmioHandlerNewR0", STAMUNIT_OCCURENCES, "Number of calls to iomMmioHandlerNew from ring-0.");
    STAM_REG(pVM, &pVM->iom.s.StatMmioPfHandlerNew, STAMTYPE_COUNTER, "/IOM/MmioPfHandlerNew", STAMUNIT_OCCURENCES, "Number of calls to iomMmioPfHandlerNew.");
    STAM_REG(pVM, &pVM->iom.s.StatMmioPhysHandlerNew, STAMTYPE_COUNTER, "/IOM/MmioPhysHandlerNew", STAMUNIT_OCCURENCES, "Number of calls to IOMR0MmioPhysHandler.");
    STAM_REG(pVM, &pVM->iom.s.StatMmioCommitsDirect, STAMTYPE_COUNTER, "/IOM/MmioCommitsDirect", STAMUNIT_OCCURENCES, "Number of ring-3 MMIO commits direct to handler via handle hint.");
    STAM_REG(pVM, &pVM->iom.s.StatMmioCommitsPgm, STAMTYPE_COUNTER, "/IOM/MmioCommitsPgm", STAMUNIT_OCCURENCES, "Number of ring-3 MMIO commits via PGM.");

    /* Redundant, but just in case we change something in the future */
    iomR3FlushCache(pVM);

    LogFlow(("IOMR3Init: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
224
225
/**
 * Called when a VM initialization stage is completed.
 *
 * Once ring-0 initialization has completed (statistics builds only), the
 * ring-3 I/O port and MMIO statistics indices are pushed into the ring-0
 * tables and the per-range STAM counters are registered.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmWhat     The initialization state that was completed.
 */
VMMR3_INT_DECL(int) IOMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
#ifdef VBOX_WITH_STATISTICS
    if (enmWhat == VMINITCOMPLETED_RING0)
    {
        /*
         * Synchronize the ring-3 I/O port and MMIO statistics indices into the
         * ring-0 tables to simplify ring-0 code.  This also makes sure that any
         * later calls to grow the statistics tables will fail.
         */
        int rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_SYNC_STATS_INDICES, 0, NULL);
        AssertLogRelRCReturn(rc, rc);

        /*
         * Register I/O port and MMIO stats now that we're done registering MMIO
         * regions and won't grow the table again.
         */
        for (uint32_t i = 0; i < pVM->iom.s.cIoPortRegs; i++)
        {
            PIOMIOPORTENTRYR3 pRegEntry = &pVM->iom.s.paIoPortRegs[i];
            /* UINT16_MAX marks entries without an allocated statistics slot. */
            if (   pRegEntry->fMapped
                && pRegEntry->idxStats != UINT16_MAX)
                iomR3IoPortRegStats(pVM, pRegEntry);
        }

        for (uint32_t i = 0; i < pVM->iom.s.cMmioRegs; i++)
        {
            PIOMMMIOENTRYR3 pRegEntry = &pVM->iom.s.paMmioRegs[i];
            if (   pRegEntry->fMapped
                && pRegEntry->idxStats != UINT16_MAX)
                iomR3MmioRegStats(pVM, pRegEntry);
        }
    }
#else
    RT_NOREF(pVM, enmWhat);
#endif
    return VINF_SUCCESS;
}
271
272
273/**
274 * Flushes the IOM port & statistics lookup cache
275 *
276 * @param pVM The cross context VM structure.
277 */
278static void iomR3FlushCache(PVM pVM)
279{
280 /*
281 * Since all relevant (1) cache use requires at least read access to the
282 * critical section, we can exclude all other EMTs by grabbing exclusive
283 * access to the critical section and then safely update the caches of
284 * other EMTs.
285 * (1) The irrelvant access not holding the lock is in assertion code.
286 */
287 IOM_LOCK_EXCL(pVM);
288 VMCPUID idCpu = pVM->cCpus;
289 while (idCpu-- > 0)
290 {
291 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
292 pVCpu->iom.s.pMMIORangeLastR0 = NIL_RTR0PTR;
293 pVCpu->iom.s.pMMIOStatsLastR0 = NIL_RTR0PTR;
294
295 pVCpu->iom.s.pMMIORangeLastR3 = NULL;
296 pVCpu->iom.s.pMMIOStatsLastR3 = NULL;
297 }
298
299 IOM_UNLOCK_EXCL(pVM);
300}
301
302
/**
 * The VM is being reset.
 *
 * Simply drops the per-EMT lookup caches; the registered ranges themselves
 * survive a reset.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) IOMR3Reset(PVM pVM)
{
    iomR3FlushCache(pVM);
}
312
313
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM need to relocate it self inside the GC.
 *
 * The IOM will update the addresses used by the switcher.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3_INT_DECL(void) IOMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    /* Nothing to relocate at present; IOM keeps no raw-mode pointers here. */
    RT_NOREF(pVM, offDelta);
}
328
329/**
330 * Terminates the IOM.
331 *
332 * Termination means cleaning up and freeing all resources,
333 * the VM it self is at this point powered off or suspended.
334 *
335 * @returns VBox status code.
336 * @param pVM The cross context VM structure.
337 */
338VMMR3_INT_DECL(int) IOMR3Term(PVM pVM)
339{
340 /*
341 * IOM is not owning anything but automatically freed resources,
342 * so there's nothing to do here.
343 */
344 NOREF(pVM);
345 return VINF_SUCCESS;
346}
347
348
349#ifdef VBOX_WITH_STATISTICS
350
/**
 * Create the statistics node for an MMIO address.
 *
 * Looks the address up in the MMIO statistics AVL tree first and returns the
 * existing node if found; otherwise allocates a new node, inserts it, and
 * registers the STAM counters for it.
 *
 * @returns Pointer to new stats node, or NULL on allocation/insertion failure.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The address.
 * @param   pszDesc     Description.
 */
PIOMMMIOSTATS iomR3MMIOStatsCreate(PVM pVM, RTGCPHYS GCPhys, const char *pszDesc)
{
    IOM_LOCK_EXCL(pVM);

    /* check if it already exists. */
    PIOMMMIOSTATS pStats = (PIOMMMIOSTATS)RTAvloGCPhysGet(&pVM->iom.s.pTreesR3->MmioStatTree, GCPhys);
    if (pStats)
    {
        IOM_UNLOCK_EXCL(pVM);
        return pStats;
    }

    /* allocate stats node. */
    int rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_IOM_STATS, (void **)&pStats);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* insert into the tree. */
        pStats->Core.Key = GCPhys;
        if (RTAvloGCPhysInsert(&pVM->iom.s.pTreesR3->MmioStatTree, &pStats->Core))
        {
            /* Drop the IOM lock before calling into STAM (lock ordering);
               the node is already visible in the tree at this point. */
            IOM_UNLOCK_EXCL(pVM);

            rc = STAMR3RegisterF(pVM, &pStats->Accesses,    STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,     pszDesc, "/IOM/MMIO/%RGp",              GCPhys); AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pStats->ProfReadR3,  STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, pszDesc, "/IOM/MMIO/%RGp/Read-R3",      GCPhys); AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pStats->ProfWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, pszDesc, "/IOM/MMIO/%RGp/Write-R3",     GCPhys); AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pStats->ProfReadRZ,  STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, pszDesc, "/IOM/MMIO/%RGp/Read-RZ",      GCPhys); AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pStats->ProfWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, pszDesc, "/IOM/MMIO/%RGp/Write-RZ",     GCPhys); AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pStats->ReadRZToR3,  STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,     pszDesc, "/IOM/MMIO/%RGp/Read-RZtoR3",  GCPhys); AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pStats->WriteRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,     pszDesc, "/IOM/MMIO/%RGp/Write-RZtoR3", GCPhys); AssertRC(rc);

            return pStats;
        }
        /* Insertion can only fail if the key already exists, which the lookup
           above should have caught while we held the lock. */
        AssertMsgFailed(("what! GCPhys=%RGp\n", GCPhys));
        MMHyperFree(pVM, pStats);
    }
    IOM_UNLOCK_EXCL(pVM);
    return NULL;
}
399
400#endif /* VBOX_WITH_STATISTICS */
401
/**
 * Registers a Memory Mapped I/O R3 handler.
 *
 * This API is called by PDM on behalf of a device. Devices must register ring-3 ranges
 * before any GC and R0 ranges can be registered using IOMR3MMIORegisterRC() and IOMR3MMIORegisterR0().
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The cross context VM structure.
 * @param   pDevIns             PDM device instance owning the MMIO range.
 * @param   GCPhysStart         First physical address in the range.
 * @param   cbRange             The size of the range (in bytes).
 * @param   pvUser              User argument for the callbacks.
 * @param   pfnWriteCallback    Pointer to function which is gonna handle Write operations.
 * @param   pfnReadCallback     Pointer to function which is gonna handle Read operations.
 * @param   pfnFillCallback     Pointer to function which is gonna handle Fill/memset operations.
 * @param   fFlags              Flags, see IOMMMIO_FLAGS_XXX.
 * @param   pszDesc             Pointer to description string. This must not be freed.
 */
VMMR3_INT_DECL(int)
IOMR3MmioRegisterR3(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTGCPHYS cbRange, RTHCPTR pvUser,
                    R3PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallback, R3PTRTYPE(PFNIOMMMIOREAD) pfnReadCallback,
                    R3PTRTYPE(PFNIOMMMIOFILL) pfnFillCallback, uint32_t fFlags, const char *pszDesc)
{
    LogFlow(("IOMR3MmioRegisterR3: pDevIns=%p GCPhysStart=%RGp cbRange=%RGp pvUser=%RHv pfnWriteCallback=%#x pfnReadCallback=%#x pfnFillCallback=%#x fFlags=%#x pszDesc=%s\n",
             pDevIns, GCPhysStart, cbRange, pvUser, pfnWriteCallback, pfnReadCallback, pfnFillCallback, fFlags, pszDesc));
    int rc;

    /*
     * Validate input.
     */
    /* Reject ranges that wrap around the end of the guest physical address space. */
    AssertMsgReturn(GCPhysStart + (cbRange - 1) >= GCPhysStart,("Wrapped! %RGp LB %RGp\n", GCPhysStart, cbRange),
                    VERR_IOM_INVALID_MMIO_RANGE);
    AssertMsgReturn(   !(fFlags & ~(IOMMMIO_FLAGS_VALID_MASK & ~IOMMMIO_FLAGS_ABS))
                    && (fFlags & IOMMMIO_FLAGS_READ_MODE)  <= IOMMMIO_FLAGS_READ_DWORD_QWORD
                    && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
                    ("%#x\n", fFlags),
                    VERR_INVALID_PARAMETER);

    /*
     * Allocate new range record and initialize it.
     */
    PIOMMMIORANGE pRange;
    rc = MMHyperAlloc(pVM, sizeof(*pRange), 0, MM_TAG_IOM, (void **)&pRange);
    if (RT_SUCCESS(rc))
    {
        pRange->Core.Key  = GCPhysStart;
        pRange->Core.KeyLast = GCPhysStart + (cbRange - 1);
        pRange->GCPhys    = GCPhysStart;
        pRange->cb        = cbRange;
        pRange->cRefs     = 1; /* The tree reference. */
        pRange->pszDesc   = pszDesc;

        /* NOTE(review): the R0/RC members below are left untouched, presumably
           relying on the allocator returning zeroed memory -- confirm. */
        //pRange->pvUserR0            = NIL_RTR0PTR;
        //pRange->pDevInsR0           = NIL_RTR0PTR;
        //pRange->pfnReadCallbackR0   = NIL_RTR0PTR;
        //pRange->pfnWriteCallbackR0  = NIL_RTR0PTR;
        //pRange->pfnFillCallbackR0   = NIL_RTR0PTR;

        //pRange->pvUserRC            = NIL_RTRCPTR;
        //pRange->pDevInsRC           = NIL_RTRCPTR;
        //pRange->pfnReadCallbackRC   = NIL_RTRCPTR;
        //pRange->pfnWriteCallbackRC  = NIL_RTRCPTR;
        //pRange->pfnFillCallbackRC   = NIL_RTRCPTR;

        pRange->fFlags      = fFlags;

        pRange->pvUserR3            = pvUser;
        pRange->pDevInsR3           = pDevIns;
        pRange->pfnReadCallbackR3   = pfnReadCallback;
        pRange->pfnWriteCallbackR3  = pfnWriteCallback;
        pRange->pfnFillCallbackR3   = pfnFillCallback;

        /*
         * Try register it with PGM and then insert it into the tree.
         */
        rc = PGMR3PhysMMIORegister(pVM, GCPhysStart, cbRange, pVM->iom.s.hMmioHandlerType,
                                   pRange, MMHyperR3ToR0(pVM, pRange), MMHyperR3ToRC(pVM, pRange), pszDesc);
        if (RT_SUCCESS(rc))
        {
            IOM_LOCK_EXCL(pVM);
            if (RTAvlroGCPhysInsert(&pVM->iom.s.pTreesR3->MMIOTree, &pRange->Core))
            {
                iomR3FlushCache(pVM);
                IOM_UNLOCK_EXCL(pVM);
                return VINF_SUCCESS;
            }

            /* bail out */
            IOM_UNLOCK_EXCL(pVM);
            DBGFR3Info(pVM->pUVM, "mmio", NULL, NULL);
            AssertMsgFailed(("This cannot happen!\n"));
            rc = VERR_IOM_IOPORT_IPE_3;
        }

        MMHyperFree(pVM, pRange);
    }
    /* NOTE(review): for instance > 0 the description string is apparently a
       heap copy (formatted with the instance number by the caller) and must
       be freed on failure -- confirm against the PDM caller. */
    if (pDevIns->iInstance > 0)
        MMR3HeapFree((void *)pszDesc);
    return rc;
}
503
504
#if 0 /* Raw-mode (RC) registration -- currently compiled out. */
/**
 * Registers a Memory Mapped I/O RC handler range.
 *
 * This API is called by PDM on behalf of a device. Devices must first register ring-3 ranges
 * using IOMMMIORegisterR3() before calling this function.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The cross context VM structure.
 * @param   pDevIns             PDM device instance owning the MMIO range.
 * @param   GCPhysStart         First physical address in the range.
 * @param   cbRange             The size of the range (in bytes).
 * @param   pvUser              User argument for the callbacks.
 * @param   pfnWriteCallback    Pointer to function which is gonna handle Write operations.
 * @param   pfnReadCallback     Pointer to function which is gonna handle Read operations.
 * @param   pfnFillCallback     Pointer to function which is gonna handle Fill/memset operations.
 * @thread  EMT
 */
VMMR3_INT_DECL(int)
IOMR3MmioRegisterRC(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTGCPHYS cbRange, RTGCPTR pvUser,
                    RCPTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallback, RCPTRTYPE(PFNIOMMMIOREAD) pfnReadCallback,
                    RCPTRTYPE(PFNIOMMMIOFILL) pfnFillCallback)
{
    LogFlow(("IOMR3MmioRegisterRC: pDevIns=%p GCPhysStart=%RGp cbRange=%RGp pvUser=%RGv pfnWriteCallback=%#x pfnReadCallback=%#x pfnFillCallback=%#x\n",
             pDevIns, GCPhysStart, cbRange, pvUser, pfnWriteCallback, pfnReadCallback, pfnFillCallback));
    AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_IOM_HM_IPE);

    /*
     * Validate input.
     */
    if (!pfnWriteCallback && !pfnReadCallback)
    {
        AssertMsgFailed(("No callbacks! %RGp LB %RGp\n", GCPhysStart, cbRange));
        return VERR_INVALID_PARAMETER;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM); Assert(pVCpu);

    /*
     * Find the MMIO range and check that the input matches.
     */
    IOM_LOCK_EXCL(pVM);
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysStart);
    AssertReturnStmt(pRange, IOM_UNLOCK_EXCL(pVM), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    AssertReturnStmt(pRange->pDevInsR3 == pDevIns, IOM_UNLOCK_EXCL(pVM), VERR_IOM_NOT_MMIO_RANGE_OWNER);
    AssertReturnStmt(pRange->GCPhys == GCPhysStart, IOM_UNLOCK_EXCL(pVM), VERR_IOM_INVALID_MMIO_RANGE);
    AssertReturnStmt(pRange->cb == cbRange, IOM_UNLOCK_EXCL(pVM), VERR_IOM_INVALID_MMIO_RANGE);

    pRange->pvUserRC          = pvUser;
    pRange->pfnReadCallbackRC = pfnReadCallback;
    pRange->pfnWriteCallbackRC= pfnWriteCallback;
    pRange->pfnFillCallbackRC = pfnFillCallback;
    pRange->pDevInsRC         = pDevIns->pDevInsForRC;
    IOM_UNLOCK_EXCL(pVM);

    return VINF_SUCCESS;
}
#endif
564
565
/**
 * Registers a Memory Mapped I/O R0 handler range.
 *
 * This API is called by PDM on behalf of a device. Devices must first register ring-3 ranges
 * using IOMR3MmioRegisterR3() before calling this function.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The cross context VM structure.
 * @param   pDevIns             PDM device instance owning the MMIO range.
 * @param   GCPhysStart         First physical address in the range.
 * @param   cbRange             The size of the range (in bytes).
 * @param   pvUser              User argument for the callbacks.
 * @param   pfnWriteCallback    Pointer to function which is gonna handle Write operations.
 * @param   pfnReadCallback     Pointer to function which is gonna handle Read operations.
 * @param   pfnFillCallback     Pointer to function which is gonna handle Fill/memset operations.
 * @thread  EMT
 */
VMMR3_INT_DECL(int)
IOMR3MmioRegisterR0(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTGCPHYS cbRange, RTR0PTR pvUser,
                    R0PTRTYPE(PFNIOMMMIOWRITE) pfnWriteCallback,
                    R0PTRTYPE(PFNIOMMMIOREAD) pfnReadCallback,
                    R0PTRTYPE(PFNIOMMMIOFILL) pfnFillCallback)
{
    LogFlow(("IOMR3MmioRegisterR0: pDevIns=%p GCPhysStart=%RGp cbRange=%RGp pvUser=%RHv pfnWriteCallback=%#x pfnReadCallback=%#x pfnFillCallback=%#x\n",
             pDevIns, GCPhysStart, cbRange, pvUser, pfnWriteCallback, pfnReadCallback, pfnFillCallback));

    /*
     * Validate input.
     */
    /* At least one of the read/write callbacks must be present. */
    if (!pfnWriteCallback && !pfnReadCallback)
    {
        AssertMsgFailed(("No callbacks! %RGp LB %RGp\n", GCPhysStart, cbRange));
        return VERR_INVALID_PARAMETER;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM); Assert(pVCpu);

    /*
     * Find the MMIO range and check that the input matches.
     */
    IOM_LOCK_EXCL(pVM);
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysStart);
    AssertReturnStmt(pRange, IOM_UNLOCK_EXCL(pVM), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    AssertReturnStmt(pRange->pDevInsR3 == pDevIns, IOM_UNLOCK_EXCL(pVM), VERR_IOM_NOT_MMIO_RANGE_OWNER);
    AssertReturnStmt(pRange->GCPhys == GCPhysStart, IOM_UNLOCK_EXCL(pVM), VERR_IOM_INVALID_MMIO_RANGE);
    AssertReturnStmt(pRange->cb == cbRange, IOM_UNLOCK_EXCL(pVM), VERR_IOM_INVALID_MMIO_RANGE);

    /* Fill in the ring-0 view of the already-registered ring-3 range. */
    pRange->pvUserR0          = pvUser;
    pRange->pfnReadCallbackR0 = pfnReadCallback;
    pRange->pfnWriteCallbackR0= pfnWriteCallback;
    pRange->pfnFillCallbackR0 = pfnFillCallback;
    pRange->pDevInsR0         = pDevIns->pDevInsR0RemoveMe;
    IOM_UNLOCK_EXCL(pVM);

    return VINF_SUCCESS;
}
623
624
/**
 * Deregisters a Memory Mapped I/O handler range.
 *
 * Registered GC, R0, and R3 ranges are affected.
 *
 * Works in two passes: first verifies ownership and completeness of every
 * range covering [GCPhysStart, GCPhysStart + cbRange - 1], then removes them
 * from the tree and from PGM.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pDevIns         Device instance which the MMIO region is registered.
 * @param   GCPhysStart     First physical address (GC) in the range.
 * @param   cbRange         Number of bytes to deregister.
 *
 * @remark  This function mainly for PCI PnP Config and will not do
 *          all the checks you might expect it to do.
 */
VMMR3_INT_DECL(int) IOMR3MmioDeregister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTGCPHYS cbRange)
{
    LogFlow(("IOMR3MmioDeregister: pDevIns=%p GCPhysStart=%RGp cbRange=%RGp\n", pDevIns, GCPhysStart, cbRange));

    /*
     * Validate input.
     */
    RTGCPHYS GCPhysLast = GCPhysStart + (cbRange - 1);
    if (GCPhysLast < GCPhysStart)
    {
        AssertMsgFailed(("Wrapped! %#x LB %RGp\n", GCPhysStart, cbRange));
        return VERR_IOM_INVALID_MMIO_RANGE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM); Assert(pVCpu);

    IOM_LOCK_EXCL(pVM);

    /*
     * Check ownership and such for the entire area.
     */
    RTGCPHYS GCPhys = GCPhysStart;
    /* The "GCPhys >= GCPhysStart" term guards against address wrap-around
       when advancing past the last range. */
    while (GCPhys <= GCPhysLast && GCPhys >= GCPhysStart)
    {
        PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
        if (!pRange)
        {
            IOM_UNLOCK_EXCL(pVM);
            return VERR_IOM_MMIO_RANGE_NOT_FOUND;
        }
        AssertMsgReturnStmt(pRange->pDevInsR3 == pDevIns,
                            ("Not owner! GCPhys=%RGp %RGp LB %RGp %s\n", GCPhys, GCPhysStart, cbRange, pRange->pszDesc),
                            IOM_UNLOCK_EXCL(pVM),
                            VERR_IOM_NOT_MMIO_RANGE_OWNER);
        AssertMsgReturnStmt(pRange->Core.KeyLast <= GCPhysLast,
                            ("Incomplete R3 range! GCPhys=%RGp %RGp LB %RGp %s\n", GCPhys, GCPhysStart, cbRange, pRange->pszDesc),
                            IOM_UNLOCK_EXCL(pVM),
                            VERR_IOM_INCOMPLETE_MMIO_RANGE);

        /* next */
        Assert(GCPhys <= pRange->Core.KeyLast);
        GCPhys = pRange->Core.KeyLast + 1;
    }

    /*
     * Do the actual removing of the MMIO ranges.
     */
    GCPhys = GCPhysStart;
    while (GCPhys <= GCPhysLast && GCPhys >= GCPhysStart)
    {
        iomR3FlushCache(pVM);

        PIOMMMIORANGE pRange = (PIOMMMIORANGE)RTAvlroGCPhysRemove(&pVM->iom.s.pTreesR3->MMIOTree, GCPhys);
        Assert(pRange);
        Assert(pRange->Core.Key == GCPhys && pRange->Core.KeyLast <= GCPhysLast);
        IOM_UNLOCK_EXCL(pVM); /* Lock order fun. */

        /* remove it from PGM */
        int rc = PGMR3PhysMMIODeregister(pVM, GCPhys, pRange->cb);
        AssertRC(rc);

        IOM_LOCK_EXCL(pVM);

        /* advance and free. */
        GCPhys = pRange->Core.KeyLast + 1;
        /* NOTE(review): for instance > 0 the description appears to be a heap
           copy owned by this range -- mirrors IOMR3MmioRegisterR3's failure
           path; confirm against the PDM caller. */
        if (pDevIns->iInstance > 0)
        {
            void *pvDesc = ASMAtomicXchgPtr((void * volatile *)&pRange->pszDesc, NULL);
            MMR3HeapFree(pvDesc);
        }
        iomMmioReleaseRange(pVM, pRange);
    }

    IOM_UNLOCK_EXCL(pVM);
    return VINF_SUCCESS;
}
715
716
/**
 * Notification from PGM that the pre-registered MMIO region has been mapped into
 * user address space.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the cross context VM structure.
 * @param   pvUser          The pvUserR3 argument of PGMR3PhysMMIOExPreRegister.
 * @param   GCPhys          The mapping address.
 * @remarks Called while owning the PGM lock.
 */
VMMR3_INT_DECL(int) IOMR3MmioExNotifyMapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys)
{
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    /* NIL_RTGCPHYS means the range is currently unmapped; anything else is a
       double-map and an internal processing error. */
    AssertReturn(pRange->GCPhys == NIL_RTGCPHYS, VERR_IOM_MMIO_IPE_1);

    IOM_LOCK_EXCL(pVM);
    Assert(pRange->GCPhys == NIL_RTGCPHYS);
    pRange->GCPhys       = GCPhys;
    pRange->Core.Key     = GCPhys;
    pRange->Core.KeyLast = GCPhys + pRange->cb - 1;
    if (RTAvlroGCPhysInsert(&pVM->iom.s.pTreesR3->MMIOTree, &pRange->Core))
    {
        iomR3FlushCache(pVM);
        IOM_UNLOCK_EXCL(pVM);
        return VINF_SUCCESS;
    }
    IOM_UNLOCK_EXCL(pVM);

    /* Insertion failed (overlapping range?) - roll the keys back to unmapped state. */
    AssertLogRelMsgFailed(("RTAvlroGCPhysInsert failed on %RGp..%RGp - %s\n", pRange->Core.Key, pRange->Core.KeyLast, pRange->pszDesc));
    pRange->GCPhys       = NIL_RTGCPHYS;
    pRange->Core.Key     = NIL_RTGCPHYS;
    pRange->Core.KeyLast = NIL_RTGCPHYS;
    return VERR_IOM_MMIO_IPE_2;
}
751
752
/**
 * Notification from PGM that the pre-registered MMIO region has been unmapped
 * from user address space.
 *
 * @param   pVM             Pointer to the cross context VM structure.
 * @param   pvUser          The pvUserR3 argument of PGMR3PhysMMIOExPreRegister.
 * @param   GCPhys          The mapping address.
 * @remarks Called while owning the PGM lock.
 */
VMMR3_INT_DECL(void) IOMR3MmioExNotifyUnmapped(PVM pVM, void *pvUser, RTGCPHYS GCPhys)
{
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    AssertLogRelReturnVoid(pRange->GCPhys == GCPhys);

    IOM_LOCK_EXCL(pVM);
    Assert(pRange->GCPhys == GCPhys);
    PIOMMMIORANGE pRemoved = (PIOMMMIORANGE)RTAvlroGCPhysRemove(&pVM->iom.s.pTreesR3->MMIOTree, GCPhys);
    if (pRemoved == pRange)
    {
        /* Mark the range as unmapped and drop the stale lookup caches. */
        pRange->GCPhys       = NIL_RTGCPHYS;
        pRange->Core.Key     = NIL_RTGCPHYS;
        pRange->Core.KeyLast = NIL_RTGCPHYS;
        iomR3FlushCache(pVM);
        IOM_UNLOCK_EXCL(pVM);
    }
    else
    {
        /* We removed the wrong node (shouldn't happen); put it back and complain. */
        if (pRemoved)
            RTAvlroGCPhysInsert(&pVM->iom.s.pTreesR3->MMIOTree, &pRemoved->Core);
        IOM_UNLOCK_EXCL(pVM);
        AssertLogRelMsgFailed(("RTAvlroGCPhysRemove returned %p instead of %p for %RGp (%s)\n",
                               pRemoved, pRange, GCPhys, pRange->pszDesc));
    }
}
787
788
/**
 * Notification from PGM that the pre-registered MMIO region has been
 * deregistered.
 *
 * Releases the tree reference on the range; it must already be unmapped
 * (GCPhys == NIL_RTGCPHYS) at this point.
 *
 * @param   pVM             Pointer to the cross context VM structure.
 * @param   pvUser          The pvUserR3 argument of PGMR3PhysMMIOExPreRegister.
 * @remarks Called while owning the PGM lock.
 */
VMMR3_INT_DECL(void) IOMR3MmioExNotifyDeregistered(PVM pVM, void *pvUser)
{
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    AssertLogRelReturnVoid(pRange->GCPhys == NIL_RTGCPHYS);
    iomMmioReleaseRange(pVM, pRange);
}
803
804
805/**
806 * Handles the unlikely and probably fatal merge cases.
807 *
808 * @returns Merged status code.
809 * @param rcStrict Current EM status code.
810 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
811 * with @a rcStrict.
812 * @param rcIom For logging purposes only.
813 * @param pVCpu The cross context virtual CPU structure of the
814 * calling EMT. For logging purposes.
815 */
816DECL_NO_INLINE(static, VBOXSTRICTRC) iomR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
817 int rcIom, PVMCPU pVCpu)
818{
819 if (RT_FAILURE_NP(rcStrict))
820 return rcStrict;
821
822 if (RT_FAILURE_NP(rcStrictCommit))
823 return rcStrictCommit;
824
825 if (rcStrict == rcStrictCommit)
826 return rcStrictCommit;
827
828 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc IOPort={%#06x<-%#xx/%u} MMIO={%RGp<-%.*Rhxs} (rcIom=%Rrc)\n",
829 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict),
830 pVCpu->iom.s.PendingIOPortWrite.IOPort,
831 pVCpu->iom.s.PendingIOPortWrite.u32Value, pVCpu->iom.s.PendingIOPortWrite.cbValue,
832 pVCpu->iom.s.PendingMmioWrite.GCPhys,
833 pVCpu->iom.s.PendingMmioWrite.cbValue, &pVCpu->iom.s.PendingMmioWrite.abValue[0], rcIom));
834 return VERR_IOM_FF_STATUS_IPE;
835}
836
837
838/**
839 * Helper for IOMR3ProcessForceFlag.
840 *
841 * @returns Merged status code.
842 * @param rcStrict Current EM status code.
843 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
844 * with @a rcStrict.
845 * @param rcIom Either VINF_IOM_R3_IOPORT_COMMIT_WRITE or
846 * VINF_IOM_R3_MMIO_COMMIT_WRITE.
847 * @param pVCpu The cross context virtual CPU structure of the
848 * calling EMT.
849 */
850DECLINLINE(VBOXSTRICTRC) iomR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, int rcIom, PVMCPU pVCpu)
851{
852 /* Simple. */
853 if (RT_LIKELY(rcStrict == rcIom || rcStrict == VINF_EM_RAW_TO_R3 || rcStrict == VINF_SUCCESS))
854 return rcStrictCommit;
855
856 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
857 return rcStrict;
858
859 /* EM scheduling status codes. */
860 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
861 && rcStrict <= VINF_EM_LAST))
862 {
863 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
864 && rcStrictCommit <= VINF_EM_LAST))
865 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
866 }
867
868 /* Unlikely */
869 return iomR3MergeStatusSlow(rcStrict, rcStrictCommit, rcIom, pVCpu);
870}
871
872
/**
 * Called by force-flag handling code when VMCPU_FF_IOM is set.
 *
 * Commits the pending I/O port and/or MMIO write recorded by ring-0 or
 * raw-mode, then merges the commit status into @a rcStrict.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 * @thread  EMT(pVCpu)
 *
 * @remarks The VMCPU_FF_IOM flag is handled before the status codes by EM, so
 *          we're very likely to see @a rcStrict set to
 *          VINF_IOM_R3_IOPORT_COMMIT_WRITE and VINF_IOM_R3_MMIO_COMMIT_WRITE
 *          here.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IOMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Clear the flag first; at least one pending write must be queued. */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IOM);
    Assert(pVCpu->iom.s.PendingIOPortWrite.cbValue || pVCpu->iom.s.PendingMmioWrite.cbValue);

    if (pVCpu->iom.s.PendingIOPortWrite.cbValue)
    {
        /* Replay the I/O port write in ring-3, then mark it consumed
           (cbValue = 0) before merging the status. */
        Log5(("IOM: Dispatching pending I/O port write: %#x LB %u -> %RTiop\n", pVCpu->iom.s.PendingIOPortWrite.u32Value,
              pVCpu->iom.s.PendingIOPortWrite.cbValue, pVCpu->iom.s.PendingIOPortWrite.IOPort));
        STAM_COUNTER_INC(&pVM->iom.s.StatIoPortCommits);
        VBOXSTRICTRC rcStrictCommit = IOMIOPortWrite(pVM, pVCpu, pVCpu->iom.s.PendingIOPortWrite.IOPort,
                                                     pVCpu->iom.s.PendingIOPortWrite.u32Value,
                                                     pVCpu->iom.s.PendingIOPortWrite.cbValue);
        pVCpu->iom.s.PendingIOPortWrite.cbValue = 0;
        rcStrict = iomR3MergeStatus(rcStrict, rcStrictCommit, VINF_IOM_R3_IOPORT_COMMIT_WRITE, pVCpu);
    }


    if (pVCpu->iom.s.PendingMmioWrite.cbValue)
    {
        Log5(("IOM: Dispatching pending MMIO write: %RGp LB %#x\n",
              pVCpu->iom.s.PendingMmioWrite.GCPhys, pVCpu->iom.s.PendingMmioWrite.cbValue));

        /* Use new MMIO handle hint and bypass PGM if it still looks right. */
        size_t idxMmioRegionHint = pVCpu->iom.s.PendingMmioWrite.idxMmioRegionHint;
        if (idxMmioRegionHint < pVM->iom.s.cMmioRegs)
        {
            PIOMMMIOENTRYR3 pRegEntry    = &pVM->iom.s.paMmioRegs[idxMmioRegionHint];
            RTGCPHYS const  GCPhysMapping = pRegEntry->GCPhysMapping;
            RTGCPHYS const  offRegion     = pVCpu->iom.s.PendingMmioWrite.GCPhys - GCPhysMapping;
            /* Unsigned subtraction: offRegion underflows to a huge value when
               GCPhys is below the mapping, so the range check also rejects that. */
            if (offRegion < pRegEntry->cbRegion && GCPhysMapping != NIL_RTGCPHYS)
            {
                STAM_COUNTER_INC(&pVM->iom.s.StatMmioCommitsDirect);
                VBOXSTRICTRC rcStrictCommit = iomR3MmioCommitWorker(pVM, pVCpu, pRegEntry, offRegion);
                pVCpu->iom.s.PendingMmioWrite.cbValue = 0;
                return iomR3MergeStatus(rcStrict, rcStrictCommit, VINF_IOM_R3_MMIO_COMMIT_WRITE, pVCpu);
            }
        }

        /* Fall back on PGM. */
        STAM_COUNTER_INC(&pVM->iom.s.StatMmioCommitsPgm);
        VBOXSTRICTRC rcStrictCommit = PGMPhysWrite(pVM, pVCpu->iom.s.PendingMmioWrite.GCPhys,
                                                   pVCpu->iom.s.PendingMmioWrite.abValue, pVCpu->iom.s.PendingMmioWrite.cbValue,
                                                   PGMACCESSORIGIN_IOM);
        pVCpu->iom.s.PendingMmioWrite.cbValue = 0;
        rcStrict = iomR3MergeStatus(rcStrict, rcStrictCommit, VINF_IOM_R3_MMIO_COMMIT_WRITE, pVCpu);
    }

    return rcStrict;
}
937
938
939/**
940 * Notification from DBGF that the number of active I/O port or MMIO
941 * breakpoints has change.
942 *
943 * For performance reasons, IOM will only call DBGF before doing I/O and MMIO
944 * accesses where there are armed breakpoints.
945 *
946 * @param pVM The cross context VM structure.
947 * @param fPortIo True if there are armed I/O port breakpoints.
948 * @param fMmio True if there are armed MMIO breakpoints.
949 */
950VMMR3_INT_DECL(void) IOMR3NotifyBreakpointCountChange(PVM pVM, bool fPortIo, bool fMmio)
951{
952 /** @todo I/O breakpoints. */
953 RT_NOREF3(pVM, fPortIo, fMmio);
954}
955
956
957/**
958 * Notification from DBGF that an event has been enabled or disabled.
959 *
960 * For performance reasons, IOM may cache the state of events it implements.
961 *
962 * @param pVM The cross context VM structure.
963 * @param enmEvent The event.
964 * @param fEnabled The new state.
965 */
966VMMR3_INT_DECL(void) IOMR3NotifyDebugEventChange(PVM pVM, DBGFEVENT enmEvent, bool fEnabled)
967{
968 /** @todo IOM debug events. */
969 RT_NOREF3(pVM, enmEvent, fEnabled);
970}
971
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette