VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp @ 81333

Last change on this file since 81333 was 81333, checked in by vboxsync, 5 years ago

IOM: More MMIO stuff, almost there now... bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 37.8 KB
1/* $Id: IOMAllMMIO.cpp 81333 2019-10-17 23:49:39Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#include <VBox/vmm/iem.h>
32#include "IOMInternal.h"
33#include <VBox/vmm/vmcc.h>
34#include <VBox/vmm/vmm.h>
35#include <VBox/vmm/hm.h>
36#include "IOMInline.h"
37
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48
49
50#ifndef IN_RING3
51/**
52 * Defers a pending MMIO write to ring-3.
53 *
54 * @returns VINF_IOM_R3_MMIO_COMMIT_WRITE
55 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
56 * @param GCPhys The write address.
57 * @param pvBuf The bytes being written.
58 * @param cbBuf How many bytes.
59 * @param pRange The range, if resolved.
60 */
61static VBOXSTRICTRC iomMmioRing3WritePending(PVMCPU pVCpu, RTGCPHYS GCPhys, void const *pvBuf, size_t cbBuf, PIOMMMIORANGE pRange)
62{
63 Log5(("iomMmioRing3WritePending: %RGp LB %#x\n", GCPhys, cbBuf));
64 if (pVCpu->iom.s.PendingMmioWrite.cbValue == 0)
65 {
66 pVCpu->iom.s.PendingMmioWrite.GCPhys = GCPhys;
67 AssertReturn(cbBuf <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
68 pVCpu->iom.s.PendingMmioWrite.cbValue = (uint32_t)cbBuf;
69 pVCpu->iom.s.PendingMmioWrite.idxMmioRegionHint = UINT32_MAX;
70 memcpy(pVCpu->iom.s.PendingMmioWrite.abValue, pvBuf, cbBuf);
71 }
72 else
73 {
74 /*
75 * Join with pending if adjacent.
76 *
77 * This may happen if the stack overflows into MMIO territory and RSP/ESP/SP
78 * isn't aligned. IEM will bounce buffer the access and do one write for each
79 * page. We get here when the 2nd page part is written.
80 */
81 uint32_t const cbOldValue = pVCpu->iom.s.PendingMmioWrite.cbValue;
82 AssertMsgReturn(GCPhys == pVCpu->iom.s.PendingMmioWrite.GCPhys + cbOldValue,
83 ("pending %RGp LB %#x; incoming %RGp LB %#x\n",
84 pVCpu->iom.s.PendingMmioWrite.GCPhys, cbOldValue, GCPhys, cbBuf),
85 VERR_IOM_MMIO_IPE_1);
86 AssertReturn(cbBuf <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue) - cbOldValue, VERR_IOM_MMIO_IPE_2);
87 pVCpu->iom.s.PendingMmioWrite.cbValue = cbOldValue + (uint32_t)cbBuf;
88 memcpy(&pVCpu->iom.s.PendingMmioWrite.abValue[cbOldValue], pvBuf, cbBuf);
89 }
90
91 VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
92 RT_NOREF_PV(pRange);
93 return VINF_IOM_R3_MMIO_COMMIT_WRITE;
94}
95#endif
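/*
 * Worked example (hypothetical numbers) of the ring-3 deferral above: a 4-byte
 * push whose guest-physical target starts at offset 0xffe of an MMIO page is
 * bounce buffered by IEM as two writes.  The first call stores 2 bytes and sets
 * PendingMmioWrite = { GCPhys = 0xf0000ffe, cbValue = 2 }; the second call
 * arrives with GCPhys 0xf0001000 == 0xf0000ffe + 2, so it is appended and the
 * pending record becomes { GCPhys = 0xf0000ffe, cbValue = 4 }.  Both calls
 * return VINF_IOM_R3_MMIO_COMMIT_WRITE and set VMCPU_FF_IOM so that ring-3
 * commits the combined write in one go.
 */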
96
97
98/**
99 * Deals with complicated MMIO writes.
100 *
101 * Complicated means unaligned or non-dword/qword sized accesses depending on
102 * the MMIO region's access mode flags.
103 *
104 * @returns Strict VBox status code. Any EM scheduling status code,
105 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
106 * VINF_IOM_R3_MMIO_READ may be returned.
107 *
108 * @param pVM The cross context VM structure.
109 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
110 * @param pRange The range to write to.
111 * @param GCPhys The physical address to start writing.
112 * @param pvValue The value to write.
113 * @param cbValue The size of the value to write.
114 */
115static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
116 void const *pvValue, unsigned cbValue)
117{
118 RT_NOREF_PV(pVCpu);
119 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
120 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
121 VERR_IOM_MMIO_IPE_1);
122 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
123 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
124 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
125 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
126
127 /*
128 * Do debug stop if requested.
129 */
130 int rc = VINF_SUCCESS; NOREF(pVM);
131#ifdef VBOX_STRICT
132 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
133 {
134# ifdef IN_RING3
135 LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
136 R3STRING(pRange->pszDesc)));
137 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
138 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
139 if (rc == VERR_DBGF_NOT_ATTACHED)
140 rc = VINF_SUCCESS;
141# else
142 return VINF_IOM_R3_MMIO_WRITE;
143# endif
144 }
145#endif
146
147 /*
148 * Check if we should ignore the write.
149 */
150 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
151 {
152 Assert(cbValue != 4 || (GCPhys & 3));
153 return VINF_SUCCESS;
154 }
155 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
156 {
157 Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
158 return VINF_SUCCESS;
159 }
160
161 /*
162 * Split and conquer.
163 */
164 for (;;)
165 {
166 unsigned const offAccess = GCPhys & 3;
167 unsigned cbThisPart = 4 - offAccess;
168 if (cbThisPart > cbValue)
169 cbThisPart = cbValue;
170
171 /*
172 * Get the missing bits (if any).
173 */
174 uint32_t u32MissingValue = 0;
175 if (fReadMissing && cbThisPart != 4)
176 {
177 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
178 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
179 switch (rc2)
180 {
181 case VINF_SUCCESS:
182 break;
183 case VINF_IOM_MMIO_UNUSED_FF:
184 u32MissingValue = UINT32_C(0xffffffff);
185 break;
186 case VINF_IOM_MMIO_UNUSED_00:
187 u32MissingValue = 0;
188 break;
189#ifndef IN_RING3
190 case VINF_IOM_R3_MMIO_READ:
191 case VINF_IOM_R3_MMIO_READ_WRITE:
192 case VINF_IOM_R3_MMIO_WRITE:
193 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
194 rc2 = VBOXSTRICTRC_TODO(iomMmioRing3WritePending(pVCpu, GCPhys, pvValue, cbValue, pRange));
195 if (rc == VINF_SUCCESS || rc2 < rc)
196 rc = rc2;
197 return rc;
198#endif
199 default:
200 if (RT_FAILURE(rc2))
201 {
202 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
203 return rc2;
204 }
205 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
206 if (rc == VINF_SUCCESS || rc2 < rc)
207 rc = rc2;
208 break;
209 }
210 }
211
212 /*
213 * Merge missing and given bits.
214 */
215 uint32_t u32GivenMask;
216 uint32_t u32GivenValue;
217 switch (cbThisPart)
218 {
219 case 1:
220 u32GivenValue = *(uint8_t const *)pvValue;
221 u32GivenMask = UINT32_C(0x000000ff);
222 break;
223 case 2:
224 u32GivenValue = *(uint16_t const *)pvValue;
225 u32GivenMask = UINT32_C(0x0000ffff);
226 break;
227 case 3:
228 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
229 ((uint8_t const *)pvValue)[2], 0);
230 u32GivenMask = UINT32_C(0x00ffffff);
231 break;
232 case 4:
233 u32GivenValue = *(uint32_t const *)pvValue;
234 u32GivenMask = UINT32_C(0xffffffff);
235 break;
236 default:
237 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
238 }
239 if (offAccess)
240 {
241 u32GivenValue <<= offAccess * 8;
242 u32GivenMask <<= offAccess * 8;
243 }
244
245 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
246 | (u32GivenValue & u32GivenMask);
247
248 /*
249 * Do DWORD write to the device.
250 */
251 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
252 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
253 switch (rc2)
254 {
255 case VINF_SUCCESS:
256 break;
257#ifndef IN_RING3
258 case VINF_IOM_R3_MMIO_READ:
259 case VINF_IOM_R3_MMIO_READ_WRITE:
260 case VINF_IOM_R3_MMIO_WRITE:
261 Log3(("iomMMIODoComplicatedWrite: deferring GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
262 AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
263 AssertReturn(cbValue + (GCPhys & 3) <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
264 pVCpu->iom.s.PendingMmioWrite.GCPhys = GCPhys & ~(RTGCPHYS)3;
265 pVCpu->iom.s.PendingMmioWrite.cbValue = cbValue + (GCPhys & 3);
266 *(uint32_t *)pVCpu->iom.s.PendingMmioWrite.abValue = u32Value;
267 if (cbValue > cbThisPart)
268 memcpy(&pVCpu->iom.s.PendingMmioWrite.abValue[4],
269 (uint8_t const *)pvValue + cbThisPart, cbValue - cbThisPart);
270 VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
271 if (rc == VINF_SUCCESS)
272 rc = VINF_IOM_R3_MMIO_COMMIT_WRITE;
273 return rc;
274#endif
275 default:
276 if (RT_FAILURE(rc2))
277 {
278 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
279 return rc2;
280 }
281 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
282 if (rc == VINF_SUCCESS || rc2 < rc)
283 rc = rc2;
284 break;
285 }
286
287 /*
288 * Advance.
289 */
290 cbValue -= cbThisPart;
291 if (!cbValue)
292 break;
293 GCPhys += cbThisPart;
294 pvValue = (uint8_t const *)pvValue + cbThisPart;
295 }
296
297 return rc;
298}
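/*
 * Illustrative sketch of the "merge missing and given bits" step in
 * iomMMIODoComplicatedWrite above.  This helper is not used by the code; it
 * only restates the mask arithmetic on a concrete case: writing the single
 * byte 0xAB at offset 3 of a dword-only register that currently reads back
 * 0x12345678 produces the dword 0xAB345678, which is then written back whole.
 */
static uint32_t iomExampleMergeByteWrite(uint32_t u32Existing, uint8_t bValue, unsigned offAccess)
{
    uint32_t const u32GivenValue = (uint32_t)bValue << (offAccess * 8);              /* 0xab000000 for offAccess=3 */
    uint32_t const u32GivenMask  = UINT32_C(0x000000ff) << (offAccess * 8);          /* 0xff000000 for offAccess=3 */
    /* Same formula as above: keep the read-back bits outside the given mask. */
    return (u32Existing & ~u32GivenMask) | (u32GivenValue & u32GivenMask);           /* 0xab345678 */
}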
299
300
301
302
303/**
304 * Wrapper which does the write and updates range statistics when such are enabled.
305 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
306 */
307static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
308 const void *pvData, unsigned cb)
309{
310#ifdef VBOX_WITH_STATISTICS
311 int rcSem = IOM_LOCK_SHARED(pVM);
312 if (rcSem == VERR_SEM_BUSY)
313 return VINF_IOM_R3_MMIO_WRITE;
314 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
315 if (!pStats)
316# ifdef IN_RING3
317 return VERR_NO_MEMORY;
318# else
319 return VINF_IOM_R3_MMIO_WRITE;
320# endif
321 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
322#else
323 NOREF(pVCpu);
324#endif
325
326 VBOXSTRICTRC rcStrict;
327 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
328 {
329 if ( (cb == 4 && !(GCPhysFault & 3))
330 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
331 || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
332 rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
333 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
334 else
335 rcStrict = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhysFault, pvData, cb);
336 }
337 else
338 rcStrict = VINF_SUCCESS;
339
340 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
341 STAM_COUNTER_INC(&pStats->Accesses);
342 return rcStrict;
343}
344
345
346/**
347 * Deals with complicated MMIO reads.
348 *
349 * Complicated means unaligned or non-dword/qword sized accesses depending on
350 * the MMIO region's access mode flags.
351 *
352 * @returns Strict VBox status code. Any EM scheduling status code,
353 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
354 * VINF_IOM_R3_MMIO_WRITE may be returned.
355 *
356 * @param pVM The cross context VM structure.
357 * @param pRange The range to read from.
358 * @param GCPhys The physical address to start reading.
359 * @param pvValue Where to store the value.
360 * @param cbValue The size of the value to read.
361 */
362static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
363{
364 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
365 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
366 VERR_IOM_MMIO_IPE_1);
367 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
368 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
369
370 /*
371 * Do debug stop if requested.
372 */
373 int rc = VINF_SUCCESS; NOREF(pVM);
374#ifdef VBOX_STRICT
375 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
376 {
377# ifdef IN_RING3
378 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
379 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
380 if (rc == VERR_DBGF_NOT_ATTACHED)
381 rc = VINF_SUCCESS;
382# else
383 return VINF_IOM_R3_MMIO_READ;
384# endif
385 }
386#endif
387
388 /*
389 * Split and conquer.
390 */
391 for (;;)
392 {
393 /*
394 * Do DWORD read from the device.
395 */
396 uint32_t u32Value;
397 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
398 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
399 switch (rc2)
400 {
401 case VINF_SUCCESS:
402 break;
403 case VINF_IOM_MMIO_UNUSED_FF:
404 u32Value = UINT32_C(0xffffffff);
405 break;
406 case VINF_IOM_MMIO_UNUSED_00:
407 u32Value = 0;
408 break;
409 case VINF_IOM_R3_MMIO_READ:
410 case VINF_IOM_R3_MMIO_READ_WRITE:
411 case VINF_IOM_R3_MMIO_WRITE:
412 /** @todo What if we've split a transfer and already read
413 * something? Since reads can have side effects we could be
414 * kind of screwed here... */
415 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
416 return rc2;
417 default:
418 if (RT_FAILURE(rc2))
419 {
420 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
421 return rc2;
422 }
423 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
424 if (rc == VINF_SUCCESS || rc2 < rc)
425 rc = rc2;
426 break;
427 }
428 u32Value >>= (GCPhys & 3) * 8;
429
430 /*
431 * Write what we've read.
432 */
433 unsigned cbThisPart = 4 - (GCPhys & 3);
434 if (cbThisPart > cbValue)
435 cbThisPart = cbValue;
436
437 switch (cbThisPart)
438 {
439 case 1:
440 *(uint8_t *)pvValue = (uint8_t)u32Value;
441 break;
442 case 2:
443 *(uint16_t *)pvValue = (uint16_t)u32Value;
444 break;
445 case 3:
446 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
447 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
448 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
449 break;
450 case 4:
451 *(uint32_t *)pvValue = u32Value;
452 break;
453 }
454
455 /*
456 * Advance.
457 */
458 cbValue -= cbThisPart;
459 if (!cbValue)
460 break;
461 GCPhys += cbThisPart;
462 pvValue = (uint8_t *)pvValue + cbThisPart;
463 }
464
465 return rc;
466}
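/*
 * Worked example for the read splitter above (hypothetical values): a 2-byte
 * guest read at offset 2 of a dword-only register.  The callback is invoked
 * once for the aligned dword (GCPhys & ~3) and returns 0x12345678; the code
 * shifts right by (GCPhys & 3) * 8 = 16 bits and stores the low 16 bits, so
 * the guest sees 0x1234.
 */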
467
468
469/**
470 * Implements VINF_IOM_MMIO_UNUSED_FF.
471 *
472 * @returns VINF_SUCCESS.
473 * @param pvValue Where to store the 0xff bytes.
474 * @param cbValue How many bytes to read.
475 */
476static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
477{
478 switch (cbValue)
479 {
480 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
481 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
482 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
483 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
484 default:
485 {
486 uint8_t *pb = (uint8_t *)pvValue;
487 while (cbValue--)
488 *pb++ = UINT8_C(0xff);
489 break;
490 }
491 }
492 return VINF_SUCCESS;
493}
494
495
496/**
497 * Implements VINF_IOM_MMIO_UNUSED_00.
498 *
499 * @returns VINF_SUCCESS.
500 * @param pvValue Where to store the zeros.
501 * @param cbValue How many bytes to read.
502 */
503static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
504{
505 switch (cbValue)
506 {
507 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
508 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
509 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
510 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
511 default:
512 {
513 uint8_t *pb = (uint8_t *)pvValue;
514 while (cbValue--)
515 *pb++ = UINT8_C(0x00);
516 break;
517 }
518 }
519 return VINF_SUCCESS;
520}
521
522
523/**
524 * Wrapper which does the read and updates range statistics when such are enabled.
525 */
526DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
527 void *pvValue, unsigned cbValue)
528{
529#ifdef VBOX_WITH_STATISTICS
530 int rcSem = IOM_LOCK_SHARED(pVM);
531 if (rcSem == VERR_SEM_BUSY)
532 return VINF_IOM_R3_MMIO_READ;
533 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
534 if (!pStats)
535# ifdef IN_RING3
536 return VERR_NO_MEMORY;
537# else
538 return VINF_IOM_R3_MMIO_READ;
539# endif
540 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
541#else
542 NOREF(pVCpu);
543#endif
544
545 VBOXSTRICTRC rcStrict;
546 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
547 {
548 if ( ( cbValue == 4
549 && !(GCPhys & 3))
550 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
551 || ( cbValue == 8
552 && !(GCPhys & 7)
553 && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
554 rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
555 pvValue, cbValue);
556 else
557 rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
558 }
559 else
560 rcStrict = VINF_IOM_MMIO_UNUSED_FF;
561 if (rcStrict != VINF_SUCCESS)
562 {
563 switch (VBOXSTRICTRC_VAL(rcStrict))
564 {
565 case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
566 case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
567 }
568 }
569
570 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
571 STAM_COUNTER_INC(&pStats->Accesses);
572 return rcStrict;
573}
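/*
 * Illustrative sketch of a device read callback as consumed by the wrapper
 * above.  The device and its register decode are hypothetical; the callback
 * shape assumes the classic FNIOMMMIOREAD form this file invokes
 * (pDevIns, pvUser, GCPhysAddr, pv, cb).  Unimplemented registers return
 * VINF_IOM_MMIO_UNUSED_FF, which iomMMIODoRead turns into 0xff filler bytes.
 */
static DECLCALLBACK(int) exampleDevMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
{
    RT_NOREF(pDevIns, pvUser);
    Assert(cb == 4); /* dword-only range: the wrappers above always hand us aligned dwords */
    switch (GCPhysAddr & 0xff) /* hypothetical register decode */
    {
        case 0x00: /* made-up status register */
            *(uint32_t *)pv = UINT32_C(0x00000001);
            return VINF_SUCCESS;
        default:   /* unimplemented registers read as all-ones */
            return VINF_IOM_MMIO_UNUSED_FF;
    }
}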
574
575/**
576 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
577 *
578 * @returns VBox status code (appropriate for GC return).
579 * @param pVM The cross context VM structure.
580 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
581 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
582 * any error code (the EPT misconfig hack).
583 * @param pCtxCore Trap register frame.
584 * @param GCPhysFault The GC physical address corresponding to pvFault.
585 * @param pvUser Pointer to the MMIO ring-3 range entry.
586 */
587VBOXSTRICTRC iomMmioCommonPfHandlerOld(PVMCC pVM, PVMCPUCC pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
588 RTGCPHYS GCPhysFault, void *pvUser)
589{
590 RT_NOREF_PV(uErrorCode);
591 int rc = IOM_LOCK_SHARED(pVM);
592#ifndef IN_RING3
593 if (rc == VERR_SEM_BUSY)
594 return VINF_IOM_R3_MMIO_READ_WRITE;
595#endif
596 AssertRC(rc);
597
598 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
599 Log(("iomMmioCommonPfHandlerOld: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
600
601 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
602 Assert(pRange);
603 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
604 iomMmioRetainRange(pRange);
605#ifndef VBOX_WITH_STATISTICS
606 IOM_UNLOCK_SHARED(pVM);
607
608#else
609 /*
610 * Locate the statistics.
611 */
612 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
613 if (!pStats)
614 {
615 iomMmioReleaseRange(pVM, pRange);
616# ifdef IN_RING3
617 return VERR_NO_MEMORY;
618# else
619 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
620 return VINF_IOM_R3_MMIO_READ_WRITE;
621# endif
622 }
623#endif
624
625#ifndef IN_RING3
626 /*
627 * Should we defer the request right away? This isn't usually the case, so
628 * do the simple test first and then try to deal with uErrorCode being N/A.
629 */
630 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
631 || !pRange->CTX_SUFF(pfnReadCallback))
632 && ( uErrorCode == UINT32_MAX
633 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
634 : uErrorCode & X86_TRAP_PF_RW
635 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
636 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
637 )
638 )
639 )
640 {
641 if (uErrorCode & X86_TRAP_PF_RW)
642 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
643 else
644 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
645
646 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
647 iomMmioReleaseRange(pVM, pRange);
648 return VINF_IOM_R3_MMIO_READ_WRITE;
649 }
650#endif /* !IN_RING3 */
651
652 /*
653 * Enter the device critical section (the range was retained above).
654 */
655 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
656 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
657 if (rc != VINF_SUCCESS)
658 {
659 iomMmioReleaseRange(pVM, pRange);
660 return rc;
661 }
662
663 /*
664 * Let IEM call us back via iomMmioHandler.
665 */
666 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
667
668 NOREF(pCtxCore); NOREF(GCPhysFault);
669 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
670 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
671 iomMmioReleaseRange(pVM, pRange);
672 if (RT_SUCCESS(rcStrict))
673 return rcStrict;
674 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
675 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
676 {
677 Log(("IOM: Hit unsupported IEM feature!\n"));
678 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
679 }
680 return rcStrict;
681}
682
683
684/**
685 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
686 * \#PF access handler callback for MMIO pages.}
687 *
688 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
689 */
690DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
691 RTGCPHYS GCPhysFault, void *pvUser)
692{
693 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
694 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); NOREF(pvFault);
695 return iomMmioCommonPfHandlerOld(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
696}
697
698
699/**
700 * Physical access handler for MMIO ranges.
701 *
702 * @returns VBox status code (appropriate for GC return).
703 * @param pVM The cross context VM structure.
704 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
705 * @param uErrorCode CPU Error code.
706 * @param pCtxCore Trap register frame.
707 * @param GCPhysFault The GC physical address.
708 */
709VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
710{
711 /*
712 * We don't have a range here, so look it up before calling the common function.
713 */
714 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
715#ifndef IN_RING3
716 if (rc2 == VERR_SEM_BUSY)
717 return VINF_IOM_R3_MMIO_READ_WRITE;
718#endif
719 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
720 if (RT_UNLIKELY(!pRange))
721 {
722 IOM_UNLOCK_SHARED(pVM);
723 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
724 }
725 iomMmioRetainRange(pRange);
726 IOM_UNLOCK_SHARED(pVM);
727
728 VBOXSTRICTRC rcStrict = iomMmioCommonPfHandlerOld(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
729
730 iomMmioReleaseRange(pVM, pRange);
731 return VBOXSTRICTRC_VAL(rcStrict);
732}
733
734
735/**
736 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
737 *
738 * @remarks The @a pvUser argument points to the MMIO range entry.
739 */
740PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
741 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
742{
743 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
744 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
745
746 NOREF(pvPhys); NOREF(enmOrigin);
747 AssertPtr(pRange);
748 AssertMsg(cbBuf >= 1, ("%zu\n", cbBuf));
749
750
751#ifndef IN_RING3
752 /*
753 * If someone is doing FXSAVE, FXRSTOR, XSAVE, XRSTOR or other stuff dealing with
754 * large amounts of data, just go to ring-3 where we don't need to deal with partial
755 * successes. No chance any of these will be problematic read-modify-write stuff.
756 */
757 if (cbBuf > sizeof(pVCpu->iom.s.PendingMmioWrite.abValue))
758 return enmAccessType == PGMACCESSTYPE_WRITE ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
759#endif
760
761 /*
762 * Validate the range.
763 */
764 int rc = IOM_LOCK_SHARED(pVM);
765#ifndef IN_RING3
766 if (rc == VERR_SEM_BUSY)
767 {
768 if (enmAccessType == PGMACCESSTYPE_READ)
769 return VINF_IOM_R3_MMIO_READ;
770 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
771 return iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, NULL /*pRange*/);
772 }
773#endif
774 AssertRC(rc);
775 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
776
777 /*
778 * Perform locking.
779 */
780 iomMmioRetainRange(pRange);
781 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
782 IOM_UNLOCK_SHARED(pVM);
783#ifdef IN_RING3
784 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
785#else
786 VBOXSTRICTRC rcStrict = pDevIns ? PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE)
787 : VINF_IOM_R3_MMIO_READ_WRITE;
788#endif
789 if (rcStrict == VINF_SUCCESS)
790 {
791 /*
792 * Perform the access.
793 */
794 if (enmAccessType == PGMACCESSTYPE_READ)
795 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
796 else
797 {
798 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
799#ifndef IN_RING3
800 if (rcStrict == VINF_IOM_R3_MMIO_WRITE)
801 rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
802#endif
803 }
804
805 /* Check the return code. */
806#ifdef IN_RING3
807 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - Access type %d - %RGp - %s\n",
808 VBOXSTRICTRC_VAL(rcStrict), enmAccessType, GCPhysFault, pRange->pszDesc));
809#else
810 AssertMsg( rcStrict == VINF_SUCCESS
811 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
812 || (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE && enmAccessType == PGMACCESSTYPE_WRITE)
813 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
814 || rcStrict == VINF_EM_DBG_STOP
815 || rcStrict == VINF_EM_DBG_EVENT
816 || rcStrict == VINF_EM_DBG_BREAKPOINT
817 || rcStrict == VINF_EM_OFF
818 || rcStrict == VINF_EM_SUSPEND
819 || rcStrict == VINF_EM_RESET
820 //|| rcStrict == VINF_EM_HALT /* ?? */
821 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
822 , ("%Rrc - Access type %d - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), enmAccessType, GCPhysFault, pDevIns));
823#endif
824
825 iomMmioReleaseRange(pVM, pRange);
826 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
827 }
828#ifdef IN_RING3
829 else
830 iomMmioReleaseRange(pVM, pRange);
831#else
832 else
833 {
834 if (rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
835 {
836 if (enmAccessType == PGMACCESSTYPE_READ)
837 rcStrict = VINF_IOM_R3_MMIO_READ;
838 else
839 {
840 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
841 rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
842 }
843 }
844 iomMmioReleaseRange(pVM, pRange);
845 }
846#endif
847 return rcStrict;
848}
849
850
851/**
852 * Mapping an MMIO2 page in place of an MMIO page for direct access.
853 *
854 * (This is a special optimization used by the VGA device.)
855 *
856 * @returns VBox status code. This API may return VINF_SUCCESS even if no
857 * remapping is made.
858 *
859 * @param pVM The cross context VM structure.
860 * @param GCPhys The address of the MMIO page to be changed.
861 * @param GCPhysRemapped The address of the MMIO2 page.
862 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
863 * for the time being.
864 */
865VMMDECL(int) IOMMMIOMapMMIO2Page(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
866{
867 /* Currently only called from the VGA device during MMIO. */
868 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
869 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
870 PVMCPUCC pVCpu = VMMGetCpu(pVM);
871
872 /* This currently only works in real mode, protected mode without paging or with nested paging. */
873 /** @todo NEM: MMIO page aliasing. */
874 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
875 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
876 && !HMIsNestedPagingActive(pVM)))
877 return VINF_SUCCESS; /* ignore */
878
879 int rc = IOM_LOCK_SHARED(pVM);
880 if (RT_FAILURE(rc))
881 return VINF_SUCCESS; /* better luck the next time around */
882
883 /*
884 * Lookup the context range node the page belongs to.
885 */
886 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
887 AssertMsgReturn(pRange,
888 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
889
890 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
891 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
892
893 /*
894 * Do the aliasing; page align the addresses since PGM is picky.
895 */
896 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
897 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
898
899 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
900
901 IOM_UNLOCK_SHARED(pVM);
902 AssertRCReturn(rc, rc);
903
904 /*
905 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
906 * can simply prefetch it.
907 *
908 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
909 */
910# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
911# ifdef VBOX_STRICT
912 uint64_t fFlags;
913 RTHCPHYS HCPhys;
914 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
915 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
916# endif
917# endif
918 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
919 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
920 return VINF_SUCCESS;
921}
922
923
924/**
925 * Mapping a HC page in place of an MMIO page for direct access.
926 *
927 * (This is a special optimization used by the APIC in the VT-x case.)
928 *
929 * @returns VBox status code.
930 *
931 * @param pVM The cross context VM structure.
932 * @param pVCpu The cross context virtual CPU structure.
933 * @param GCPhys The address of the MMIO page to be changed.
934 * @param HCPhys The address of the host physical page.
935 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
936 * for the time being.
937 */
938VMMDECL(int) IOMMMIOMapMMIOHCPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
939{
940 /* Currently only called from VT-x code during a page fault. */
941 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RHp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
942
943 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
944 /** @todo NEM: MMIO page aliasing. */
945 Assert(HMIsEnabled(pVM));
946
947 /*
948 * Lookup the context range node the page belongs to.
949 */
950# ifdef VBOX_STRICT
951 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
952 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
953 AssertMsgReturn(pRange,
954 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
955 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
956 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
957# endif
958
959 /*
960 * Do the aliasing; page align the addresses since PGM is picky.
961 */
962 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
963 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
964
965 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
966 AssertRCReturn(rc, rc);
967
968 /*
969 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
970 * can simply prefetch it.
971 *
972 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
973 */
974 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
975 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
976 return VINF_SUCCESS;
977}
978
979
980/**
981 * Reset a previously modified MMIO region; restore the access flags.
982 *
983 * @returns VBox status code.
984 *
985 * @param pVM The cross context VM structure.
986 * @param GCPhys Physical address that's part of the MMIO region to be reset.
987 */
988VMMDECL(int) IOMMMIOResetRegion(PVMCC pVM, RTGCPHYS GCPhys)
989{
990 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
991
992 PVMCPUCC pVCpu = VMMGetCpu(pVM);
993
994 /* This currently only works in real mode, protected mode without paging or with nested paging. */
995 /** @todo NEM: MMIO page aliasing. */
996 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
997 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
998 && !HMIsNestedPagingActive(pVM)))
999 return VINF_SUCCESS; /* ignore */
1000
1001 /*
1002 * Lookup the context range node the page belongs to.
1003 */
1004# ifdef VBOX_STRICT
1005 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
1006 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
1007 AssertMsgReturn(pRange,
1008 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
1009 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1010 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1011# endif
1012
1013 /*
1014 * Call PGM to do the work.
1015 *
1016 * After the call, all the pages should be non-present... unless there is
1017 * a page pool flush pending (unlikely).
1018 */
1019 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
1020 AssertRC(rc);
1021
1022# ifdef VBOX_STRICT
1023 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
1024 {
1025 uint32_t cb = pRange->cb;
1026 GCPhys = pRange->GCPhys;
1027 while (cb)
1028 {
1029 uint64_t fFlags;
1030 RTHCPHYS HCPhys;
1031 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1032 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1033 cb -= PAGE_SIZE;
1034 GCPhys += PAGE_SIZE;
1035 }
1036 }
1037# endif
1038 return rc;
1039}
1040
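/*
 * Illustrative usage sketch for the page-aliasing entry points above.  The
 * device code and address parameters are hypothetical (this is not the VGA
 * implementation); only IOMMMIOMapMMIO2Page and IOMMMIOResetRegion, as
 * defined in this file, are assumed.
 */
static void exampleDevEnableDirectPageAccess(PVMCC pVM, RTGCPHYS GCPhysMmioPage, RTGCPHYS GCPhysMmio2Page)
{
    /* Alias the MMIO2 page over the handler-backed MMIO page.  The call may
       legitimately do nothing (e.g. no VT-x/AMD-V or no nested paging) and
       still return VINF_SUCCESS. */
    int rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMmioPage, GCPhysMmio2Page, X86_PTE_RW | X86_PTE_P);
    AssertRC(rc);
}

static void exampleDevDisableDirectPageAccess(PVMCC pVM, RTGCPHYS GCPhysMmioPage)
{
    /* Restore the access handler flags for the whole region the page belongs to. */
    int rc = IOMMMIOResetRegion(pVM, GCPhysMmioPage);
    AssertRC(rc);
}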