VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@61555

Last change on this file since 61555 was 61371, checked in by vboxsync, 9 years ago

iomMmioHandler: Gracefully deal with large MMIO writes and reads (FXSAVE and such) in ring-0 and raw-mode by deflecting them to ring-3. These things shouldn't normally happen.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 45.9 KB
/* $Id: IOMAllMMIO.cpp 61371 2016-06-01 12:58:24Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM
#include <VBox/vmm/iom.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm.h>
#include "IOMInline.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>



#ifndef IN_RING3
/**
 * Defers a pending MMIO write to ring-3.
 *
 * @returns VINF_IOM_R3_MMIO_COMMIT_WRITE
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys  The write address.
 * @param   pvBuf   The bytes being written.
 * @param   cbBuf   How many bytes.
 * @param   pRange  The range, if resolved.
 */
static VBOXSTRICTRC iomMmioRing3WritePending(PVMCPU pVCpu, RTGCPHYS GCPhys, void const *pvBuf, size_t cbBuf, PIOMMMIORANGE pRange)
{
    Log5(("iomMmioRing3WritePending: %RGp LB %#x\n", GCPhys, cbBuf));
    AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
    pVCpu->iom.s.PendingMmioWrite.GCPhys  = GCPhys;
    AssertReturn(cbBuf <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
    pVCpu->iom.s.PendingMmioWrite.cbValue = (uint32_t)cbBuf;
    memcpy(pVCpu->iom.s.PendingMmioWrite.abValue, pvBuf, cbBuf);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
    return VINF_IOM_R3_MMIO_COMMIT_WRITE;
}
#endif
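
/* Note (added): the deferral above sets VMCPU_FF_IOM so that the EMT, once it
 * is back in ring-3, replays the buffered bytes against the range's ring-3
 * write handler.  The replay itself is assumed to live in IOM's ring-3
 * force-flag processing (IOMR3ProcessForceFlag or similar; an assumption, as
 * it is not visible from this file). */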


/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code.  Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     The value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                              void const *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing =    (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                                  || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
                R3STRING(pRange->pszDesc)));
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
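    /* Illustrative note (added): the loop below emits one aligned dword access
     * per containing dword.  For example, a 2-byte write at offset 3 touches
     * two dwords: the first pass merges 1 byte into bits 31:24 of the dword at
     * GCPhys & ~3, the second merges the remaining byte into bits 7:0 of the
     * next dword.  In the READ_MISSING modes the unwritten bits are first
     * fetched from the device so every emitted access is a full dword. */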
    for (;;)
    {
        unsigned const offAccess  = GCPhys & 3;
        unsigned       cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
#ifndef IN_RING3
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    rc2 = VBOXSTRICTRC_TODO(iomMmioRing3WritePending(pVCpu, GCPhys, pvValue, cbValue, pRange));
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    return rc;
#endif
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue & u32GivenMask);
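
        /* Worked example (added): a 1-byte write of 0xab at offset 2 into a
         * dword the device reports as 0x11223344 gives
         * u32GivenValue = 0x00ab0000 and u32GivenMask = 0x00ff0000, hence
         * u32Value = (0x11223344 & ~0x00ff0000) | 0x00ab0000 = 0x11ab3344. */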

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
#ifndef IN_RING3
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                Log3(("iomMMIODoComplicatedWrite: deferring GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
                AssertReturn(cbValue + (GCPhys & 3) <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
                pVCpu->iom.s.PendingMmioWrite.GCPhys  = GCPhys & ~(RTGCPHYS)3;
                pVCpu->iom.s.PendingMmioWrite.cbValue = cbValue + (GCPhys & 3);
                *(uint32_t *)pVCpu->iom.s.PendingMmioWrite.abValue = u32Value;
                if (cbValue > cbThisPart)
                    memcpy(&pVCpu->iom.s.PendingMmioWrite.abValue[4],
                           (uint8_t const *)pvValue + cbThisPart, cbValue - cbThisPart);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
                if (rc == VINF_SUCCESS)
                    rc = VINF_IOM_R3_MMIO_COMMIT_WRITE;
                return rc2;
#endif
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}




/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
                                   const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                          GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rcStrict = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhysFault, pvData, cb);
    }
    else
        rcStrict = VINF_SUCCESS;

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code.  Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects we
                 *        could be kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8;
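
        /* Worked example (added): if the aligned dword reads back as
         * 0xddccbbaa and GCPhys & 3 is 1, the shift leaves 0x00ddccbb, so the
         * stores below copy 0xbb, 0xcc and 0xdd for a 3-byte read at that
         * offset. */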

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_FF.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the 0xff bytes.
 * @param   cbValue     How many bytes to read.
 */
static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t *)pvValue  = UINT8_C(0xff); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0xff);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_00.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the zeros.
 * @param   cbValue     How many bytes to read.
 */
static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t *)pvValue  = UINT8_C(0x00); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0x00);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                       void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (   cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                         pvValue, cbValue);
        else
            rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rcStrict = VINF_IOM_MMIO_UNUSED_FF;
    if (rcStrict != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rcStrict))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}
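
/* Note (added): when a range has no read callback for the current context,
 * iomMMIODoRead above falls back to VINF_IOM_MMIO_UNUSED_FF, so the guest
 * reads all 0xff bytes, which matches what reads from unclaimed bus addresses
 * typically return on real hardware. */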


/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}



/**
 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
                                           RTGCPHYS GCPhysFault, void *pvUser)
{
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else
    /*
     * Locate the statistics.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case,
     * so do the simple test first and then try to deal with uErrorCode being N/A.
     */
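    /* Explanatory note (added): the fast path stays in this context only when
     * both the read and write callbacks are present here.  Otherwise, without
     * an error code (uErrorCode == UINT32_MAX, the EPT misconfig case) we
     * defer whenever ring-3 has a callback for either direction; with an
     * error code we defer only when the callback for the faulting direction
     * is missing in this context but present in ring-3. */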
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Do the locking; the range was already retained above.
     */
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Let IEM call us back via iomMmioHandler.
     */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    NOREF(pCtxCore); NOREF(GCPhysFault);
    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    if (RT_SUCCESS(rcStrict))
        return rcStrict;
    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
        Log(("IOM: Hit unsupported IEM feature!\n"));
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    }
    return rcStrict;
}


/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      \#PF access handler callback for MMIO pages.}
 *
 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
 */
DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
                                          RTGCPHYS GCPhysFault, void *pvUser)
{
    LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
             GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); NOREF(pvFault);
    return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
}


/**
 * Physical access handler for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
{
    /*
     * We don't have a range here, so look it up before calling the common function.
     */
    int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
#ifndef IN_RING3
    if (rc2 == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
    if (RT_UNLIKELY(!pRange))
    {
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
    IOM_UNLOCK_SHARED(pVM);

    VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);

    iomMmioReleaseRange(pVM, pRange);
    return VBOXSTRICTRC_VAL(rcStrict);
}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
 *
 * @remarks The @a pvUser argument points to the MMIO range entry.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
                                              size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);

    NOREF(pvPhys); NOREF(enmOrigin);
    AssertPtr(pRange);
    AssertMsg(cbBuf >= 1, ("%zu\n", cbBuf));


#ifndef IN_RING3
    /*
     * If someone is doing FXSAVE, FXRSTOR, XSAVE, XRSTOR or other stuff dealing with
     * large amounts of data, just go to ring-3 where we don't need to deal with partial
     * successes.  No chance any of these will be problematic read-modify-write stuff.
     */
    if (cbBuf > sizeof(pVCpu->iom.s.PendingMmioWrite.abValue))
        return enmAccessType == PGMACCESSTYPE_WRITE ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
#endif
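
    /* Background (added note): FXSAVE stores a 512-byte area and XSAVE can
     * store even more, while the PendingMmioWrite.abValue buffer used for
     * deferrals is much smaller, so such accesses are simply restarted in
     * ring-3 and handled there in one piece. */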

    /*
     * Validate the range.
     */
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
    {
        if (enmAccessType == PGMACCESSTYPE_READ)
            return VINF_IOM_R3_MMIO_READ;
        Assert(enmAccessType == PGMACCESSTYPE_WRITE);
        return iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, NULL /*pRange*/);
    }
#endif
    AssertRC(rc);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));

    /*
     * Perform locking.
     */
    iomMmioRetainRange(pRange);
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    IOM_UNLOCK_SHARED(pVM);
    VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rcStrict == VINF_SUCCESS)
    {
        /*
         * Perform the access.
         */
        if (enmAccessType == PGMACCESSTYPE_READ)
            rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
        else
        {
            rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
#ifndef IN_RING3
            if (rcStrict == VINF_IOM_R3_MMIO_WRITE)
                rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
#endif
        }

        /* Check the return code. */
#ifdef IN_RING3
        AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
#else
        AssertMsg(   rcStrict == VINF_SUCCESS
                  || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
                  || (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE && enmAccessType == PGMACCESSTYPE_WRITE)
                  || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
                  || rcStrict == VINF_EM_DBG_STOP
                  || rcStrict == VINF_EM_DBG_EVENT
                  || rcStrict == VINF_EM_DBG_BREAKPOINT
                  || rcStrict == VINF_EM_OFF
                  || rcStrict == VINF_EM_SUSPEND
                  || rcStrict == VINF_EM_RESET
                  || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
                  //|| rcStrict == VINF_EM_HALT       /* ?? */
                  //|| rcStrict == VINF_EM_NO_MEMORY  /* ?? */
                  , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
#endif

        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    }
#ifdef IN_RING3
    else
        iomMmioReleaseRange(pVM, pRange);
#else
    else
    {
        if (rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
        {
            if (enmAccessType == PGMACCESSTYPE_READ)
                rcStrict = VINF_IOM_R3_MMIO_READ;
            else
            {
                Assert(enmAccessType == PGMACCESSTYPE_WRITE);
                rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
            }
        }
        iomMmioReleaseRange(pVM, pRange);
    }
#endif
    return rcStrict;
}


#ifdef IN_RING3 /* Only used by REM. */

/**
 * Reads an MMIO register.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys      The physical address to read.
 * @param   pu32Value   Where to store the value read.
 * @param   cbValue     The size of the register to read in bytes.  1, 2 or 4 bytes.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
{
    Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
    /* Take the IOM lock before performing any MMIO. */
    VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
#endif
    AssertRC(VBOXSTRICTRC_VAL(rc));
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    IEMNotifyMMIORead(pVM, GCPhys, cbValue);
#endif

    /*
     * Lookup the current context range node and statistics.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    if (!pRange)
    {
        AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else  /* VBOX_WITH_STATISTICS */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
    STAM_COUNTER_INC(&pStats->Accesses);
#endif /* VBOX_WITH_STATISTICS */

    if (pRange->CTX_SUFF(pfnReadCallback))
    {
        /*
         * Perform locking.
         */
        PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
        if (rc != VINF_SUCCESS)
        {
            iomMmioReleaseRange(pVM, pRange);
            return rc;
        }

        /*
         * Perform the read and deal with the result.
         */
        STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
        if (   (cbValue == 4 && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (cbValue == 8 && !(GCPhys & 7)) )
            rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                   pu32Value, (unsigned)cbValue);
        else
            rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
        STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
        switch (VBOXSTRICTRC_VAL(rc))
        {
            case VINF_SUCCESS:
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return rc;
#ifndef IN_RING3
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
#endif
            default:
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return rc;

            case VINF_IOM_MMIO_UNUSED_00:
                iomMMIODoRead00s(pu32Value, cbValue);
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return VINF_SUCCESS;

            case VINF_IOM_MMIO_UNUSED_FF:
                iomMMIODoReadFFs(pu32Value, cbValue);
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return VINF_SUCCESS;
        }
        /* not reached */
    }
#ifndef IN_RING3
    if (pRange->pfnReadCallbackR3)
    {
        STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ;
    }
#endif

    /*
     * Unassigned memory - this is actually not supposed to happen...
     */
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    iomMMIODoReadFFs(pu32Value, cbValue);
    Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
    iomMmioReleaseRange(pVM, pRange);
    return VINF_SUCCESS;
}
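
/* Usage sketch (illustrative, not from this file): reading a 32-bit register
 * the REM way, with GCPhysReg a hypothetical register address:
 *
 *     uint32_t u32 = 0;
 *     VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, GCPhysReg, &u32, sizeof(u32));
 *     if (rcStrict == VINF_SUCCESS)
 *         ... use u32 ...
 */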


/**
 * Writes to an MMIO register.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys      The physical address to write to.
 * @param   u32Value    The value to write.
 * @param   cbValue     The size of the register to write in bytes.  1, 2 or 4 bytes.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
{
    Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
    /* Take the IOM lock before performing any MMIO. */
    VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
#endif
    AssertRC(VBOXSTRICTRC_VAL(rc));
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
#endif

    /*
     * Lookup the current context range node.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    if (!pRange)
    {
        AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else  /* VBOX_WITH_STATISTICS */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
    STAM_COUNTER_INC(&pStats->Accesses);
#endif /* VBOX_WITH_STATISTICS */

    if (pRange->CTX_SUFF(pfnWriteCallback))
    {
        /*
         * Perform locking.
         */
        PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
        if (rc != VINF_SUCCESS)
        {
            iomMmioReleaseRange(pVM, pRange);
            return rc;
        }

        /*
         * Perform the write.
         */
        STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
        if (   (cbValue == 4 && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cbValue == 8 && !(GCPhys & 7)) )
            rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys, &u32Value, (unsigned)cbValue);
        else
            rc = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhys, &u32Value, (unsigned)cbValue);
        STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
#ifndef IN_RING3
        if (   rc == VINF_IOM_R3_MMIO_WRITE
            || rc == VINF_IOM_R3_MMIO_READ_WRITE)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
#endif
        Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
        return rc;
    }
#ifndef IN_RING3
    if (pRange->pfnWriteCallbackR3)
    {
        STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_WRITE;
    }
#endif

    /*
     * No write handler, nothing to do.
     */
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
    iomMmioReleaseRange(pVM, pRange);
    return VINF_SUCCESS;
}

#endif /* IN_RING3 - only used by REM. */
#ifndef IN_RC

/**
 * Mapping an MMIO2 page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the VGA device.)
 *
 * @returns VBox status code.  This API may return VINF_SUCCESS even if no
 *          remapping is made.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          The address of the MMIO page to be changed.
 * @param   GCPhysRemapped  The address of the MMIO2 page.
 * @param   fPageFlags      Page flags to set.  Must be (X86_PTE_RW | X86_PTE_P)
 *                          for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
{
# ifndef IEM_VERIFICATION_MODE_FULL
    /* Currently only called from the VGA device during MMIO. */
    Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if (   !HMIsEnabled(pVM)    /* useless without VT-x/AMD-V */
        || (   CPUMIsGuestInPagedProtectedMode(pVCpu)
            && !HMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    int rc = IOM_LOCK_SHARED(pVM);
    if (RT_FAILURE(rc))
        return VINF_SUCCESS; /* better luck the next time around */

    /*
     * Lookup the context range node the page belongs to.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);

    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys         &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;

    rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);

    IOM_UNLOCK_SHARED(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table.  Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
#  if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
#   ifdef VBOX_STRICT
    uint64_t fFlags;
    RTHCPHYS HCPhys;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
    Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
#   endif
#  endif
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
# endif /* !IEM_VERIFICATION_MODE_FULL */
    return VINF_SUCCESS;
}
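
/* Usage sketch (illustrative, not taken from this file): a device like VGA
 * could alias a frame-buffer page from its access handler and later undo it,
 * roughly:
 *
 *     rc = IOMMMIOMapMMIO2Page(pVM, GCPhysPage, GCPhysMmio2Page,
 *                              X86_PTE_RW | X86_PTE_P);
 *     ...
 *     rc = IOMMMIOResetRegion(pVM, GCPhysPage);
 *
 * GCPhysPage and GCPhysMmio2Page are hypothetical page-aligned addresses. */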


# ifndef IEM_VERIFICATION_MODE_FULL
/**
 * Mapping a HC page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the APIC in the VT-x case.)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhys      The address of the MMIO page to be changed.
 * @param   HCPhys      The address of the host physical page.
 * @param   fPageFlags  Page flags to set.  Must be (X86_PTE_RW | X86_PTE_P)
 *                      for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
{
    /* Currently only called from VT-x code during a page fault. */
    Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));

    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    Assert(HMIsEnabled(pVM));

    /*
     * Lookup the context range node the page belongs to.
     */
#  ifdef VBOX_STRICT
    /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
    PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
#  endif

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;

    int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table.  Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    return VINF_SUCCESS;
}
# endif /* !IEM_VERIFICATION_MODE_FULL */


/**
 * Reset a previously modified MMIO region; restore the access flags.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Physical address that's part of the MMIO region to be reset.
 */
VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
{
    Log(("IOMMMIOResetRegion %RGp\n", GCPhys));

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if (   !HMIsEnabled(pVM)    /* useless without VT-x/AMD-V */
        || (   CPUMIsGuestInPagedProtectedMode(pVCpu)
            && !HMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    /*
     * Lookup the context range node the page belongs to.
     */
# ifdef VBOX_STRICT
    /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
    PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
# endif

    /*
     * Call PGM to do the work.
     *
     * After the call, all the pages should be non-present... unless there is
     * a page pool flush pending (unlikely).
     */
    int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
    AssertRC(rc);

# ifdef VBOX_STRICT
    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    {
        uint32_t cb = pRange->cb;
        GCPhys = pRange->GCPhys;
        while (cb)
        {
            uint64_t fFlags;
            RTHCPHYS HCPhys;
            rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
            Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
            cb     -= PAGE_SIZE;
            GCPhys += PAGE_SIZE;
        }
    }
# endif
    return rc;
}

#endif /* !IN_RC */
