VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@72306

Last change on this file since 72306 was 72248, checked in by vboxsync, 7 years ago

iomMmioRing3WritePending: Deal with fun IEM cross MMIO page scenario.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 46.7 KB
1/* $Id: IOMAllMMIO.cpp 72248 2018-05-17 17:32:22Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#include <VBox/vmm/iem.h>
32#include "IOMInternal.h"
33#include <VBox/vmm/vm.h>
34#include <VBox/vmm/vmm.h>
35#include <VBox/vmm/hm.h>
36#include "IOMInline.h"
37
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48
49
50#ifndef IN_RING3
51/**
52 * Defers a pending MMIO write to ring-3.
53 *
54 * @returns VINF_IOM_R3_MMIO_COMMIT_WRITE
55 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
56 * @param GCPhys The write address.
57 * @param pvBuf The bytes being written.
58 * @param cbBuf How many bytes.
59 * @param pRange The range, if resolved.
60 */
61static VBOXSTRICTRC iomMmioRing3WritePending(PVMCPU pVCpu, RTGCPHYS GCPhys, void const *pvBuf, size_t cbBuf, PIOMMMIORANGE pRange)
62{
63 Log5(("iomMmioRing3WritePending: %RGp LB %#x\n", GCPhys, cbBuf));
64 if (pVCpu->iom.s.PendingMmioWrite.cbValue == 0)
65 {
66 pVCpu->iom.s.PendingMmioWrite.GCPhys = GCPhys;
67 AssertReturn(cbBuf <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
68 pVCpu->iom.s.PendingMmioWrite.cbValue = (uint32_t)cbBuf;
69 memcpy(pVCpu->iom.s.PendingMmioWrite.abValue, pvBuf, cbBuf);
70 }
71 else
72 {
73 /*
74 * Join with pending if adjacent.
75 *
76 * This may happen if the stack overflows into MMIO territory and RSP/ESP/SP
77 * isn't aligned. IEM will bounce buffer the access and do one write for each
78 * page. We get here when the 2nd page part is written.
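 * Illustrative example: a 4-byte access straddling a page boundary is
 * bounce-buffered by IEM and delivered as two writes, one per page; when the
 * second part arrives here it must be adjacent to the first and is simply
 * appended to the pending buffer.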
79 */
80 uint32_t const cbOldValue = pVCpu->iom.s.PendingMmioWrite.cbValue;
81 AssertMsgReturn(GCPhys == pVCpu->iom.s.PendingMmioWrite.GCPhys + cbOldValue,
82 ("pending %RGp LB %#x; incoming %RGp LB %#x\n",
83 pVCpu->iom.s.PendingMmioWrite.GCPhys, cbOldValue, GCPhys, cbBuf),
84 VERR_IOM_MMIO_IPE_1);
85 AssertReturn(cbBuf <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue) - cbOldValue, VERR_IOM_MMIO_IPE_2);
86 pVCpu->iom.s.PendingMmioWrite.cbValue = cbOldValue + (uint32_t)cbBuf;
87 memcpy(&pVCpu->iom.s.PendingMmioWrite.abValue[cbOldValue], pvBuf, cbBuf);
88 }
89
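 /* Assumption: ring-3 notices VMCPU_FF_IOM (set below) together with the
    VINF_IOM_R3_MMIO_COMMIT_WRITE status and commits the buffered value to the
    device before guest execution resumes. */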
90 VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
91 RT_NOREF_PV(pRange);
92 return VINF_IOM_R3_MMIO_COMMIT_WRITE;
93}
94#endif
95
96
97/**
98 * Deals with complicated MMIO writes.
99 *
100 * Complicated means unaligned or non-dword/qword sized accesses depending on
101 * the MMIO region's access mode flags.
102 *
103 * @returns Strict VBox status code. Any EM scheduling status code,
104 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
105 * VINF_IOM_R3_MMIO_READ may be returned.
106 *
107 * @param pVM The cross context VM structure.
108 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
109 * @param pRange The range to write to.
110 * @param GCPhys The physical address to start writing.
111 * @param pvValue The value to write.
112 * @param cbValue The size of the value to write.
113 */
114static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
115 void const *pvValue, unsigned cbValue)
116{
117 RT_NOREF_PV(pVCpu);
118 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
119 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
120 VERR_IOM_MMIO_IPE_1);
121 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
122 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
123 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
124 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
125
126 /*
127 * Do debug stop if requested.
128 */
129 int rc = VINF_SUCCESS; NOREF(pVM);
130#ifdef VBOX_STRICT
131 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
132 {
133# ifdef IN_RING3
134 LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
135 R3STRING(pRange->pszDesc)));
136 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
137 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
138 if (rc == VERR_DBGF_NOT_ATTACHED)
139 rc = VINF_SUCCESS;
140# else
141 return VINF_IOM_R3_MMIO_WRITE;
142# endif
143 }
144#endif
145
146 /*
147 * Check if we should ignore the write.
148 */
149 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
150 {
151 Assert(cbValue != 4 || (GCPhys & 3));
152 return VINF_SUCCESS;
153 }
154 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
155 {
156 Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
157 return VINF_SUCCESS;
158 }
159
160 /*
161 * Split and conquer.
162 */
163 for (;;)
164 {
165 unsigned const offAccess = GCPhys & 3;
166 unsigned cbThisPart = 4 - offAccess;
167 if (cbThisPart > cbValue)
168 cbThisPart = cbValue;
169
170 /*
171 * Get the missing bits (if any).
172 */
173 uint32_t u32MissingValue = 0;
174 if (fReadMissing && cbThisPart != 4)
175 {
176 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
177 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
178 switch (rc2)
179 {
180 case VINF_SUCCESS:
181 break;
182 case VINF_IOM_MMIO_UNUSED_FF:
183 u32MissingValue = UINT32_C(0xffffffff);
184 break;
185 case VINF_IOM_MMIO_UNUSED_00:
186 u32MissingValue = 0;
187 break;
188#ifndef IN_RING3
189 case VINF_IOM_R3_MMIO_READ:
190 case VINF_IOM_R3_MMIO_READ_WRITE:
191 case VINF_IOM_R3_MMIO_WRITE:
192 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
193 rc2 = VBOXSTRICTRC_TODO(iomMmioRing3WritePending(pVCpu, GCPhys, pvValue, cbValue, pRange));
194 if (rc == VINF_SUCCESS || rc2 < rc)
195 rc = rc2;
196 return rc;
197#endif
198 default:
199 if (RT_FAILURE(rc2))
200 {
201 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
202 return rc2;
203 }
204 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
205 if (rc == VINF_SUCCESS || rc2 < rc)
206 rc = rc2;
207 break;
208 }
209 }
210
211 /*
212 * Merge missing and given bits.
213 */
214 uint32_t u32GivenMask;
215 uint32_t u32GivenValue;
216 switch (cbThisPart)
217 {
218 case 1:
219 u32GivenValue = *(uint8_t const *)pvValue;
220 u32GivenMask = UINT32_C(0x000000ff);
221 break;
222 case 2:
223 u32GivenValue = *(uint16_t const *)pvValue;
224 u32GivenMask = UINT32_C(0x0000ffff);
225 break;
226 case 3:
227 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
228 ((uint8_t const *)pvValue)[2], 0);
229 u32GivenMask = UINT32_C(0x00ffffff);
230 break;
231 case 4:
232 u32GivenValue = *(uint32_t const *)pvValue;
233 u32GivenMask = UINT32_C(0xffffffff);
234 break;
235 default:
236 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
237 }
238 if (offAccess)
239 {
240 u32GivenValue <<= offAccess * 8;
241 u32GivenMask <<= offAccess * 8;
242 }
243
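 /* Illustrative example: a one byte write of 0xAB at offset 2 yields
    u32GivenValue = 0x00AB0000 and u32GivenMask = 0x00ff0000 after the shift
    above, so byte 2 of u32Value below comes from the guest data while the
    other three bytes keep u32MissingValue. */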
244 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
245 | (u32GivenValue & u32GivenMask);
246
247 /*
248 * Do DWORD write to the device.
249 */
250 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
251 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
252 switch (rc2)
253 {
254 case VINF_SUCCESS:
255 break;
256#ifndef IN_RING3
257 case VINF_IOM_R3_MMIO_READ:
258 case VINF_IOM_R3_MMIO_READ_WRITE:
259 case VINF_IOM_R3_MMIO_WRITE:
260 Log3(("iomMMIODoComplicatedWrite: deferring GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
261 AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
262 AssertReturn(cbValue + (GCPhys & 3) <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
263 pVCpu->iom.s.PendingMmioWrite.GCPhys = GCPhys & ~(RTGCPHYS)3;
264 pVCpu->iom.s.PendingMmioWrite.cbValue = cbValue + (GCPhys & 3);
265 *(uint32_t *)pVCpu->iom.s.PendingMmioWrite.abValue = u32Value;
266 if (cbValue > cbThisPart)
267 memcpy(&pVCpu->iom.s.PendingMmioWrite.abValue[4],
268 (uint8_t const *)pvValue + cbThisPart, cbValue - cbThisPart);
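 /* At this point abValue holds the merged dword followed by the untouched
    remainder of the caller's data, so ring-3 can replay the whole access
    from the dword-aligned start address. */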
269 VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
270 if (rc == VINF_SUCCESS)
271 rc = VINF_IOM_R3_MMIO_COMMIT_WRITE;
272 return rc2;
273#endif
274 default:
275 if (RT_FAILURE(rc2))
276 {
277 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
278 return rc2;
279 }
280 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
281 if (rc == VINF_SUCCESS || rc2 < rc)
282 rc = rc2;
283 break;
284 }
285
286 /*
287 * Advance.
288 */
289 cbValue -= cbThisPart;
290 if (!cbValue)
291 break;
292 GCPhys += cbThisPart;
293 pvValue = (uint8_t const *)pvValue + cbThisPart;
294 }
295
296 return rc;
297}
298
299
300
301
302/**
303 * Wrapper which does the write and updates range statistics when such are enabled.
304 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
305 */
306static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
307 const void *pvData, unsigned cb)
308{
309#ifdef VBOX_WITH_STATISTICS
310 int rcSem = IOM_LOCK_SHARED(pVM);
311 if (rcSem == VERR_SEM_BUSY)
312 return VINF_IOM_R3_MMIO_WRITE;
313 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
314 if (!pStats)
315# ifdef IN_RING3
316 return VERR_NO_MEMORY;
317# else
318 return VINF_IOM_R3_MMIO_WRITE;
319# endif
320 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
321#else
322 NOREF(pVCpu);
323#endif
324
325 VBOXSTRICTRC rcStrict;
326 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
327 {
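 /* Fast path: naturally aligned dword accesses, pass-through ranges, and
    (when the write mode allows it) aligned qword accesses go straight to the
    device callback; everything else takes the read-merge-write route in
    iomMMIODoComplicatedWrite. */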
328 if ( (cb == 4 && !(GCPhysFault & 3))
329 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
330 || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
331 rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
332 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
333 else
334 rcStrict = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhysFault, pvData, cb);
335 }
336 else
337 rcStrict = VINF_SUCCESS;
338
339 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
340 STAM_COUNTER_INC(&pStats->Accesses);
341 return rcStrict;
342}
343
344
345/**
346 * Deals with complicated MMIO reads.
347 *
348 * Complicated means unaligned or non-dword/qword sized accesses depending on
349 * the MMIO region's access mode flags.
350 *
351 * @returns Strict VBox status code. Any EM scheduling status code,
352 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
353 * VINF_IOM_R3_MMIO_WRITE may be returned.
354 *
355 * @param pVM The cross context VM structure.
356 * @param pRange The range to read from.
357 * @param GCPhys The physical address to start reading.
358 * @param pvValue Where to store the value.
359 * @param cbValue The size of the value to read.
360 */
361static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
362{
363 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
364 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
365 VERR_IOM_MMIO_IPE_1);
366 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
367 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
368
369 /*
370 * Do debug stop if requested.
371 */
372 int rc = VINF_SUCCESS; NOREF(pVM);
373#ifdef VBOX_STRICT
374 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
375 {
376# ifdef IN_RING3
377 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
378 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
379 if (rc == VERR_DBGF_NOT_ATTACHED)
380 rc = VINF_SUCCESS;
381# else
382 return VINF_IOM_R3_MMIO_READ;
383# endif
384 }
385#endif
386
387 /*
388 * Split and conquer.
389 */
390 for (;;)
391 {
392 /*
393 * Do DWORD read from the device.
394 */
395 uint32_t u32Value;
396 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
397 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
398 switch (rc2)
399 {
400 case VINF_SUCCESS:
401 break;
402 case VINF_IOM_MMIO_UNUSED_FF:
403 u32Value = UINT32_C(0xffffffff);
404 break;
405 case VINF_IOM_MMIO_UNUSED_00:
406 u32Value = 0;
407 break;
408 case VINF_IOM_R3_MMIO_READ:
409 case VINF_IOM_R3_MMIO_READ_WRITE:
410 case VINF_IOM_R3_MMIO_WRITE:
411 /** @todo What if we've split a transfer and already read
412 * something? Since reads can have side effects we could be
413 * kind of screwed here... */
414 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
415 return rc2;
416 default:
417 if (RT_FAILURE(rc2))
418 {
419 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
420 return rc2;
421 }
422 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
423 if (rc == VINF_SUCCESS || rc2 < rc)
424 rc = rc2;
425 break;
426 }
427 u32Value >>= (GCPhys & 3) * 8;
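 /* Shift example (illustrative): a 1-byte read at offset 3 of the dword
    shifts the value right by 24 bits so the wanted byte ends up at bit 0
    for the partial store below. */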
428
429 /*
430 * Write what we've read.
431 */
432 unsigned cbThisPart = 4 - (GCPhys & 3);
433 if (cbThisPart > cbValue)
434 cbThisPart = cbValue;
435
436 switch (cbThisPart)
437 {
438 case 1:
439 *(uint8_t *)pvValue = (uint8_t)u32Value;
440 break;
441 case 2:
442 *(uint16_t *)pvValue = (uint16_t)u32Value;
443 break;
444 case 3:
445 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
446 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
447 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
448 break;
449 case 4:
450 *(uint32_t *)pvValue = u32Value;
451 break;
452 }
453
454 /*
455 * Advance.
456 */
457 cbValue -= cbThisPart;
458 if (!cbValue)
459 break;
460 GCPhys += cbThisPart;
461 pvValue = (uint8_t *)pvValue + cbThisPart;
462 }
463
464 return rc;
465}
466
467
468/**
469 * Implements VINF_IOM_MMIO_UNUSED_FF.
470 *
471 * @returns VINF_SUCCESS.
472 * @param pvValue Where to store the 0xff bytes.
473 * @param cbValue How many bytes to read.
474 */
475static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
476{
477 switch (cbValue)
478 {
479 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
480 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
481 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
482 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
483 default:
484 {
485 uint8_t *pb = (uint8_t *)pvValue;
486 while (cbValue--)
487 *pb++ = UINT8_C(0xff);
488 break;
489 }
490 }
491 return VINF_SUCCESS;
492}
493
494
495/**
496 * Implements VINF_IOM_MMIO_UNUSED_00.
497 *
498 * @returns VINF_SUCCESS.
499 * @param pvValue Where to store the zeros.
500 * @param cbValue How many bytes to read.
501 */
502static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
503{
504 switch (cbValue)
505 {
506 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
507 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
508 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
509 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
510 default:
511 {
512 uint8_t *pb = (uint8_t *)pvValue;
513 while (cbValue--)
514 *pb++ = UINT8_C(0x00);
515 break;
516 }
517 }
518 return VINF_SUCCESS;
519}
520
521
522/**
523 * Wrapper which does the read and updates range statistics when such are enabled.
524 */
525DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
526 void *pvValue, unsigned cbValue)
527{
528#ifdef VBOX_WITH_STATISTICS
529 int rcSem = IOM_LOCK_SHARED(pVM);
530 if (rcSem == VERR_SEM_BUSY)
531 return VINF_IOM_R3_MMIO_READ;
532 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
533 if (!pStats)
534# ifdef IN_RING3
535 return VERR_NO_MEMORY;
536# else
537 return VINF_IOM_R3_MMIO_READ;
538# endif
539 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
540#else
541 NOREF(pVCpu);
542#endif
543
544 VBOXSTRICTRC rcStrict;
545 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
546 {
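 /* Fast path mirroring iomMMIODoWrite: aligned dword (or, for DWORD_QWORD
    ranges, aligned qword) and pass-through reads call the device directly;
    the rest are split up by iomMMIODoComplicatedRead. */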
547 if ( ( cbValue == 4
548 && !(GCPhys & 3))
549 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
550 || ( cbValue == 8
551 && !(GCPhys & 7)
552 && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
553 rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
554 pvValue, cbValue);
555 else
556 rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
557 }
558 else
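 /* No read callback: treat the range as unassigned and let the UNUSED_FF
    handling below fill the buffer with 0xff. */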
559 rcStrict = VINF_IOM_MMIO_UNUSED_FF;
560 if (rcStrict != VINF_SUCCESS)
561 {
562 switch (VBOXSTRICTRC_VAL(rcStrict))
563 {
564 case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
565 case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
566 }
567 }
568
569 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
570 STAM_COUNTER_INC(&pStats->Accesses);
571 return rcStrict;
572}
573
574/**
575 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
576 *
577 * @returns VBox status code (appropriate for GC return).
578 * @param pVM The cross context VM structure.
579 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
580 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
581 * any error code (the EPT misconfig hack).
582 * @param pCtxCore Trap register frame.
583 * @param GCPhysFault The GC physical address corresponding to pvFault.
584 * @param pvUser Pointer to the MMIO ring-3 range entry.
585 */
586static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
587 RTGCPHYS GCPhysFault, void *pvUser)
588{
589 RT_NOREF_PV(uErrorCode);
590 int rc = IOM_LOCK_SHARED(pVM);
591#ifndef IN_RING3
592 if (rc == VERR_SEM_BUSY)
593 return VINF_IOM_R3_MMIO_READ_WRITE;
594#endif
595 AssertRC(rc);
596
597 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
598 Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
599
600 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
601 Assert(pRange);
602 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
603 iomMmioRetainRange(pRange);
604#ifndef VBOX_WITH_STATISTICS
605 IOM_UNLOCK_SHARED(pVM);
606
607#else
608 /*
609 * Locate the statistics.
610 */
611 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
612 if (!pStats)
613 {
614 iomMmioReleaseRange(pVM, pRange);
615# ifdef IN_RING3
616 return VERR_NO_MEMORY;
617# else
618 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
619 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
620 return VINF_IOM_R3_MMIO_READ_WRITE;
621# endif
622 }
623#endif
624
625#ifndef IN_RING3
626 /*
627 * Should we defer the request right away? This isn't usually the case, so
628 * do the simple test first and then try to deal with uErrorCode being N/A.
629 */
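 /* In short: if this context lacks the callback the access needs but ring-3
    provides one, go to ring-3 right away instead of bouncing through IEM
    first. */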
630 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
631 || !pRange->CTX_SUFF(pfnReadCallback))
632 && ( uErrorCode == UINT32_MAX
633 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
634 : uErrorCode & X86_TRAP_PF_RW
635 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
636 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
637 )
638 )
639 )
640 {
641 if (uErrorCode & X86_TRAP_PF_RW)
642 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
643 else
644 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
645
646 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
647 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
648 iomMmioReleaseRange(pVM, pRange);
649 return VINF_IOM_R3_MMIO_READ_WRITE;
650 }
651#endif /* !IN_RING3 */
652
653 /*
654 * The range was already retained above; now enter the device's critical section.
655 */
656 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
657 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
658 if (rc != VINF_SUCCESS)
659 {
660 iomMmioReleaseRange(pVM, pRange);
661 return rc;
662 }
663
664 /*
665 * Let IEM call us back via iomMmioHandler.
666 */
667 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
668
669 NOREF(pCtxCore); NOREF(GCPhysFault);
670 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
671 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
672 iomMmioReleaseRange(pVM, pRange);
673 if (RT_SUCCESS(rcStrict))
674 return rcStrict;
675 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
676 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
677 {
678 Log(("IOM: Hit unsupported IEM feature!\n"));
679 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
680 }
681 return rcStrict;
682}
683
684
685/**
686 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
687 * \#PF access handler callback for MMIO pages.}
688 *
689 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
690 */
691DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
692 RTGCPHYS GCPhysFault, void *pvUser)
693{
694 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
695 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); NOREF(pvFault);
696 return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
697}
698
699
700/**
701 * Physical access handler for MMIO ranges.
702 *
703 * @returns VBox status code (appropriate for GC return).
704 * @param pVM The cross context VM structure.
705 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
706 * @param uErrorCode CPU Error code.
707 * @param pCtxCore Trap register frame.
708 * @param GCPhysFault The GC physical address.
709 */
710VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
711{
712 /*
713 * We don't have a range here, so look it up before calling the common function.
714 */
715 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
716#ifndef IN_RING3
717 if (rc2 == VERR_SEM_BUSY)
718 return VINF_IOM_R3_MMIO_READ_WRITE;
719#endif
720 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
721 if (RT_UNLIKELY(!pRange))
722 {
723 IOM_UNLOCK_SHARED(pVM);
724 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
725 }
726 iomMmioRetainRange(pRange);
727 IOM_UNLOCK_SHARED(pVM);
728
729 VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
730
731 iomMmioReleaseRange(pVM, pRange);
732 return VBOXSTRICTRC_VAL(rcStrict);
733}
734
735
736/**
737 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
738 *
739 * @remarks The @a pvUser argument points to the MMIO range entry.
740 */
741PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
742 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
743{
744 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
745 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
746
747 NOREF(pvPhys); NOREF(enmOrigin);
748 AssertPtr(pRange);
749 AssertMsg(cbBuf >= 1, ("%zu\n", cbBuf));
750
751
752#ifndef IN_RING3
753 /*
754 * If someone is doing FXSAVE, FXRSTOR, XSAVE, XRSTOR or other stuff dealing with
755 * large amounts of data, just go to ring-3 where we don't need to deal with partial
756 * successes. No chance any of these will be problematic read-modify-write stuff.
757 */
758 if (cbBuf > sizeof(pVCpu->iom.s.PendingMmioWrite.abValue))
759 return enmAccessType == PGMACCESSTYPE_WRITE ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
760#endif
761
762 /*
763 * Validate the range.
764 */
765 int rc = IOM_LOCK_SHARED(pVM);
766#ifndef IN_RING3
767 if (rc == VERR_SEM_BUSY)
768 {
769 if (enmAccessType == PGMACCESSTYPE_READ)
770 return VINF_IOM_R3_MMIO_READ;
771 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
772 return iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, NULL /*pRange*/);
773 }
774#endif
775 AssertRC(rc);
776 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
777
778 /*
779 * Perform locking.
780 */
781 iomMmioRetainRange(pRange);
782 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
783 IOM_UNLOCK_SHARED(pVM);
784#ifdef IN_RING3
785 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
786#else
787 VBOXSTRICTRC rcStrict = pDevIns ? PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE)
788 : VINF_IOM_R3_MMIO_READ_WRITE;
789#endif
790 if (rcStrict == VINF_SUCCESS)
791 {
792 /*
793 * Perform the access.
794 */
795 if (enmAccessType == PGMACCESSTYPE_READ)
796 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
797 else
798 {
799 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
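 /* Below (non-ring-3 builds only): a fully deferred write is turned into a
    pending buffered write so the instruction can complete in this context
    and ring-3 only has to commit the bytes. */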
800#ifndef IN_RING3
801 if (rcStrict == VINF_IOM_R3_MMIO_WRITE)
802 rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
803#endif
804 }
805
806 /* Check the return code. */
807#ifdef IN_RING3
808 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
809#else
810 AssertMsg( rcStrict == VINF_SUCCESS
811 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
812 || (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE && enmAccessType == PGMACCESSTYPE_WRITE)
813 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
814 || rcStrict == VINF_EM_DBG_STOP
815 || rcStrict == VINF_EM_DBG_EVENT
816 || rcStrict == VINF_EM_DBG_BREAKPOINT
817 || rcStrict == VINF_EM_OFF
818 || rcStrict == VINF_EM_SUSPEND
819 || rcStrict == VINF_EM_RESET
820 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
821 //|| rcStrict == VINF_EM_HALT /* ?? */
822 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
823 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
824#endif
825
826 iomMmioReleaseRange(pVM, pRange);
827 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
828 }
829#ifdef IN_RING3
830 else
831 iomMmioReleaseRange(pVM, pRange);
832#else
833 else
834 {
835 if (rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
836 {
837 if (enmAccessType == PGMACCESSTYPE_READ)
838 rcStrict = VINF_IOM_R3_MMIO_READ;
839 else
840 {
841 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
842 rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
843 }
844 }
845 iomMmioReleaseRange(pVM, pRange);
846 }
847#endif
848 return rcStrict;
849}
850
851
852#ifdef IN_RING3 /* Only used by REM. */
853
854/**
855 * Reads an MMIO register.
856 *
857 * @returns VBox status code.
858 *
859 * @param pVM The cross context VM structure.
860 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
861 * @param GCPhys The physical address to read.
862 * @param pu32Value Where to store the value read.
863 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
864 */
865VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
866{
867 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
868 /* Take the IOM lock before performing any MMIO. */
869 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
870#ifndef IN_RING3
871 if (rc == VERR_SEM_BUSY)
872 return VINF_IOM_R3_MMIO_WRITE;
873#endif
874 AssertRC(VBOXSTRICTRC_VAL(rc));
875#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
876 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
877#endif
878
879 /*
880 * Lookup the current context range node and statistics.
881 */
882 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
883 if (!pRange)
884 {
885 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
886 IOM_UNLOCK_SHARED(pVM);
887 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
888 }
889 iomMmioRetainRange(pRange);
890#ifndef VBOX_WITH_STATISTICS
891 IOM_UNLOCK_SHARED(pVM);
892
893#else /* VBOX_WITH_STATISTICS */
894 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
895 if (!pStats)
896 {
897 iomMmioReleaseRange(pVM, pRange);
898# ifdef IN_RING3
899 return VERR_NO_MEMORY;
900# else
901 return VINF_IOM_R3_MMIO_READ;
902# endif
903 }
904 STAM_COUNTER_INC(&pStats->Accesses);
905#endif /* VBOX_WITH_STATISTICS */
906
907 if (pRange->CTX_SUFF(pfnReadCallback))
908 {
909 /*
910 * Perform locking.
911 */
912 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
913 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
914 if (rc != VINF_SUCCESS)
915 {
916 iomMmioReleaseRange(pVM, pRange);
917 return rc;
918 }
919
920 /*
921 * Perform the read and deal with the result.
922 */
923 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
924 if ( (cbValue == 4 && !(GCPhys & 3))
925 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
926 || (cbValue == 8 && !(GCPhys & 7)) )
927 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
928 pu32Value, (unsigned)cbValue);
929 else
930 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
931 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
932 switch (VBOXSTRICTRC_VAL(rc))
933 {
934 case VINF_SUCCESS:
935 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
936 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
937 iomMmioReleaseRange(pVM, pRange);
938 return rc;
939#ifndef IN_RING3
940 case VINF_IOM_R3_MMIO_READ:
941 case VINF_IOM_R3_MMIO_READ_WRITE:
942 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
943#endif
944 default:
945 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
946 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
947 iomMmioReleaseRange(pVM, pRange);
948 return rc;
949
950 case VINF_IOM_MMIO_UNUSED_00:
951 iomMMIODoRead00s(pu32Value, cbValue);
952 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
953 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
954 iomMmioReleaseRange(pVM, pRange);
955 return VINF_SUCCESS;
956
957 case VINF_IOM_MMIO_UNUSED_FF:
958 iomMMIODoReadFFs(pu32Value, cbValue);
959 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
960 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
961 iomMmioReleaseRange(pVM, pRange);
962 return VINF_SUCCESS;
963 }
964 /* not reached */
965 }
966#ifndef IN_RING3
967 if (pRange->pfnReadCallbackR3)
968 {
969 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
970 iomMmioReleaseRange(pVM, pRange);
971 return VINF_IOM_R3_MMIO_READ;
972 }
973#endif
974
975 /*
976 * Unassigned memory - this is actually not supposed to happen...
977 */
978 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
979 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
980 iomMMIODoReadFFs(pu32Value, cbValue);
981 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
982 iomMmioReleaseRange(pVM, pRange);
983 return VINF_SUCCESS;
984}
985
986
987/**
988 * Writes to an MMIO register.
989 *
990 * @returns VBox status code.
991 *
992 * @param pVM The cross context VM structure.
993 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
994 * @param GCPhys The physical address to write to.
995 * @param u32Value The value to write.
996 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
997 */
998VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
999{
1000 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
1001 /* Take the IOM lock before performing any MMIO. */
1002 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
1003#ifndef IN_RING3
1004 if (rc == VERR_SEM_BUSY)
1005 return VINF_IOM_R3_MMIO_WRITE;
1006#endif
1007 AssertRC(VBOXSTRICTRC_VAL(rc));
1008#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1009 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
1010#endif
1011
1012 /*
1013 * Lookup the current context range node.
1014 */
1015 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
1016 if (!pRange)
1017 {
1018 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1019 IOM_UNLOCK_SHARED(pVM);
1020 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1021 }
1022 iomMmioRetainRange(pRange);
1023#ifndef VBOX_WITH_STATISTICS
1024 IOM_UNLOCK_SHARED(pVM);
1025
1026#else /* VBOX_WITH_STATISTICS */
1027 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
1028 if (!pStats)
1029 {
1030 iomMmioReleaseRange(pVM, pRange);
1031# ifdef IN_RING3
1032 return VERR_NO_MEMORY;
1033# else
1034 return VINF_IOM_R3_MMIO_WRITE;
1035# endif
1036 }
1037 STAM_COUNTER_INC(&pStats->Accesses);
1038#endif /* VBOX_WITH_STATISTICS */
1039
1040 if (pRange->CTX_SUFF(pfnWriteCallback))
1041 {
1042 /*
1043 * Perform locking.
1044 */
1045 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1046 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
1047 if (rc != VINF_SUCCESS)
1048 {
1049 iomMmioReleaseRange(pVM, pRange);
1050 return rc;
1051 }
1052
1053 /*
1054 * Perform the write.
1055 */
1056 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1057 if ( (cbValue == 4 && !(GCPhys & 3))
1058 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
1059 || (cbValue == 8 && !(GCPhys & 7)) )
1060 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1061 GCPhys, &u32Value, (unsigned)cbValue);
1062 else
1063 rc = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhys, &u32Value, (unsigned)cbValue);
1064 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1065#ifndef IN_RING3
1066 if ( rc == VINF_IOM_R3_MMIO_WRITE
1067 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
1068 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1069#endif
1070 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1071 iomMmioReleaseRange(pVM, pRange);
1072 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1073 return rc;
1074 }
1075#ifndef IN_RING3
1076 if (pRange->pfnWriteCallbackR3)
1077 {
1078 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1079 iomMmioReleaseRange(pVM, pRange);
1080 return VINF_IOM_R3_MMIO_WRITE;
1081 }
1082#endif
1083
1084 /*
1085 * No write handler, nothing to do.
1086 */
1087 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1088 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1089 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
1090 iomMmioReleaseRange(pVM, pRange);
1091 return VINF_SUCCESS;
1092}
1093
1094#endif /* IN_RING3 - only used by REM. */
1095#ifndef IN_RC
1096
1097/**
1098 * Mapping an MMIO2 page in place of an MMIO page for direct access.
1099 *
1100 * (This is a special optimization used by the VGA device.)
1101 *
1102 * @returns VBox status code. This API may return VINF_SUCCESS even if no
1103 * remapping is made.
1104 *
1105 * @param pVM The cross context VM structure.
1106 * @param GCPhys The address of the MMIO page to be changed.
1107 * @param GCPhysRemapped The address of the MMIO2 page.
1108 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1109 * for the time being.
1110 */
1111VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
1112{
1113# ifndef IEM_VERIFICATION_MODE_FULL
1114 /* Currently only called from the VGA device during MMIO. */
1115 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
1116 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1117 PVMCPU pVCpu = VMMGetCpu(pVM);
1118
1119 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1120 /** @todo NEM: MMIO page aliasing. */
1121 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1122 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
1123 && !HMIsNestedPagingActive(pVM)))
1124 return VINF_SUCCESS; /* ignore */
1125
1126 int rc = IOM_LOCK_SHARED(pVM);
1127 if (RT_FAILURE(rc))
1128 return VINF_SUCCESS; /* better luck the next time around */
1129
1130 /*
1131 * Lookup the context range node the page belongs to.
1132 */
1133 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
1134 AssertMsgReturn(pRange,
1135 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
1136
1137 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1138 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1139
1140 /*
1141 * Do the aliasing; page align the addresses since PGM is picky.
1142 */
1143 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1144 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1145
1146 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
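 /* Assumption about the aliasing behaviour: once PGMHandlerPhysicalPageAlias
    has installed the MMIO2 page, guest accesses to this page go straight to
    that backing page instead of faulting into iomMmioPfHandler, until
    IOMMMIOResetRegion() restores the handler pages. */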
1147
1148 IOM_UNLOCK_SHARED(pVM);
1149 AssertRCReturn(rc, rc);
1150
1151 /*
1152 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
1153 * can simply prefetch it.
1154 *
1155 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
1156 */
1157# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
1158# ifdef VBOX_STRICT
1159 uint64_t fFlags;
1160 RTHCPHYS HCPhys;
1161 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1162 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1163# endif
1164# endif
1165 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
1166 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1167# else
1168 RT_NOREF_PV(pVM); RT_NOREF(GCPhys); RT_NOREF(GCPhysRemapped); RT_NOREF(fPageFlags);
1169# endif /* !IEM_VERIFICATION_MODE_FULL */
1170 return VINF_SUCCESS;
1171}
1172
1173
1174# ifndef IEM_VERIFICATION_MODE_FULL
1175/**
1176 * Mapping a HC page in place of an MMIO page for direct access.
1177 *
1178 * (This is a special optimization used by the APIC in the VT-x case.)
1179 *
1180 * @returns VBox status code.
1181 *
1182 * @param pVM The cross context VM structure.
1183 * @param pVCpu The cross context virtual CPU structure.
1184 * @param GCPhys The address of the MMIO page to be changed.
1185 * @param HCPhys The address of the host physical page.
1186 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1187 * for the time being.
1188 */
1189VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
1190{
1191 /* Currently only called from VT-x code during a page fault. */
1192 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
1193
1194 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1195 /** @todo NEM: MMIO page aliasing. */
1196 Assert(HMIsEnabled(pVM));
1197
1198 /*
1199 * Lookup the context range node the page belongs to.
1200 */
1201# ifdef VBOX_STRICT
1202 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
1203 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
1204 AssertMsgReturn(pRange,
1205 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
1206 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1207 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1208# endif
1209
1210 /*
1211 * Do the aliasing; page align the addresses since PGM is picky.
1212 */
1213 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1214 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
1215
1216 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
1217 AssertRCReturn(rc, rc);
1218
1219 /*
1220 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
1221 * can simply prefetch it.
1222 *
1223 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
1224 */
1225 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
1226 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1227 return VINF_SUCCESS;
1228}
1229# endif /* !IEM_VERIFICATION_MODE_FULL */
1230
1231
1232/**
1233 * Reset a previously modified MMIO region; restore the access flags.
1234 *
1235 * @returns VBox status code.
1236 *
1237 * @param pVM The cross context VM structure.
1238 * @param GCPhys Physical address that's part of the MMIO region to be reset.
1239 */
1240VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
1241{
1242 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
1243
1244 PVMCPU pVCpu = VMMGetCpu(pVM);
1245
1246 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1247 /** @todo NEM: MMIO page aliasing. */
1248 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1249 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
1250 && !HMIsNestedPagingActive(pVM)))
1251 return VINF_SUCCESS; /* ignore */
1252
1253 /*
1254 * Lookup the context range node the page belongs to.
1255 */
1256# ifdef VBOX_STRICT
1257 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
1258 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
1259 AssertMsgReturn(pRange,
1260 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
1261 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1262 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1263# endif
1264
1265 /*
1266 * Call PGM to do the job.
1267 *
1268 * After the call, all the pages should be non-present... unless there is
1269 * a page pool flush pending (unlikely).
1270 */
1271 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
1272 AssertRC(rc);
1273
1274# ifdef VBOX_STRICT
1275 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
1276 {
1277 uint32_t cb = pRange->cb;
1278 GCPhys = pRange->GCPhys;
1279 while (cb)
1280 {
1281 uint64_t fFlags;
1282 RTHCPHYS HCPhys;
1283 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1284 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1285 cb -= PAGE_SIZE;
1286 GCPhys += PAGE_SIZE;
1287 }
1288 }
1289# endif
1290 return rc;
1291}
1292
1293#endif /* !IN_RC */
1294