VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@56034

Last change on this file since 56034 was 56017, checked in by vboxsync, 10 years ago

Physical access handler cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 93.3 KB
/* $Id: IOMAllMMIO.cpp 56017 2015-05-21 18:14:21Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM
#include <VBox/vmm/iom.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/trpm.h>
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
# include <VBox/vmm/iem.h>
#endif
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm.h>
#include "IOMInline.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/**
 * Array for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0U,   /* 0 - invalid */
    0,     /* *1 == 2^0 */
    1,     /* *2 == 2^1 */
    ~0U,   /* 3 - invalid */
    2,     /* *4 == 2^2 */
    ~0U,   /* 5 - invalid */
    ~0U,   /* 6 - invalid */
    ~0U,   /* 7 - invalid */
    3      /* *8 == 2^3 */
};

/**
 * Macro for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
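/* Example: SIZE_2_SHIFT(4) == 2, so a transfer of cTransfers dwords spans
   (cTransfers << SIZE_2_SHIFT(4)) == cTransfers * 4 bytes; the string
   instruction emulations below use this to turn element counts into byte
   counts. */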


/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     The value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing =    (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                                  || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
                R3STRING(pRange->pszDesc)));
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
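    /* Example: a 6 byte write starting at an address with (GCPhys & 3) == 2
       is carved into a 2 byte part covering the top of the first dword,
       followed by a full 4 byte access to the next dword; the missing bytes
       are read back first when the write mode asks for READ_MISSING. */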
    for (;;)
    {
        unsigned const  offAccess  = GCPhys & 3;
        unsigned        cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    /** @todo What if we've split a transfer and already read
                     *        something? Since writes generally have side effects we
                     *        could be kind of screwed here...
                     *
                     *        Fix: Save the current state and resume it in ring-3. Requires EM to not go
                     *        to REM for MMIO accesses (like it may currently do). */

                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue & u32GivenMask);
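        /* Example: a one byte write of 0xab at offset 2 yields u32GivenMask
           0x00ff0000 and u32GivenValue 0x00ab0000, so only bits 16:23 of the
           device dword are replaced by guest data. */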

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something? Since reads can have side effects we could be
                 *        kind of screwed here...
                 *
                 *        Fix: Save the current state and resume it in ring-3. Requires EM to not go
                 *        to REM for MMIO accesses (like it may currently do). */
                LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}




/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
                                   const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                          GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rcStrict = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
    }
    else
        rcStrict = VINF_SUCCESS;

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something? Since reads can have side effects we could be
                 *        kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8;
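        /* The device was read at the dword boundary, so the shift above moves
           the bytes of interest down; e.g. with (GCPhys & 3) == 2 the wanted
           data starts at bit 16 of the returned dword. */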

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_FF.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the 0xff bytes.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t *)pvValue  = UINT8_C(0xff); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0xff);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_00.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the zeros.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t *)pvValue  = UINT8_C(0x00); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0x00);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                       void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (   cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                         pvValue, cbValue);
        else
            rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rcStrict = VINF_IOM_MMIO_UNUSED_FF;
    if (rcStrict != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rcStrict))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}


/**
 * MOV reg, mem         (read)
 * MOVZX reg, mem       (read)
 * MOVSX reg, mem       (read)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the data size from parameter 2,
     * and call the handler function to get the data.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));

    uint64_t u64Data = 0;
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
    if (rc == VINF_SUCCESS)
    {
        /*
         * Do sign extension for MOVSX.
         */
        /** @todo checkup MOVSX implementation! */
        if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
        {
            if (cb == 1)
            {
                /* DWORD <- BYTE */
                int64_t iData = (int8_t)u64Data;
                u64Data = (uint64_t)iData;
            }
            else
            {
                /* DWORD <- WORD */
                int64_t iData = (int16_t)u64Data;
                u64Data = (uint64_t)iData;
            }
        }

        /*
         * Store the result to register (parameter 1).
         */
        bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
        AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
    }

    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * MOV mem, reg|imm     (write)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                  PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);

    /*
     * Get data to write from second parameter,
     * and call the callback to write it.
     */
    unsigned cb = 0;
    uint64_t u64Data = 0;
    bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
    AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);

    int rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/** Wrapper for reading virtual memory. */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    NOREF(pVCpu);
    int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
    /* Page may be protected and not directly accessible. */
    if (rc == VERR_ACCESS_DENIED)
        rc = VINF_IOM_R3_IOPORT_WRITE;
    return rc;
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM);
#endif
}


/** Wrapper for writing virtual memory. */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code. Some thought needs to be spent on theoretical concurrency issues as
     *        well since we're not behind the pgm lock and the handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM);
#endif
}


#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM          The virtual machine.
 * @param   fWriteAccess Whether the fault was caused by a write access.
 * @param   pRegFrame    Trap register frame.
 * @param   GCPhysFault  The GC physical address corresponding to pvFault.
 * @param   pCpu         Disassembler CPU state.
 * @param   pRange       Pointer to the MMIO range.
 * @param   ppStat       Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
                            PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_R3_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->fPrefix & DISPREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_R3_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_R3_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
            {
                iomMmioReleaseRange(pVM, pRange);
                return VINF_IOM_R3_MMIO_READ_WRITE;
            }

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;
                rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
            iomMmioReleaseRange(pVM, pRange);
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */


/**
 * Gets the address / opcode mask corresponding to the given CPU mode.
 *
 * @returns Mask.
 * @param   enmCpuMode  CPU mode.
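 *
 * @remarks The mask returned here is applied to the rsi/rdi updates and to
 *          the rcx counter in the string instruction emulations below; e.g.
 *          with a 16-bit address size only the low word of rdi is updated:
 *          ((rdi + off) & 0xffff) | (rdi & ~0xffff).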
 */
static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
{
    switch (enmCpuMode)
    {
        case DISCPUMODE_16BIT: return UINT16_MAX;
        case DISCPUMODE_32BIT: return UINT32_MAX;
        case DISCPUMODE_64BIT: return UINT64_MAX;
        default:
            AssertFailedReturn(UINT32_MAX);
    }
}


/**
 * [REP] STOSB
 * [REP] STOSW
 * [REP] STOSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
                            PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get bytes/words/dwords/qwords count to copy.
     */
    uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->rcx & fAddrMask;
        if (!cTransfers)
            return VINF_SUCCESS;
    }

/** @todo r=bird: bounds checks! */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif


    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (   pRange->CTX_SUFF(pfnFillCallback)
        && cb <= 4 /* can only fill 32-bit values */)
    {
        /*
         * Use the fill callback.
         */
        /** @todo pfnFillCallback must return number of bytes successfully written!!! */
        if (offIncrement > 0)
        {
            /* addr++ variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
        else
        {
            /* addr-- variant. */
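            /* With DF set, edi points at the last element written, so the
               lowest address the fill callback must cover lies
               (cTransfers - 1) elements below the faulting address; hence
               the Phys adjustment below. */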
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                   Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
    }
    else
    {
        /*
         * Use the write callback.
         */
        Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
        uint64_t u64Data = pRegFrame->rax;

        /* fill loop. */
        do
        {
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
            if (rc != VINF_SUCCESS)
                break;

            Phys           += offIncrement;
            pRegFrame->rdi  = ((pRegFrame->rdi + offIncrement) & fAddrMask)
                            | (pRegFrame->rdi & ~fAddrMask);
            cTransfers--;
        } while (cTransfers);

        /* Update rcx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->rcx = (cTransfers & fAddrMask)
                           | (pRegFrame->rcx & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * [REP] LODSB
 * [REP] LODSW
 * [REP] LODSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * We do not support segment prefixes or REP*.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

    /*
     * Perform read.
     */
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb));
    if (rc == VINF_SUCCESS)
    {
        uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
        pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
                       | (pRegFrame->rsi & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * CMP [MMIO], reg|imm
 * CMP reg|imm, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                           PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the operands.
     */
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
        /* cmp reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
        /* cmp [MMIO], reg|imm. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
    else
    {
        AssertMsgFailed(("Disassembler CMP problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* Emulate CMP and update guest flags. */
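        /* Only the six arithmetic status flags (CF/PF/AF/ZF/SF/OF) are merged
           back from the emulated result; all other guest EFLAGS bits are
           preserved. */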
        uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * AND [MMIO], reg|imm
 * AND reg, [MMIO]
 * OR [MMIO], reg|imm
 * OR reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   pfnEmulate  Instruction emulation function.
 */
static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                                PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
{
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    bool fAndWrite;
    int rc;

#ifdef LOG_ENABLED
    const char *pszInstr;

    if (pCpu->pCurInstr->uOpcode == OP_XOR)
        pszInstr = "Xor";
    else if (pCpu->pCurInstr->uOpcode == OP_OR)
        pszInstr = "Or";
    else if (pCpu->pCurInstr->uOpcode == OP_AND)
        pszInstr = "And";
    else
        pszInstr = "OrXorAnd??";
#endif

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and reg, [MMIO]. */
        Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
        fAndWrite = false;
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and [MMIO], reg|imm. */
        fAndWrite = true;
        if (    (pRange->CTX_SUFF(pfnReadCallback)  || !pRange->pfnReadCallbackR3)
            &&  (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
            rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        AssertMsgFailed(("Disassembler AND problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
        /* Emulate the instruction and update guest flags. */
        uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);

        LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));

        if (fAndWrite)
            /* Store result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        else
        {
            /* Store result to register. */
            bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
            AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
        }
        if (rc == VINF_SUCCESS)
        {
            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
            iomMMIOStatLength(pVM, cb);
        }
    }

    return rc;
}


/**
 * TEST [MMIO], reg|imm
 * TEST reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* test reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* test [MMIO], reg|imm. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
    }
    else
    {
        AssertMsgFailed(("Disassembler TEST problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif

        /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
        uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * BT [MMIO], reg|imm
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                          PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    uint64_t uBit  = 0;
    uint64_t uData = 0;
    unsigned cbIgnored;

    if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
    {
        AssertMsgFailed(("Disassembler BT problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    /* The size of the memory operand only matters here. */
    unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);

    /* bt [MMIO], reg|imm. */
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData));
    if (rc == VINF_SUCCESS)
    {
        /* Find the bit inside the faulting address. */
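        /* u1CF is a one-bit bitfield, so the assignment below implicitly
           masks the shifted value down to the single bit that was tested. */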
        pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
        iomMMIOStatLength(pVM, cbData);
    }

    return rc;
}

/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_R3_MMIO_READ_WRITE;

    int rc;
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassembler XCHG problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}


/**
 * \#PF Handler callback for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static int iomMMIOHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
{
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else
    /*
     * Locate the statistics.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case, so
     * do the simple test first and then try to deal with uErrorCode being N/A.
     */
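    /* Note: uErrorCode is UINT32_MAX for the EPT misconfig hack; since we then
       cannot tell a read from a write, we defer to ring-3 whenever a current
       context callback is missing and either ring-3 callback is present. */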
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Retain the range and do locking.
     */
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Disassemble the instruction and interpret it.
     */
    PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
    unsigned cbOp;
    rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
    if (RT_FAILURE(rc))
    {
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }
    switch (pDis->pCurInstr->uOpcode)
    {
        case OP_MOV:
        case OP_MOVZX:
        case OP_MOVSX:
        {
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
            AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
            if (uErrorCode != UINT32_MAX    /* EPT+MMIO optimization */
                ? uErrorCode & X86_TRAP_PF_RW
                : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
                rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
            else
                rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
            break;
        }


#ifdef IOM_WITH_MOVS_SUPPORT
        case OP_MOVSB:
        case OP_MOVSWD:
        {
            if (uErrorCode == UINT32_MAX)
                rc = VINF_IOM_R3_MMIO_READ_WRITE;
            else
            {
                STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
                PSTAMPROFILE pStat = NULL;
                rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
                STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
            }
            break;
        }
#endif

        case OP_STOSB:
        case OP_STOSWD:
            Assert(uErrorCode & X86_TRAP_PF_RW);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
            rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
            break;

        case OP_LODSB:
        case OP_LODSWD:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
            rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
            break;

        case OP_CMP:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
            rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
            break;

        case OP_AND:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
            break;

        case OP_OR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
            break;

        case OP_XOR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
            break;

        case OP_TEST:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
            rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
            break;

        case OP_BT:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
            rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
            break;

        case OP_XCHG:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
            rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
            break;


        /*
         * The instruction isn't supported. Hand it on to ring-3.
         */
        default:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
            break;
    }

    /*
     * On success advance EIP.
     */
    if (rc == VINF_SUCCESS)
        pCtxCore->rip += cbOp;
    else
    {
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
        switch (rc)
        {
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
                break;
            case VINF_IOM_R3_MMIO_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
                break;
        }
#endif
    }

    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    return rc;
}
1734
1735
1736/**
1737 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1738 * \#PF access handler callback for MMIO pages.}
1739 *
1740 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
1741 */
1742DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
1743 RTGCPHYS GCPhysFault, void *pvUser)
1744{
1745 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1746 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1747 return iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1748}
1749
1750
1751/**
1752 * Physical access handler for MMIO ranges.
1753 *
1754 * @returns VBox status code (appropriate for GC return).
1755 * @param pVM Pointer to the VM.
1756 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1757 * @param uErrorCode CPU Error code.
1758 * @param pCtxCore Trap register frame.
1759 * @param GCPhysFault The GC physical address.
1760 */
1761VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1762{
1763 /*
1764 * We don't have a range here, so look it up before calling the common function.
1765 */
1766 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1767#ifndef IN_RING3
1768 if (rc2 == VERR_SEM_BUSY)
1769 return VINF_IOM_R3_MMIO_READ_WRITE;
1770#endif
1771 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1772 if (RT_UNLIKELY(!pRange))
1773 {
1774 IOM_UNLOCK_SHARED(pVM);
1775 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1776 }
1777 iomMmioRetainRange(pRange);
1778 IOM_UNLOCK_SHARED(pVM);
1779
1780 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
1781
1782 iomMmioReleaseRange(pVM, pRange);
1783 return VBOXSTRICTRC_VAL(rcStrict);
1784}
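
/*
 * The lookup above follows the usual IOM retain/release pattern: resolve and
 * retain the range under the shared IOM lock, drop the lock before doing any
 * real work, and release the reference afterwards.  A condensed sketch of the
 * pattern (mirrors the code above, error handling elided):
 *
 * @code
 *    IOM_LOCK_SHARED(pVM);
 *    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
 *    if (pRange)
 *        iomMmioRetainRange(pRange);     // ref-count keeps the range alive
 *    IOM_UNLOCK_SHARED(pVM);             // never hold IOM across device code
 *    if (pRange)
 *    {
 *        // ... perform the access ...
 *        iomMmioReleaseRange(pVM, pRange);
 *    }
 * @endcode
 */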
1785
1786
1787/**
1788 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
1789 *
1790 * @remarks The @a pvUser argument points to the MMIO range entry.
1791 */
1792PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
1793 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
1794{
1795 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1796 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1797
1798 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
1799 AssertPtr(pRange);
1800 NOREF(pvPhys); NOREF(enmOrigin);
1801
1802 /*
1803 * Validate the range.
1804 */
1805 int rc = IOM_LOCK_SHARED(pVM);
1806 AssertRC(rc);
1807 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1808
1809 /*
1810 * Perform locking.
1811 */
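    /* Lock order: retain the range and drop the IOM lock before entering the
       device critical section, so IOM is never held across device code. */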
1812 iomMmioRetainRange(pRange);
1813 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1814 IOM_UNLOCK_SHARED(pVM);
1815 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1816 if (rcStrict == VINF_SUCCESS)
1817 {
1818 /*
1819 * Perform the access.
1820 */
1821 if (enmAccessType == PGMACCESSTYPE_READ)
1822 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1823 else
1824 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1825
1826 /* Check the return code. */
1827#ifdef IN_RING3
1828 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
1829#else
1830 AssertMsg( rcStrict == VINF_SUCCESS
1831 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
1832 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
1833 || rcStrict == VINF_EM_DBG_STOP
1834 || rcStrict == VINF_EM_DBG_BREAKPOINT
1835 || rcStrict == VINF_EM_OFF
1836 || rcStrict == VINF_EM_SUSPEND
1837 || rcStrict == VINF_EM_RESET
1838 //|| rcStrict == VINF_EM_HALT /* ?? */
1839 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
1840 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
1841#endif
1842
1843 iomMmioReleaseRange(pVM, pRange);
1844 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1845 }
1846 else
1847 iomMmioReleaseRange(pVM, pRange);
1848 return rcStrict;
1849}
1850
1851
1852/**
1853 * Reads an MMIO register.
1854 *
1855 * @returns VBox status code.
1856 *
1857 * @param pVM Pointer to the VM.
1858 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1859 * @param GCPhys The physical address to read.
1860 * @param pu32Value Where to store the value read.
1861 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1862 */
1863VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1864{
1865 /* Take the IOM lock before performing any MMIO. */
1866 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
1867#ifndef IN_RING3
1868 if (rc == VERR_SEM_BUSY)
1869 return VINF_IOM_R3_MMIO_READ; /* deferring a read, not a write */
1870#endif
1871 AssertRC(VBOXSTRICTRC_VAL(rc));
1872#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1873 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1874#endif
1875
1876 /*
1877 * Lookup the current context range node and statistics.
1878 */
1879 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
1880 if (!pRange)
1881 {
1882 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1883 IOM_UNLOCK_SHARED(pVM);
1884 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1885 }
1886 iomMmioRetainRange(pRange);
1887#ifndef VBOX_WITH_STATISTICS
1888 IOM_UNLOCK_SHARED(pVM);
1889
1890#else /* VBOX_WITH_STATISTICS */
1891 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
1892 if (!pStats)
1893 {
1894 iomMmioReleaseRange(pVM, pRange);
1895# ifdef IN_RING3
1896 return VERR_NO_MEMORY;
1897# else
1898 return VINF_IOM_R3_MMIO_READ;
1899# endif
1900 }
1901 STAM_COUNTER_INC(&pStats->Accesses);
1902#endif /* VBOX_WITH_STATISTICS */
1903
1904 if (pRange->CTX_SUFF(pfnReadCallback))
1905 {
1906 /*
1907 * Perform locking.
1908 */
1909 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1910 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
1911 if (rc != VINF_SUCCESS)
1912 {
1913 iomMmioReleaseRange(pVM, pRange);
1914 return rc;
1915 }
1916
1917 /*
1918 * Perform the read and deal with the result.
1919 */
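        /* Aligned dword accesses, pass-thru ranges and aligned qword accesses
           go straight to the device callback; everything else takes the slow
           path, which splits and pads the access as the range flags dictate. */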
1920 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1921 if ( (cbValue == 4 && !(GCPhys & 3))
1922 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
1923 || (cbValue == 8 && !(GCPhys & 7)) )
1924 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
1925 pu32Value, (unsigned)cbValue);
1926 else
1927 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
1928 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1929 switch (VBOXSTRICTRC_VAL(rc))
1930 {
1931 case VINF_SUCCESS:
1932 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1933 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1934 iomMmioReleaseRange(pVM, pRange);
1935 return rc;
1936#ifndef IN_RING3
1937 case VINF_IOM_R3_MMIO_READ:
1938 case VINF_IOM_R3_MMIO_READ_WRITE:
1939 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1940#endif
1941 default:
1942 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1943 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1944 iomMmioReleaseRange(pVM, pRange);
1945 return rc;
1946
1947 case VINF_IOM_MMIO_UNUSED_00:
1948 iomMMIODoRead00s(pu32Value, cbValue);
1949 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1950 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1951 iomMmioReleaseRange(pVM, pRange);
1952 return VINF_SUCCESS;
1953
1954 case VINF_IOM_MMIO_UNUSED_FF:
1955 iomMMIODoReadFFs(pu32Value, cbValue);
1956 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1957 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1958 iomMmioReleaseRange(pVM, pRange);
1959 return VINF_SUCCESS;
1960 }
1961 /* not reached */
1962 }
1963#ifndef IN_RING3
1964 if (pRange->pfnReadCallbackR3)
1965 {
1966 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1967 iomMmioReleaseRange(pVM, pRange);
1968 return VINF_IOM_R3_MMIO_READ;
1969 }
1970#endif
1971
1972 /*
1973 * Unassigned memory - this is actually not supposed to happen...
1974 */
1975 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1976 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1977 iomMMIODoReadFFs(pu32Value, cbValue);
1978 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1979 iomMmioReleaseRange(pVM, pRange);
1980 return VINF_SUCCESS;
1981}
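
/*
 * A minimal caller-side sketch of the read API above (GCPhysReg and the log
 * statement are hypothetical): the informational VINF_IOM_R3_* statuses mean
 * "redo the access in ring-3" and must not be treated as errors.
 *
 * @code
 *    uint32_t     u32Reg = 0;
 *    VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, GCPhysReg, &u32Reg, sizeof(u32Reg));
 *    if (rcStrict == VINF_IOM_R3_MMIO_READ || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
 *        return rcStrict;                    // defer to ring-3
 *    if (rcStrict == VINF_SUCCESS)
 *        LogFlow(("reg=%#RX32\n", u32Reg));  // use the value
 * @endcode
 */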
1982
1983
1984/**
1985 * Writes to an MMIO register.
1986 *
1987 * @returns VBox status code.
1988 *
1989 * @param pVM Pointer to the VM.
1990 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1991 * @param GCPhys The physical address to write to.
1992 * @param u32Value The value to write.
1993 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
1994 */
1995VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1996{
1997 /* Take the IOM lock before performing any MMIO. */
1998 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
1999#ifndef IN_RING3
2000 if (rc == VERR_SEM_BUSY)
2001 return VINF_IOM_R3_MMIO_WRITE;
2002#endif
2003 AssertRC(VBOXSTRICTRC_VAL(rc));
2004#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2005 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2006#endif
2007
2008 /*
2009 * Lookup the current context range node.
2010 */
2011 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2012 if (!pRange)
2013 {
2014 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2015 IOM_UNLOCK_SHARED(pVM);
2016 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2017 }
2018 iomMmioRetainRange(pRange);
2019#ifndef VBOX_WITH_STATISTICS
2020 IOM_UNLOCK_SHARED(pVM);
2021
2022#else /* VBOX_WITH_STATISTICS */
2023 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2024 if (!pStats)
2025 {
2026 iomMmioReleaseRange(pVM, pRange);
2027# ifdef IN_RING3
2028 return VERR_NO_MEMORY;
2029# else
2030 return VINF_IOM_R3_MMIO_WRITE;
2031# endif
2032 }
2033 STAM_COUNTER_INC(&pStats->Accesses);
2034#endif /* VBOX_WITH_STATISTICS */
2035
2036 if (pRange->CTX_SUFF(pfnWriteCallback))
2037 {
2038 /*
2039 * Perform locking.
2040 */
2041 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2042 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2043 if (rc != VINF_SUCCESS)
2044 {
2045 iomMmioReleaseRange(pVM, pRange);
2046 return rc;
2047 }
2048
2049 /*
2050 * Perform the write.
2051 */
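        /* Same fast/slow split as the read path: aligned dword/qword and
           pass-thru accesses hit the device callback directly, the rest is
           broken up by the complicated-write helper. */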
2052 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2053 if ( (cbValue == 4 && !(GCPhys & 3))
2054 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2055 || (cbValue == 8 && !(GCPhys & 7)) )
2056 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2057 GCPhys, &u32Value, (unsigned)cbValue);
2058 else
2059 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2060 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2061#ifndef IN_RING3
2062 if ( rc == VINF_IOM_R3_MMIO_WRITE
2063 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2064 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2065#endif
2066 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2067 iomMmioReleaseRange(pVM, pRange);
2068 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2069 return rc;
2070 }
2071#ifndef IN_RING3
2072 if (pRange->pfnWriteCallbackR3)
2073 {
2074 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2075 iomMmioReleaseRange(pVM, pRange);
2076 return VINF_IOM_R3_MMIO_WRITE;
2077 }
2078#endif
2079
2080 /*
2081 * No write handler, nothing to do.
2082 */
2083 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2084 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2085 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2086 iomMmioReleaseRange(pVM, pRange);
2087 return VINF_SUCCESS;
2088}
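
/*
 * Caller-side usage mirrors IOMMMIORead (GCPhysReg and the value written are
 * hypothetical): a write that cannot complete in this context comes back as
 * VINF_IOM_R3_MMIO_WRITE (or VINF_IOM_R3_MMIO_READ_WRITE) and must be
 * retried in ring-3.
 *
 * @code
 *    VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, pVCpu, GCPhysReg, UINT32_C(0xdeadbeef), 4);
 *    if (   rcStrict == VINF_IOM_R3_MMIO_WRITE
 *        || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
 *        return rcStrict;                    // retry the access in ring-3
 * @endcode
 */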
2089
2090
2091/**
2092 * [REP*] INSB/INSW/INSD
2093 * ES:EDI,DX[,ECX]
2094 *
2095 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2096 *
2097 * @returns Strict VBox status code. Informational status codes other than the one documented
2098 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2099 * @retval VINF_SUCCESS Success.
2100 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2101 * status code must be passed on to EM.
2102 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2103 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2104 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2105 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2106 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2107 *
2108 * @param pVM The virtual machine.
2109 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2110 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2111 * @param uPort IO Port
2112 * @param uPrefix IO instruction prefix
2113 * @param enmAddrMode The address mode.
2114 * @param cbTransfer Size of transfer unit
2115 */
2116VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2117 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2118{
2119 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2120
2121 /*
2122 * We do not support REPNE or a decrementing destination pointer (DF=1).
2123 * Segment prefixes are deliberately ignored: INS always stores to ES:(E/R)DI, per the instruction specification.
2124 */
2125 if ( (uPrefix & DISPREFIX_REPNE)
2126 || pRegFrame->eflags.Bits.u1DF)
2127 return VINF_EM_RAW_EMULATE_INSTR;
2128
2129 /*
2130 * Get bytes/words/dwords count to transfer.
2131 */
2132 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
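    /* iomDisModeToMask presumably yields 0x0000ffff, 0xffffffff or UINT64_MAX
       for 16/32/64-bit addressing; bits above the mask must be preserved when
       rcx/rdi are advanced below. */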
2133 RTGCUINTREG cTransfers = 1;
2134 if (uPrefix & DISPREFIX_REP)
2135 {
2136#ifndef IN_RC
2137 if ( CPUMIsGuestIn64BitCode(pVCpu)
2138 && pRegFrame->rcx >= _4G)
2139 return VINF_EM_RAW_EMULATE_INSTR;
2140#endif
2141 cTransfers = pRegFrame->rcx & fAddrMask;
2142 if (!cTransfers)
2143 return VINF_SUCCESS;
2144 }
2145
2146 /* Convert destination address es:edi. */
2147 RTGCPTR GCPtrDst;
2148 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2149 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2150 &GCPtrDst);
2151 if (RT_FAILURE(rc2))
2152 {
2153 Log(("INS destination address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2154 return VINF_EM_RAW_EMULATE_INSTR;
2155 }
2156
2157 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2158 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2159 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2160 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2161 if (rc2 != VINF_SUCCESS)
2162 {
2163 Log(("INS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2164 return VINF_EM_RAW_EMULATE_INSTR;
2165 }
2166
2167 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2168 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2169 if (cTransfers > 1)
2170 {
2171 /* If the device supports string transfers, ask it to do as
2172 * much as it wants. The rest is done with single-word transfers. */
2173 const RTGCUINTREG cTransfersOrg = cTransfers;
2174 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, &GCPtrDst, &cTransfers, cbTransfer);
2175 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2176 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2177 | (pRegFrame->rdi & ~fAddrMask);
2178 }
2179
2180#ifdef IN_RC
2181 MMGCRamRegisterTrapHandler(pVM);
2182#endif
2183 while (cTransfers && rcStrict == VINF_SUCCESS)
2184 {
2185 uint32_t u32Value;
2186 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2187 if (!IOM_SUCCESS(rcStrict))
2188 break;
2189 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2190 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2191 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2192 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2193 | (pRegFrame->rdi & ~fAddrMask);
2194 cTransfers--;
2195 }
2196#ifdef IN_RC
2197 MMGCRamDeregisterTrapHandler(pVM);
2198#endif
2199
2200 /* Update rcx on exit. */
2201 if (uPrefix & DISPREFIX_REP)
2202 pRegFrame->rcx = (cTransfers & fAddrMask)
2203 | (pRegFrame->rcx & ~fAddrMask);
2204
2205 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2206 return rcStrict;
2207}
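
/*
 * The masked register updates above deserve a worked example.  With 16-bit
 * addressing (fAddrMask = 0xffff) and, say, rdi = 0x0001fffe, cbTransfer = 4:
 *
 * @code
 *    // new rdi = ((rdi + cb) & mask) | (rdi & ~mask)
 *    //         = ((0x0001fffe + 4) & 0xffff) | 0x00010000
 *    //         = 0x0002 | 0x00010000
 *    //         = 0x00010002
 * @endcode
 *
 * Only the addressing-mode-sized low part of the register advances (and
 * wraps); the bits above the mask are preserved.
 */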
2208
2209
2210#if !defined(VBOX_WITH_FIRST_IEM_STEP) || defined(IN_RC) /* Latter for IOMRCIOPortHandler */
2211/**
2212 * [REP*] INSB/INSW/INSD
2213 * ES:EDI,DX[,ECX]
2214 *
2215 * @returns Strict VBox status code. Informational status codes other than the one documented
2216 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2217 * @retval VINF_SUCCESS Success.
2218 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2219 * status code must be passed on to EM.
2220 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2221 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2222 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2223 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2224 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2225 *
2226 * @param pVM The virtual machine.
2227 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2228 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2229 * @param pCpu Disassembler CPU state.
2230 */
2231VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2232{
2233 /*
2234 * Get port number directly from the register (no need to bother the
2235 * disassembler). And get the I/O register size from the opcode / prefix.
2236 */
2237 RTIOPORT Port = pRegFrame->edx & 0xffff;
2238 unsigned cb = 0;
2239 if (pCpu->pCurInstr->uOpcode == OP_INSB)
2240 cb = 1;
2241 else
2242 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bit modes */
2243
2244 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2245 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2246 {
2247 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2248 return rcStrict;
2249 }
2250
2251 return IOMInterpretINSEx(pVM, pVCpu, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2252}
2253#endif /* !IEM || RC */
2254
2255
2256/**
2257 * [REP*] OUTSB/OUTSW/OUTSD
2258 * DS:ESI,DX[,ECX]
2259 *
2260 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2261 *
2262 * @returns Strict VBox status code. Informational status codes other than the one documented
2263 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2264 * @retval VINF_SUCCESS Success.
2265 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2266 * status code must be passed on to EM.
2267 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2268 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2269 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2270 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2271 *
2272 * @param pVM The virtual machine.
2273 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2274 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2275 * @param uPort IO Port
2276 * @param uPrefix IO instruction prefix
2277 * @param enmAddrMode The address mode.
2278 * @param cbTransfer Size of transfer unit
2279 */
2280VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2281 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2282{
2283 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2284
2285 /*
2286 * We do not support segment prefixes, REPNE or a
2287 * decrementing source pointer (DF=1).
2288 */
2289 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2290 || pRegFrame->eflags.Bits.u1DF)
2291 return VINF_EM_RAW_EMULATE_INSTR;
2292
2293 /*
2294 * Get bytes/words/dwords count to transfer.
2295 */
2296 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2297 RTGCUINTREG cTransfers = 1;
2298 if (uPrefix & DISPREFIX_REP)
2299 {
2300#ifndef IN_RC
2301 if ( CPUMIsGuestIn64BitCode(pVCpu)
2302 && pRegFrame->rcx >= _4G)
2303 return VINF_EM_RAW_EMULATE_INSTR;
2304#endif
2305 cTransfers = pRegFrame->rcx & fAddrMask;
2306 if (!cTransfers)
2307 return VINF_SUCCESS;
2308 }
2309
2310 /* Convert source address ds:esi. */
2311 RTGCPTR GCPtrSrc;
2312 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2313 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2314 &GCPtrSrc);
2315 if (RT_FAILURE(rc2))
2316 {
2317 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2318 return VINF_EM_RAW_EMULATE_INSTR;
2319 }
2320
2321 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2322 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2323 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2324 (cpl == 3) ? X86_PTE_US : 0);
2325 if (rc2 != VINF_SUCCESS)
2326 {
2327 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2328 return VINF_EM_RAW_EMULATE_INSTR;
2329 }
2330
2331 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2332 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2333 if (cTransfers > 1)
2334 {
2335 /*
2336 * If the device supports string transfers, ask it to do as
2337 * much as it wants. The rest is done with single-word transfers.
2338 */
2339 const RTGCUINTREG cTransfersOrg = cTransfers;
2340 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
2341 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2342 pRegFrame->rsi = ((pRegFrame->rsi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2343 | (pRegFrame->rsi & ~fAddrMask);
2344 }
2345
2346#ifdef IN_RC
2347 MMGCRamRegisterTrapHandler(pVM);
2348#endif
2349
2350 while (cTransfers && rcStrict == VINF_SUCCESS)
2351 {
2352 uint32_t u32Value = 0;
2353 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2354 if (rcStrict != VINF_SUCCESS)
2355 break;
2356 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2357 if (!IOM_SUCCESS(rcStrict))
2358 break;
2359 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbTransfer);
2360 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2361 | (pRegFrame->rsi & ~fAddrMask);
2362 cTransfers--;
2363 }
2364
2365#ifdef IN_RC
2366 MMGCRamDeregisterTrapHandler(pVM);
2367#endif
2368
2369 /* Update rcx on exit. */
2370 if (uPrefix & DISPREFIX_REP)
2371 pRegFrame->rcx = (cTransfers & fAddrMask)
2372 | (pRegFrame->rcx & ~fAddrMask);
2373
2374 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2375 return rcStrict;
2376}
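
/*
 * As with INS, the PGMVerifyAccess probe is done before any port I/O: a port
 * write cannot be undone, so a guest page fault discovered midway through a
 * REP OUTS would leave the transfer half-committed with no exact way to
 * restart it.  Falling back to full instruction emulation up front avoids
 * that.
 */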
2377
2378
2379#if !defined(VBOX_WITH_FIRST_IEM_STEP) || defined(IN_RC) /* Latter for IOMRCIOPortHandler */
2380/**
2381 * [REP*] OUTSB/OUTSW/OUTSD
2382 * DS:ESI,DX[,ECX]
2383 *
2384 * @returns Strict VBox status code. Informational status codes other than the one documented
2385 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2386 * @retval VINF_SUCCESS Success.
2387 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2388 * status code must be passed on to EM.
2389 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2390 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
2391 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2392 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2393 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2394 *
2395 * @param pVM The virtual machine.
2396 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2397 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2398 * @param pCpu Disassembler CPU state.
2399 */
2400VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2401{
2402 /*
2403 * Get port number from the first parameter.
2404 * And get the I/O register size from the opcode / prefix.
2405 */
2406 uint64_t Port = 0;
2407 unsigned cb = 0;
2408 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &Port, &cb);
2409 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
2410 if (pCpu->pCurInstr->uOpcode == OP_OUTSB)
2411 cb = 1;
2412 else
2413 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bit modes */
2414
2415 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2416 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2417 {
2418 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2419 return rcStrict;
2420 }
2421
2422 return IOMInterpretOUTSEx(pVM, pVCpu, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2423}
2424#endif /* !IEM || RC */
2425
2426#ifndef IN_RC
2427
2428/**
2429 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2430 *
2431 * (This is a special optimization used by the VGA device.)
2432 *
2433 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2434 * remapping is made.
2435 *
2436 * @param pVM The virtual machine.
2437 * @param GCPhys The address of the MMIO page to be changed.
2438 * @param GCPhysRemapped The address of the MMIO2 page.
2439 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2440 * for the time being.
2441 */
2442VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2443{
2444# ifndef IEM_VERIFICATION_MODE_FULL
2445 /* Currently only called from the VGA device during MMIO. */
2446 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2447 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2448 PVMCPU pVCpu = VMMGetCpu(pVM);
2449
2450 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2451 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2452 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2453 && !HMIsNestedPagingActive(pVM)))
2454 return VINF_SUCCESS; /* ignore */
2455
2456 int rc = IOM_LOCK_SHARED(pVM);
2457 if (RT_FAILURE(rc))
2458 return VINF_SUCCESS; /* better luck the next time around */
2459
2460 /*
2461 * Lookup the context range node the page belongs to.
2462 */
2463 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2464 AssertMsgReturn(pRange,
2465 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2466
2467 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2468 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2469
2470 /*
2471 * Do the aliasing; page align the addresses since PGM is picky.
2472 */
2473 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2474 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2475
2476 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2477
2478 IOM_UNLOCK_SHARED(pVM);
2479 AssertRCReturn(rc, rc);
2480
2481 /*
2482 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2483 * can simply prefetch it.
2484 *
2485 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2486 */
2487# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2488# ifdef VBOX_STRICT
2489 uint64_t fFlags;
2490 RTHCPHYS HCPhys;
2491 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2492 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2493# endif
2494# endif
2495 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2496 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2497# endif /* !IEM_VERIFICATION_MODE_FULL */
2498 return VINF_SUCCESS;
2499}
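
/*
 * Condensed sketch of how a device could use this remapping (addresses are
 * hypothetical; the real user is the VGA device): alias the MMIO page to a
 * RAM-like MMIO2 page for direct access, then undo it with
 * IOMMMIOResetRegion when trapping is needed again.
 *
 * @code
 *    rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMmioPage, GCPhysMmio2Page,
 *                             X86_PTE_RW | X86_PTE_P);
 *    // ... the guest now accesses the page without #PF intercepts ...
 *    rc = IOMMMIOResetRegion(pVM, GCPhysMmioPage);   // restore trapping
 * @endcode
 */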
2500
2501
2502# ifndef IEM_VERIFICATION_MODE_FULL
2503/**
2504 * Mapping a HC page in place of an MMIO page for direct access.
2505 *
2506 * (This is a special optimization used by the APIC in the VT-x case.)
2507 *
2508 * @returns VBox status code.
2509 *
2510 * @param pVM Pointer to the VM.
2511 * @param pVCpu Pointer to the VMCPU.
2512 * @param GCPhys The address of the MMIO page to be changed.
2513 * @param HCPhys The address of the host physical page.
2514 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2515 * for the time being.
2516 */
2517VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2518{
2519 /* Currently only called from VT-x code during a page fault. */
2520 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2521
2522 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2523 Assert(HMIsEnabled(pVM));
2524
2525 /*
2526 * Lookup the context range node the page belongs to.
2527 */
2528#ifdef VBOX_STRICT
2529 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2530 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2531 AssertMsgReturn(pRange,
2532 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2533 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2534 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2535#endif
2536
2537 /*
2538 * Do the aliasing; page align the addresses since PGM is picky.
2539 */
2540 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2541 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2542
2543 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2544 AssertRCReturn(rc, rc);
2545
2546 /*
2547 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2548 * can simply prefetch it.
2549 *
2550 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2551 */
2552 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2553 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2554 return VINF_SUCCESS;
2555}
2556#endif /* !IEM_VERIFICATION_MODE_FULL */
2557
2558
2559/**
2560 * Reset a previously modified MMIO region; restore the access flags.
2561 *
2562 * @returns VBox status code.
2563 *
2564 * @param pVM The virtual machine.
2565 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2566 */
2567VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2568{
2569 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2570
2571 PVMCPU pVCpu = VMMGetCpu(pVM);
2572
2573 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2574 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2575 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2576 && !HMIsNestedPagingActive(pVM)))
2577 return VINF_SUCCESS; /* ignore */
2578
2579 /*
2580 * Lookup the context range node the page belongs to.
2581 */
2582#ifdef VBOX_STRICT
2583 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2584 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2585 AssertMsgReturn(pRange,
2586 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2587 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2588 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2589#endif
2590
2591 /*
2592 * Call PGM to do the work.
2593 *
2594 * After the call, all the pages should be non-present... unless there is
2595 * a page pool flush pending (unlikely).
2596 */
2597 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2598 AssertRC(rc);
2599
2600#ifdef VBOX_STRICT
2601 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2602 {
2603 uint32_t cb = pRange->cb;
2604 GCPhys = pRange->GCPhys;
2605 while (cb)
2606 {
2607 uint64_t fFlags;
2608 RTHCPHYS HCPhys;
2609 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2610 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2611 cb -= PAGE_SIZE;
2612 GCPhys += PAGE_SIZE;
2613 }
2614 }
2615#endif
2616 return rc;
2617}
2618
2619#endif /* !IN_RC */
2620