VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@56417

Last change on this file since 56417 was 56417, checked in by vboxsync, 9 years ago

Fixed bug in IOMInterpretINSEx and IOMInterpretOUTSEx introduced with r101015.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 96.4 KB
/* $Id: IOMAllMMIO.cpp 56417 2015-06-14 14:25:31Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM
#include <VBox/vmm/iom.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm.h>
#include "IOMInline.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/**
 * Array for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0U,   /* 0 - invalid */
    0,     /* *1 == 2^0 */
    1,     /* *2 == 2^1 */
    ~0U,   /* 3 - invalid */
    2,     /* *4 == 2^2 */
    ~0U,   /* 5 - invalid */
    ~0U,   /* 6 - invalid */
    ~0U,   /* 7 - invalid */
    3      /* *8 == 2^3 */
};

/**
 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
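
/*
 * Usage sketch (illustration only, mirroring how the REP interpreters below
 * use the macro): converting a transfer count into a byte count.
 *
 *     unsigned cb      = 4;                                  // dword accesses
 *     unsigned cbTotal = cTransfers << SIZE_2_SHIFT(cb);     // cTransfers * 4
 *
 * Only sizes 1, 2, 4 and 8 are valid; the other table entries are ~0U, so
 * shifting by them is undefined and callers must never pass those sizes.
 */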


/**
 * Returns the contents of register or immediate data of instruction's parameter.
 *
 * @returns true on success.
 *
 * @todo    Get rid of this code. Use DISQueryParamVal instead
 *
 * @param   pCpu        Pointer to the current disassembler context.
 * @param   pParam      Pointer to the parameter of the instruction to process.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest structure.
 * @param   pu64Data    Where to store the retrieved data.
 * @param   pcbSize     Where to store the size of the data (1, 2, 4, 8).
 */
bool iomGetRegImmData(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t *pu64Data, unsigned *pcbSize)
{
    NOREF(pCpu);
    if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32))
    {
        *pcbSize  = 0;
        *pu64Data = 0;
        return false;
    }

    /* divide and conquer */
    if (pParam->fUse & (DISUSE_REG_GEN64 | DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8))
    {
        if (pParam->fUse & DISUSE_REG_GEN32)
        {
            *pcbSize = 4;
            DISFetchReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t *)pu64Data);
            return true;
        }

        if (pParam->fUse & DISUSE_REG_GEN16)
        {
            *pcbSize = 2;
            DISFetchReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t *)pu64Data);
            return true;
        }

        if (pParam->fUse & DISUSE_REG_GEN8)
        {
            *pcbSize = 1;
            DISFetchReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t *)pu64Data);
            return true;
        }

        Assert(pParam->fUse & DISUSE_REG_GEN64);
        *pcbSize = 8;
        DISFetchReg64(pRegFrame, pParam->Base.idxGenReg, pu64Data);
        return true;
    }
    else
    {
        if (pParam->fUse & (DISUSE_IMMEDIATE64 | DISUSE_IMMEDIATE64_SX8))
        {
            *pcbSize  = 8;
            *pu64Data = pParam->uValue;
            return true;
        }

        if (pParam->fUse & (DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8))
        {
            *pcbSize  = 4;
            *pu64Data = (uint32_t)pParam->uValue;
            return true;
        }

        if (pParam->fUse & (DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE16_SX8))
        {
            *pcbSize  = 2;
            *pu64Data = (uint16_t)pParam->uValue;
            return true;
        }

        if (pParam->fUse & DISUSE_IMMEDIATE8)
        {
            *pcbSize  = 1;
            *pu64Data = (uint8_t)pParam->uValue;
            return true;
        }

        if (pParam->fUse & DISUSE_REG_SEG)
        {
            *pcbSize = 2;
            DISFetchRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL *)pu64Data);
            return true;
        } /* Else - error. */

        AssertFailed();
        *pcbSize  = 0;
        *pu64Data = 0;
        return false;
    }
}
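
/*
 * Typical usage (illustration, mirroring iomInterpretMOVxXWrite below):
 * fetching the register/immediate source operand of a faulting MOV before
 * handing the value to the device:
 *
 *     unsigned cb = 0;
 *     uint64_t u64Data = 0;
 *     if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb))
 *         rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
 */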


/**
 * Saves data to 8/16/32/64-bit general purpose or segment register defined by
 * instruction's parameter.
 *
 * @returns true on success.
 * @param   pCpu        Pointer to the current disassembler context.
 * @param   pParam      Pointer to the parameter of the instruction to process.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest structure.
 * @param   u64Data     8/16/32/64 bit data to store.
 */
bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u64Data)
{
    NOREF(pCpu);
    if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32 | DISUSE_DISPLACEMENT64 | DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE16_SX8))
    {
        return false;
    }

    if (pParam->fUse & DISUSE_REG_GEN32)
    {
        DISWriteReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t)u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_GEN64)
    {
        DISWriteReg64(pRegFrame, pParam->Base.idxGenReg, u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_GEN16)
    {
        DISWriteReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t)u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_GEN8)
    {
        DISWriteReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t)u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_SEG)
    {
        DISWriteRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL)u64Data);
        return true;
    }

    /* Else - error. */
    return false;
}


/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     The value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing =    (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                                  || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
                R3STRING(pRange->pszDesc)));
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
    for (;;)
    {
        unsigned const  offAccess  = GCPhys & 3;
        unsigned        cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    /** @todo What if we've split a transfer and already read
                     *        something?  Since writes generally have side effects we
                     *        could be kind of screwed here...
                     *
                     *        Fix: Save the current state and resume it in ring-3. Requires EM to not go
                     *        to REM for MMIO accesses (like it may currently do). */
                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue  &  u32GivenMask);
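
        /* Example: a one byte write of 0x5a at offAccess=1 gives
           u32GivenValue=0x00005a00 and u32GivenMask=0x0000ff00 after the
           shift, so bytes 0, 2 and 3 come from u32MissingValue and byte 1
           from the guest value. */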

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects we could be
                 *        kind of screwed here...
                 *
                 *        Fix: Save the current state and resume it in ring-3. Requires EM to not go
                 *        to REM for MMIO accesses (like it may currently do). */
                LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}


/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
                                   const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                          GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rcStrict = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
    }
    else
        rcStrict = VINF_SUCCESS;

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}
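
/*
 * Illustration: only aligned dword accesses (and aligned qword accesses when
 * the write mode allows them, or any access in pass-through mode) hit the
 * device callback directly above; e.g. a 2-byte write at (GCPhysFault & 3) == 2
 * on a DWORD_READ_MISSING range is decomposed by iomMMIODoComplicatedWrite,
 * which reads the containing dword, merges the two given bytes and writes the
 * whole dword back.
 */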


/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects we could be
                 *        kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8;

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}
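
/*
 * Example (illustration): a 2-byte read with (GCPhys & 3) == 3 crosses a dword
 * boundary, so the loop above performs two device reads: byte 3 of the first
 * dword and byte 0 of the following dword are copied into the caller's buffer.
 */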


/**
 * Implements VINF_IOM_MMIO_UNUSED_FF.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the 0xff bytes.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0xff); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0xff);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_00.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the zeros.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0x00); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0x00);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                       void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (   cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                         pvValue, cbValue);
        else
            rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rcStrict = VINF_IOM_MMIO_UNUSED_FF;
    if (rcStrict != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rcStrict))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}


#ifndef VBOX_WITH_2ND_IEM_STEP

/**
 * MOV reg, mem         (read)
 * MOVZX reg, mem       (read)
 * MOVSX reg, mem       (read)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the data size from parameter 2,
     * and call the handler function to get the data.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));

    uint64_t u64Data = 0;
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
    if (rc == VINF_SUCCESS)
    {
        /*
         * Do sign extension for MOVSX.
         */
        /** @todo checkup MOVSX implementation! */
        if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
        {
            if (cb == 1)
            {
                /* DWORD <- BYTE */
                int64_t iData = (int8_t)u64Data;
                u64Data = (uint64_t)iData;
            }
            else
            {
                /* DWORD <- WORD */
                int64_t iData = (int16_t)u64Data;
                u64Data = (uint64_t)iData;
            }
        }

        /*
         * Store the result to register (parameter 1).
         */
        bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
        AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
    }

    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * MOV mem, reg|imm     (write)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                  PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);

    /*
     * Get data to write from second parameter,
     * and call the callback to write it.
     */
    unsigned cb = 0;
    uint64_t u64Data = 0;
    bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
    AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);

    int rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/** Wrapper for reading virtual memory. */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    NOREF(pVCpu);
    int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
    /* Page may be protected and not directly accessible. */
    if (rc == VERR_ACCESS_DENIED)
        rc = VINF_IOM_R3_IOPORT_WRITE;
    return rc;
#else
    return VBOXSTRICTRC_VAL(PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM));
#endif
}


/** Wrapper for writing virtual memory. */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code. Some thought needs to be spent on theoretical concurrency issues as
     *        well since we're not behind the pgm lock and the handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return VBOXSTRICTRC_VAL(PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM));
#endif
}


#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM          The virtual machine.
 * @param   fWriteAccess Whether the faulting access is a write.
 * @param   pRegFrame    Trap register frame.
 * @param   GCPhysFault  The GC physical address corresponding to pvFault.
 * @param   pCpu         Disassembler CPU state.
 * @param   pRange       Pointer to the MMIO range.
 * @param   ppStat       Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
                            PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qwords count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_R3_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->fPrefix & DISPREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_R3_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_R3_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
            {
                iomMmioReleaseRange(pVM, pRange);
                return VINF_IOM_R3_MMIO_READ_WRITE;
            }

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;
                rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
            iomMmioReleaseRange(pVM, pRange);
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */


/**
 * Gets the address / opcode mask corresponding to the given CPU mode.
 *
 * @returns Mask.
 * @param   enmCpuMode  CPU mode.
 */
static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
{
    switch (enmCpuMode)
    {
        case DISCPUMODE_16BIT: return UINT16_MAX;
        case DISCPUMODE_32BIT: return UINT32_MAX;
        case DISCPUMODE_64BIT: return UINT64_MAX;
        default:
            AssertFailedReturn(UINT32_MAX);
    }
}
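
/*
 * Example (illustration): with a 16-bit address size the REP interpreters
 * below update the index registers modulo the mask, e.g.
 *
 *     pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
 *                    | (pRegFrame->rdi & ~fAddrMask);
 *
 * so only the low 16 bits of rdi change, just like the CPU would behave with
 * an address-size override prefix.
 */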


/**
 * [REP] STOSB
 * [REP] STOSW
 * [REP] STOSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
                            PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get bytes/words/dwords/qwords count to copy.
     */
    uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->rcx & fAddrMask;
        if (!cTransfers)
            return VINF_SUCCESS;
    }

/** @todo r=bird: bounds checks! */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif


    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (   pRange->CTX_SUFF(pfnFillCallback)
        && cb <= 4 /* can only fill 32-bit values */)
    {
        /*
         * Use the fill callback.
         */
        /** @todo pfnFillCallback must return number of bytes successfully written!!! */
        if (offIncrement > 0)
        {
            /* addr++ variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
        else
        {
            /* addr-- variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                   Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
    }
    else
    {
        /*
         * Use the write callback.
         */
        Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
        uint64_t u64Data = pRegFrame->rax;

        /* fill loop. */
        do
        {
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
            if (rc != VINF_SUCCESS)
                break;

            Phys += offIncrement;
            pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
                           | (pRegFrame->rdi & ~fAddrMask);
            cTransfers--;
        } while (cTransfers);

        /* Update rcx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->rcx = (cTransfers & fAddrMask)
                           | (pRegFrame->rcx & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}
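
/*
 * Worked example (illustration): 'rep stosw' with DF=0, 32-bit addressing,
 * ecx=3 and ax=0xabcd writes 0xabcd at GCPhysFault+0, +2 and +4 via the fill
 * or write callback, advances edi by 6 and leaves ecx=0; with DF=1 the
 * offsets descend instead.
 */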


/**
 * [REP] LODSB
 * [REP] LODSW
 * [REP] LODSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * We do not support segment prefixes or REP*.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

    /*
     * Perform read.
     */
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb));
    if (rc == VINF_SUCCESS)
    {
        uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
        pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
                       | (pRegFrame->rsi & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * CMP [MMIO], reg|imm
 * CMP reg|imm, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                           PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the operands.
     */
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
        /* cmp reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
        /* cmp [MMIO], reg|imm. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
    else
    {
        AssertMsgFailed(("Disassembler CMP problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* Emulate CMP and update guest flags. */
        uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * AND [MMIO], reg|imm
 * AND reg, [MMIO]
 * OR [MMIO], reg|imm
 * OR reg, [MMIO]
 * XOR [MMIO], reg|imm
 * XOR reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   pfnEmulate  Instruction emulation function.
 */
static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                                PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
{
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    bool fAndWrite;
    int rc;

#ifdef LOG_ENABLED
    const char *pszInstr;

    if (pCpu->pCurInstr->uOpcode == OP_XOR)
        pszInstr = "Xor";
    else if (pCpu->pCurInstr->uOpcode == OP_OR)
        pszInstr = "Or";
    else if (pCpu->pCurInstr->uOpcode == OP_AND)
        pszInstr = "And";
    else
        pszInstr = "OrXorAnd??";
#endif

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and reg, [MMIO]. */
        Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
        fAndWrite = false;
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and [MMIO], reg|imm. */
        fAndWrite = true;
        if (    (pRange->CTX_SUFF(pfnReadCallback)  || !pRange->pfnReadCallbackR3)
            &&  (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
            rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        AssertMsgFailed(("Disassembler AND problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
        /* Emulate AND and update guest flags. */
        uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);

        LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));

        if (fAndWrite)
            /* Store result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        else
        {
            /* Store result to register. */
            bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
            AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
        }
        if (rc == VINF_SUCCESS)
        {
            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
            iomMMIOStatLength(pVM, cb);
        }
    }

    return rc;
}


/**
 * TEST [MMIO], reg|imm
 * TEST reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* test reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* test [MMIO], reg|imm. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
    }
    else
    {
        AssertMsgFailed(("Disassembler TEST problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif

        /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
        uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * BT [MMIO], reg|imm
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                          PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    uint64_t uBit  = 0;
    uint64_t uData = 0;
    unsigned cbIgnored;

    if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
    {
        AssertMsgFailed(("Disassembler BT problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    /* The size of the memory operand only matters here. */
    unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);

    /* bt [MMIO], reg|imm. */
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData));
    if (rc == VINF_SUCCESS)
    {
        /* Find the bit inside the faulting address; the single-bit u1CF
           bitfield assignment keeps only bit 0 of the shifted value. */
        pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
        iomMMIOStatLength(pVM, cbData);
    }

    return rc;
}

/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_R3_MMIO_READ_WRITE;

    int rc;
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassembler XCHG problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}

#endif /* !VBOX_WITH_2ND_IEM_STEP */

/**
 * \#PF Handler callback for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static VBOXSTRICTRC iomMMIOHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
{
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else
    /*
     * Locate the statistics.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case,
     * so do the simple test first and then try to deal with uErrorCode being N/A.
     */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * The range is already retained; now enter the device's critical section.
     */
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

#ifdef VBOX_WITH_2ND_IEM_STEP

    /*
     * Let IEM call us back via iomMmioHandler.
     */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    return rcStrict;

#else

    /*
     * Disassemble the instruction and interpret it.
     */
    PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
    unsigned cbOp;
    rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
    if (RT_FAILURE(rc))
    {
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }
    switch (pDis->pCurInstr->uOpcode)
    {
        case OP_MOV:
        case OP_MOVZX:
        case OP_MOVSX:
        {
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
            AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
            if (uErrorCode != UINT32_MAX    /* EPT+MMIO optimization */
                ? uErrorCode & X86_TRAP_PF_RW
                : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
                rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
            else
                rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
            break;
        }


# ifdef IOM_WITH_MOVS_SUPPORT
        case OP_MOVSB:
        case OP_MOVSWD:
        {
            if (uErrorCode == UINT32_MAX)
                rc = VINF_IOM_R3_MMIO_READ_WRITE;
            else
            {
                STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
                PSTAMPROFILE pStat = NULL;
                rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
                STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
            }
            break;
        }
# endif

        case OP_STOSB:
        case OP_STOSWD:
            Assert(uErrorCode & X86_TRAP_PF_RW);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
            rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
            break;

        case OP_LODSB:
        case OP_LODSWD:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
            rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
            break;

        case OP_CMP:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
            rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
            break;

        case OP_AND:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
            break;

        case OP_OR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
            break;

        case OP_XOR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
            break;

        case OP_TEST:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
            rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
            break;

        case OP_BT:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
            rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
            break;

        case OP_XCHG:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
            rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
            break;


        /*
         * The instruction isn't supported. Hand it on to ring-3.
         */
        default:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
            break;
    }

    /*
     * On success advance EIP.
     */
    if (rc == VINF_SUCCESS)
        pCtxCore->rip += cbOp;
    else
    {
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
# if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
        switch (rc)
        {
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
                break;
            case VINF_IOM_R3_MMIO_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
                break;
        }
# endif
    }

    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    return rc;
#endif /* !VBOX_WITH_2ND_IEM_STEP */
}
1900
1901
1902/**
1903 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1904 * \#PF access handler callback for MMIO pages.}
1905 *
1906 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
1907 */
1908DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
1909 RTGCPHYS GCPhysFault, void *pvUser)
1910{
1911 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1912 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1913 return iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1914}
1915
1916
1917/**
1918 * Physical access handler for MMIO ranges.
1919 *
1920 * @returns VBox status code (appropriate for GC return).
1921 * @param pVM Pointer to the VM.
1922 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1923 * @param uErrorCode CPU Error code.
1924 * @param pCtxCore Trap register frame.
1925 * @param GCPhysFault The GC physical address.
1926 */
1927VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1928{
1929 /*
1930 * We don't have a range here, so look it up before calling the common function.
1931 */
1932 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1933#ifndef IN_RING3
1934 if (rc2 == VERR_SEM_BUSY)
1935 return VINF_IOM_R3_MMIO_READ_WRITE;
1936#endif
1937 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1938 if (RT_UNLIKELY(!pRange))
1939 {
1940 IOM_UNLOCK_SHARED(pVM);
1941 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1942 }
1943 iomMmioRetainRange(pRange);
1944 IOM_UNLOCK_SHARED(pVM);
1945
1946 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
1947
1948 iomMmioReleaseRange(pVM, pRange);
1949 return VBOXSTRICTRC_VAL(rcStrict);
1950}
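/* The lookup pattern above, sketched in isolation (hypothetical caller; the
 * helpers are the ones used throughout this file):
 *
 *     int rc = IOM_LOCK_SHARED(pVM);
 *     PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
 *     if (pRange)
 *         iomMmioRetainRange(pRange);   // ref-count keeps it alive unlocked
 *     IOM_UNLOCK_SHARED(pVM);
 *     if (!pRange)
 *         return VERR_IOM_MMIO_RANGE_NOT_FOUND;
 *     ... use pRange without holding the IOM lock ...
 *     iomMmioReleaseRange(pVM, pRange);
 */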
1951
1952
1953/**
1954 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
1955 *
1956 * @remarks The @a pvUser argument points to the MMIO range entry.
1957 */
1958PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
1959 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
1960{
1961 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1962 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1963
1964 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
1965 AssertPtr(pRange);
1966 NOREF(pvPhys); NOREF(enmOrigin);
1967
1968 /*
1969 * Validate the range.
1970 */
1971 int rc = IOM_LOCK_SHARED(pVM);
1972 AssertRC(rc);
1973 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1974
1975 /*
1976 * Perform locking.
1977 */
1978 iomMmioRetainRange(pRange);
1979 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1980 IOM_UNLOCK_SHARED(pVM);
1981 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1982 if (rcStrict == VINF_SUCCESS)
1983 {
1984 /*
1985 * Perform the access.
1986 */
1987 if (enmAccessType == PGMACCESSTYPE_READ)
1988 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1989 else
1990 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1991
1992 /* Check the return code. */
1993#ifdef IN_RING3
1994 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
1995#else
1996 AssertMsg( rcStrict == VINF_SUCCESS
1997 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
1998 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
1999 || rcStrict == VINF_EM_DBG_STOP
2000 || rcStrict == VINF_EM_DBG_BREAKPOINT
2001 || rcStrict == VINF_EM_OFF
2002 || rcStrict == VINF_EM_SUSPEND
2003 || rcStrict == VINF_EM_RESET
2004 //|| rcStrict == VINF_EM_HALT /* ?? */
2005 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
2006 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
2007#endif
2008
2009 iomMmioReleaseRange(pVM, pRange);
2010 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2011 }
2012 else
2013 iomMmioReleaseRange(pVM, pRange);
2014 return rcStrict;
2015}
2016
2017
2018#ifdef IN_RING3 /* Only used by REM. */
2019
2020/**
2021 * Reads an MMIO register.
2022 *
2023 * @returns VBox status code.
2024 *
2025 * @param pVM Pointer to the VM.
2026 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2027 * @param GCPhys The physical address to read.
2028 * @param pu32Value Where to store the value read.
2029 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
2030 */
2031VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
2032{
2033 /* Take the IOM lock before performing any MMIO. */
2034 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2035#ifndef IN_RING3
2036 if (rc == VERR_SEM_BUSY)
2037 return VINF_IOM_R3_MMIO_READ;
2038#endif
2039 AssertRC(VBOXSTRICTRC_VAL(rc));
2040#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2041 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
2042#endif
2043
2044 /*
2045 * Lookup the current context range node and statistics.
2046 */
2047 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2048 if (!pRange)
2049 {
2050 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2051 IOM_UNLOCK_SHARED(pVM);
2052 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2053 }
2054 iomMmioRetainRange(pRange);
2055#ifndef VBOX_WITH_STATISTICS
2056 IOM_UNLOCK_SHARED(pVM);
2057
2058#else /* VBOX_WITH_STATISTICS */
2059 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2060 if (!pStats)
2061 {
2062 iomMmioReleaseRange(pVM, pRange);
2063# ifdef IN_RING3
2064 return VERR_NO_MEMORY;
2065# else
2066 return VINF_IOM_R3_MMIO_READ;
2067# endif
2068 }
2069 STAM_COUNTER_INC(&pStats->Accesses);
2070#endif /* VBOX_WITH_STATISTICS */
2071
2072 if (pRange->CTX_SUFF(pfnReadCallback))
2073 {
2074 /*
2075 * Perform locking.
2076 */
2077 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2078 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
2079 if (rc != VINF_SUCCESS)
2080 {
2081 iomMmioReleaseRange(pVM, pRange);
2082 return rc;
2083 }
2084
2085 /*
2086 * Perform the read and deal with the result.
2087 */
2088 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
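 /* Aligned dword/qword accesses, and ranges declaring pass-through read
  * mode, go straight to the device callback; everything else is split up
  * or widened by iomMMIODoComplicatedRead. */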
2089 if ( (cbValue == 4 && !(GCPhys & 3))
2090 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
2091 || (cbValue == 8 && !(GCPhys & 7)) )
2092 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
2093 pu32Value, (unsigned)cbValue);
2094 else
2095 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
2096 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2097 switch (VBOXSTRICTRC_VAL(rc))
2098 {
2099 case VINF_SUCCESS:
2100 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2101 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2102 iomMmioReleaseRange(pVM, pRange);
2103 return rc;
2104#ifndef IN_RING3
2105 case VINF_IOM_R3_MMIO_READ:
2106 case VINF_IOM_R3_MMIO_READ_WRITE:
2107 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2108#endif
2109 default:
2110 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2111 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2112 iomMmioReleaseRange(pVM, pRange);
2113 return rc;
2114
2115 case VINF_IOM_MMIO_UNUSED_00:
2116 iomMMIODoRead00s(pu32Value, cbValue);
2117 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2118 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2119 iomMmioReleaseRange(pVM, pRange);
2120 return VINF_SUCCESS;
2121
2122 case VINF_IOM_MMIO_UNUSED_FF:
2123 iomMMIODoReadFFs(pu32Value, cbValue);
2124 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2125 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2126 iomMmioReleaseRange(pVM, pRange);
2127 return VINF_SUCCESS;
2128 }
2129 /* not reached */
2130 }
2131#ifndef IN_RING3
2132 if (pRange->pfnReadCallbackR3)
2133 {
2134 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2135 iomMmioReleaseRange(pVM, pRange);
2136 return VINF_IOM_R3_MMIO_READ;
2137 }
2138#endif
2139
2140 /*
2141 * Unassigned memory - this is actually not supposed to happen...
2142 */
2143 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
2144 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2145 iomMMIODoReadFFs(pu32Value, cbValue);
2146 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2147 iomMmioReleaseRange(pVM, pRange);
2148 return VINF_SUCCESS;
2149}
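/* A minimal usage sketch (hypothetical caller; the address is made up):
 *
 *     uint32_t u32Value = 0;
 *     VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, UINT32_C(0xf0000000),
 *                                         &u32Value, sizeof(uint32_t));
 *     if (rcStrict == VINF_SUCCESS)
 *         Log(("MMIO register reads %#RX32\n", u32Value));
 */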
2150
2151
2152/**
2153 * Writes to an MMIO register.
2154 *
2155 * @returns VBox status code.
2156 *
2157 * @param pVM Pointer to the VM.
2158 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2159 * @param GCPhys The physical address to write to.
2160 * @param u32Value The value to write.
2161 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
2162 */
2163VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
2164{
2165 /* Take the IOM lock before performing any MMIO. */
2166 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2167#ifndef IN_RING3
2168 if (rc == VERR_SEM_BUSY)
2169 return VINF_IOM_R3_MMIO_WRITE;
2170#endif
2171 AssertRC(VBOXSTRICTRC_VAL(rc));
2172#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2173 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2174#endif
2175
2176 /*
2177 * Lookup the current context range node.
2178 */
2179 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2180 if (!pRange)
2181 {
2182 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2183 IOM_UNLOCK_SHARED(pVM);
2184 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2185 }
2186 iomMmioRetainRange(pRange);
2187#ifndef VBOX_WITH_STATISTICS
2188 IOM_UNLOCK_SHARED(pVM);
2189
2190#else /* VBOX_WITH_STATISTICS */
2191 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2192 if (!pStats)
2193 {
2194 iomMmioReleaseRange(pVM, pRange);
2195# ifdef IN_RING3
2196 return VERR_NO_MEMORY;
2197# else
2198 return VINF_IOM_R3_MMIO_WRITE;
2199# endif
2200 }
2201 STAM_COUNTER_INC(&pStats->Accesses);
2202#endif /* VBOX_WITH_STATISTICS */
2203
2204 if (pRange->CTX_SUFF(pfnWriteCallback))
2205 {
2206 /*
2207 * Perform locking.
2208 */
2209 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2210 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2211 if (rc != VINF_SUCCESS)
2212 {
2213 iomMmioReleaseRange(pVM, pRange);
2214 return rc;
2215 }
2216
2217 /*
2218 * Perform the write.
2219 */
2220 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
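 /* Same fast-path rule as the read side: aligned 4/8 byte accesses and
  * pass-through ranges hit the device callback directly. */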
2221 if ( (cbValue == 4 && !(GCPhys & 3))
2222 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2223 || (cbValue == 8 && !(GCPhys & 7)) )
2224 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2225 GCPhys, &u32Value, (unsigned)cbValue);
2226 else
2227 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2228 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2229#ifndef IN_RING3
2230 if ( rc == VINF_IOM_R3_MMIO_WRITE
2231 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2232 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2233#endif
2234 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2235 iomMmioReleaseRange(pVM, pRange);
2236 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2237 return rc;
2238 }
2239#ifndef IN_RING3
2240 if (pRange->pfnWriteCallbackR3)
2241 {
2242 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2243 iomMmioReleaseRange(pVM, pRange);
2244 return VINF_IOM_R3_MMIO_WRITE;
2245 }
2246#endif
2247
2248 /*
2249 * No write handler, nothing to do.
2250 */
2251 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2252 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2253 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2254 iomMmioReleaseRange(pVM, pRange);
2255 return VINF_SUCCESS;
2256}
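/* Companion sketch to the read example above (hypothetical caller,
 * made-up address and value):
 *
 *     VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, pVCpu, UINT32_C(0xf0000000),
 *                                          UINT32_C(0xdeadbeef), sizeof(uint32_t));
 *     AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 */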
2257
2258#endif /* IN_RING3 - only used by REM. */
2259#ifndef VBOX_WITH_2ND_IEM_STEP
2260
2261/**
2262 * [REP*] INSB/INSW/INSD
2263 * ES:EDI,DX[,ECX]
2264 *
2265 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2266 *
2267 * @returns Strict VBox status code. Informational status codes other than the one documented
2268 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2269 * @retval VINF_SUCCESS Success.
2270 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2271 * status code must be passed on to EM.
2272 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2273 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2274 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2275 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2276 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2277 *
2278 * @param pVM The virtual machine.
2279 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2280 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2281 * @param uPort IO Port
2282 * @param uPrefix IO instruction prefix
2283 * @param enmAddrMode The address mode.
2284 * @param cbTransfer Size of transfer unit
2285 */
2286VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2287 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2288{
2289 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2290
2291 /*
2292 * We do not support REPNE or decrementing destination
2293 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2294 */
2295 if ( (uPrefix & DISPREFIX_REPNE)
2296 || pRegFrame->eflags.Bits.u1DF)
2297 return VINF_EM_RAW_EMULATE_INSTR;
2298
2299 /*
2300 * Get bytes/words/dwords count to transfer.
2301 */
2302 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
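 /* fAddrMask is 0xffff, 0xffffffff or UINT64_MAX for 16, 32 and 64-bit
  * addressing; it confines every pointer/counter update below to the
  * address size the guest instruction used. */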
2303 RTGCUINTREG cTransfers = 1;
2304 if (uPrefix & DISPREFIX_REP)
2305 {
2306#ifndef IN_RC
2307 if ( CPUMIsGuestIn64BitCode(pVCpu)
2308 && pRegFrame->rcx >= _4G)
2309 return VINF_EM_RAW_EMULATE_INSTR;
2310#endif
2311 cTransfers = pRegFrame->rcx & fAddrMask;
2312 if (!cTransfers)
2313 return VINF_SUCCESS;
2314 }
2315
2316 /* Convert destination address es:edi. */
2317 RTGCPTR GCPtrDst;
2318 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2319 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2320 &GCPtrDst);
2321 if (RT_FAILURE(rc2))
2322 {
2323 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2324 return VINF_EM_RAW_EMULATE_INSTR;
2325 }
2326
2327 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2328 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2329 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2330 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2331 if (rc2 != VINF_SUCCESS)
2332 {
2333 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2334 return VINF_EM_RAW_EMULATE_INSTR;
2335 }
2336
2337 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2338 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2339 if (cTransfers > 1)
2340 {
2341 /*
2342 * Work the string page by page, letting the device handle as much
2343 * as it likes via the string I/O interface.
2344 */
2345 for (;;)
2346 {
2347 PGMPAGEMAPLOCK Lock;
2348 void *pvDst;
2349 rc2 = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2350 if (RT_SUCCESS(rc2))
2351 {
2352 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK)) / cbTransfer;
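 /* E.g. a page offset of 0xff8 with cbTransfer=4 leaves room for
  * exactly two transfers before the page boundary. */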
2353 if (cMaxThisTime > cTransfers)
2354 cMaxThisTime = cTransfers;
2355 if (!cMaxThisTime)
2356 break;
2357 uint32_t cThisTime = cMaxThisTime;
2358
2359 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, pvDst, &cThisTime, cbTransfer);
2360 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2361 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2362
2363 PGMPhysReleasePageMappingLock(pVM, &Lock);
2364
2365 uint32_t const cActual = cMaxThisTime - cThisTime;
2366 uint32_t const cbActual = cActual * cbTransfer;
2367 cTransfers -= cActual;
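 /* Only the low fAddrMask bits of RDI advance (and may wrap); e.g. with
  * 16-bit addressing, DI goes 0xfffe + 4 -> 0x0002 while the upper bits
  * of RDI stay untouched, matching real CPU behaviour. */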
2368 pRegFrame->rdi = ((pRegFrame->rdi + cbActual) & fAddrMask)
2369 | (pRegFrame->rdi & ~fAddrMask);
2370 GCPtrDst += cbActual;
2371
2372 if ( cThisTime
2373 || !cTransfers
2374 || rcStrict != VINF_SUCCESS
2375 || (GCPtrDst & PAGE_OFFSET_MASK))
2376 break;
2377 }
2378 else
2379 {
2380 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtr %#RGv -> %Rrc\n", GCPtrDst, rc2));
2381 break;
2382 }
2383 }
2384 }
2385
2386 /*
2387 * Single transfer / unmapped memory fallback.
2388 */
2389#ifdef IN_RC
2390 MMGCRamRegisterTrapHandler(pVM);
2391#endif
2392 while (cTransfers && rcStrict == VINF_SUCCESS)
2393 {
2394 uint32_t u32Value;
2395 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2396 if (!IOM_SUCCESS(rcStrict))
2397 break;
2398 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2399 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2400 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2401 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2402 | (pRegFrame->rdi & ~fAddrMask);
2403 cTransfers--;
2404 }
2405#ifdef IN_RC
2406 MMGCRamDeregisterTrapHandler(pVM);
2407#endif
2408
2409 /* Update rcx on exit. */
2410 if (uPrefix & DISPREFIX_REP)
2411 pRegFrame->rcx = (cTransfers & fAddrMask)
2412 | (pRegFrame->rcx & ~fAddrMask);
2413
2414 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2415 return rcStrict;
2416}
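/* Hypothetical invocation, roughly what a caller does after decoding a
 * 'rep insb' (all parameter values illustrative):
 *
 *     VBOXSTRICTRC rcStrict = IOMInterpretINSEx(pVM, pVCpu, pRegFrame,
 *                                               pRegFrame->dx, DISPREFIX_REP,
 *                                               DISCPUMODE_32BIT, 1);
 *     if (IOM_SUCCESS(rcStrict))
 *         ...advance RIP...
 */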
2417
2418
2419/**
2420 * [REP*] OUTSB/OUTSW/OUTSD
2421 * DS:ESI,DX[,ECX]
2422 *
2423 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2424 *
2425 * @returns Strict VBox status code. Informational status codes other than the one documented
2426 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2427 * @retval VINF_SUCCESS Success.
2428 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2429 * status code must be passed on to EM.
2430 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2431 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2432 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2433 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2434 *
2435 * @param pVM The virtual machine.
2436 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2437 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2438 * @param uPort IO Port
2439 * @param uPrefix IO instruction prefix
2440 * @param enmAddrMode The address mode.
2441 * @param cbTransfer Size of transfer unit
2442 *
2443 * @remarks This API will probably be replaced by IEM before long, so no use in
2444 * optimizing+fixing stuff too much here.
2445 */
2446VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2447 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2448{
2449 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2450
2451 /*
2452 * We do not support segment prefixes, REPNE or
2453 * decrementing source pointer.
2454 */
2455 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2456 || pRegFrame->eflags.Bits.u1DF)
2457 return VINF_EM_RAW_EMULATE_INSTR;
2458
2459 /*
2460 * Get bytes/words/dwords count to transfer.
2461 */
2462 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2463 RTGCUINTREG cTransfers = 1;
2464 if (uPrefix & DISPREFIX_REP)
2465 {
2466#ifndef IN_RC
2467 if ( CPUMIsGuestIn64BitCode(pVCpu)
2468 && pRegFrame->rcx >= _4G)
2469 return VINF_EM_RAW_EMULATE_INSTR;
2470#endif
2471 cTransfers = pRegFrame->rcx & fAddrMask;
2472 if (!cTransfers)
2473 return VINF_SUCCESS;
2474 }
2475
2476 /* Convert source address ds:esi. */
2477 RTGCPTR GCPtrSrc;
2478 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2479 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2480 &GCPtrSrc);
2481 if (RT_FAILURE(rc2))
2482 {
2483 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2484 return VINF_EM_RAW_EMULATE_INSTR;
2485 }
2486
2487 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2488 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2489 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2490 (cpl == 3) ? X86_PTE_US : 0);
2491 if (rc2 != VINF_SUCCESS)
2492 {
2493 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2494 return VINF_EM_RAW_EMULATE_INSTR;
2495 }
2496
2497 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2498 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2499 if (cTransfers > 1)
2500 {
2501 /*
2502 * Work the string page by page, letting the device handle as much
2503 * as it likes via the string I/O interface.
2504 */
2505 for (;;)
2506 {
2507 PGMPAGEMAPLOCK Lock;
2508 void const *pvSrc;
2509 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2510 if (RT_SUCCESS(rc2))
2511 {
2512 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK)) / cbTransfer;
2513 if (cMaxThisTime > cTransfers)
2514 cMaxThisTime = cTransfers;
2515 if (!cMaxThisTime)
2516 break;
2517 uint32_t cThisTime = cMaxThisTime;
2518
2519 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc, &cThisTime, cbTransfer);
2520 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2521 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2522
2523 PGMPhysReleasePageMappingLock(pVM, &Lock);
2524
2525 uint32_t const cActual = cMaxThisTime - cThisTime;
2526 uint32_t const cbActual = cActual * cbTransfer;
2527 cTransfers -= cActual;
2528 pRegFrame->rsi = ((pRegFrame->rsi + cbActual) & fAddrMask)
2529 | (pRegFrame->rsi & ~fAddrMask);
2530 GCPtrSrc += cbActual;
2531
2532 if ( cThisTime
2533 || !cTransfers
2534 || rcStrict != VINF_SUCCESS
2535 || (GCPtrSrc & PAGE_OFFSET_MASK))
2536 break;
2537 }
2538 else
2539 {
2540 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtrReadOnly %#RGv -> %Rrc\n", GCPtrSrc, rc2));
2541 break;
2542 }
2543 }
2544 }
2545
2546 /*
2547 * Single transfer / unmapped memory fallback.
2548 */
2549#ifdef IN_RC
2550 MMGCRamRegisterTrapHandler(pVM);
2551#endif
2552
2553 while (cTransfers && rcStrict == VINF_SUCCESS)
2554 {
2555 uint32_t u32Value = 0;
2556 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2557 if (rcStrict != VINF_SUCCESS)
2558 break;
2559 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2560 if (!IOM_SUCCESS(rcStrict))
2561 break;
2562 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
2563 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2564 | (pRegFrame->rsi & ~fAddrMask);
2565 cTransfers--;
2566 }
2567
2568#ifdef IN_RC
2569 MMGCRamDeregisterTrapHandler(pVM);
2570#endif
2571
2572 /* Update rcx on exit. */
2573 if (uPrefix & DISPREFIX_REP)
2574 pRegFrame->rcx = (cTransfers & fAddrMask)
2575 | (pRegFrame->rcx & ~fAddrMask);
2576
2577 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2578 return rcStrict;
2579}
2580
2581#endif /* !VBOX_WITH_2ND_IEM_STEP */
2582
2583
2584#ifndef IN_RC
2585
2586/**
2587 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2588 *
2589 * (This is a special optimization used by the VGA device.)
2590 *
2591 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2592 * remapping is made.
2593 *
2594 * @param pVM The virtual machine.
2595 * @param GCPhys The address of the MMIO page to be changed.
2596 * @param GCPhysRemapped The address of the MMIO2 page.
2597 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2598 * for the time being.
2599 */
2600VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2601{
2602# ifndef IEM_VERIFICATION_MODE_FULL
2603 /* Currently only called from the VGA device during MMIO. */
2604 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2605 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2606 PVMCPU pVCpu = VMMGetCpu(pVM);
2607
2608 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2609 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2610 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2611 && !HMIsNestedPagingActive(pVM)))
2612 return VINF_SUCCESS; /* ignore */
2613
2614 int rc = IOM_LOCK_SHARED(pVM);
2615 if (RT_FAILURE(rc))
2616 return VINF_SUCCESS; /* better luck the next time around */
2617
2618 /*
2619 * Lookup the context range node the page belongs to.
2620 */
2621 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2622 AssertMsgReturn(pRange,
2623 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2624
2625 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2626 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2627
2628 /*
2629 * Do the aliasing; page align the addresses since PGM is picky.
2630 */
2631 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2632 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2633
2634 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2635
2636 IOM_UNLOCK_SHARED(pVM);
2637 AssertRCReturn(rc, rc);
2638
2639 /*
2640 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2641 * can simply prefetch it.
2642 *
2643 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2644 */
2645# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2646# ifdef VBOX_STRICT
2647 uint64_t fFlags;
2648 RTHCPHYS HCPhys;
2649 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2650 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2651# endif
2652# endif
2653 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2654 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2655# endif /* !IEM_VERIFICATION_MODE_FULL */
2656 return VINF_SUCCESS;
2657}
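/* Hypothetical usage, modelled on the VGA optimization mentioned above
 * (both physical addresses are made up):
 *
 *     rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMmioPage, GCPhysMmio2Backing,
 *                              X86_PTE_RW | X86_PTE_P);
 *     ...
 *     rc = IOMMMIOResetRegion(pVM, GCPhysMmioPage); // restore interception
 */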
2658
2659
2660# ifndef IEM_VERIFICATION_MODE_FULL
2661/**
2662 * Mapping an HC page in place of an MMIO page for direct access.
2663 *
2664 * (This is a special optimization used by the APIC in the VT-x case.)
2665 *
2666 * @returns VBox status code.
2667 *
2668 * @param pVM Pointer to the VM.
2669 * @param pVCpu Pointer to the VMCPU.
2670 * @param GCPhys The address of the MMIO page to be changed.
2671 * @param HCPhys The address of the host physical page.
2672 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2673 * for the time being.
2674 */
2675VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2676{
2677 /* Currently only called from VT-x code during a page fault. */
2678 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2679
2680 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2681 Assert(HMIsEnabled(pVM));
2682
2683 /*
2684 * Lookup the context range node the page belongs to.
2685 */
2686# ifdef VBOX_STRICT
2687 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2688 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2689 AssertMsgReturn(pRange,
2690 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2691 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2692 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2693# endif
2694
2695 /*
2696 * Do the aliasing; page align the addresses since PGM is picky.
2697 */
2698 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2699 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2700
2701 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2702 AssertRCReturn(rc, rc);
2703
2704 /*
2705 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2706 * can simply prefetch it.
2707 *
2708 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2709 */
2710 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2711 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2712 return VINF_SUCCESS;
2713}
2714# endif /* !IEM_VERIFICATION_MODE_FULL */
2715
2716
2717/**
2718 * Reset a previously modified MMIO region; restore the access flags.
2719 *
2720 * @returns VBox status code.
2721 *
2722 * @param pVM The virtual machine.
2723 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2724 */
2725VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2726{
2727 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2728
2729 PVMCPU pVCpu = VMMGetCpu(pVM);
2730
2731 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2732 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2733 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2734 && !HMIsNestedPagingActive(pVM)))
2735 return VINF_SUCCESS; /* ignore */
2736
2737 /*
2738 * Lookup the context range node the page belongs to.
2739 */
2740# ifdef VBOX_STRICT
2741 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2742 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2743 AssertMsgReturn(pRange,
2744 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2745 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2746 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2747# endif
2748
2749 /*
2750 * Call PGM to do the work.
2751 *
2752 * After the call, all the pages should be non-present... unless there is
2753 * a page pool flush pending (unlikely).
2754 */
2755 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2756 AssertRC(rc);
2757
2758# ifdef VBOX_STRICT
2759 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2760 {
2761 uint32_t cb = pRange->cb;
2762 GCPhys = pRange->GCPhys;
2763 while (cb)
2764 {
2765 uint64_t fFlags;
2766 RTHCPHYS HCPhys;
2767 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2768 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2769 cb -= PAGE_SIZE;
2770 GCPhys += PAGE_SIZE;
2771 }
2772 }
2773# endif
2774 return rc;
2775}
2776
2777#endif /* !IN_RC */
2778