VirtualBox: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (r57358, "scm cleanup run")
1/* $Id: IOMAllMMIO.cpp 57358 2015-08-14 15:16:38Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#include <VBox/vmm/iem.h>
32#include "IOMInternal.h"
33#include <VBox/vmm/vm.h>
34#include <VBox/vmm/vmm.h>
35#include <VBox/vmm/hm.h>
36#include "IOMInline.h"
37
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52/** @def IEM_USE_IEM_INSTEAD
53 * Use IEM instead of IOM for interpreting MMIO accesses.
54 * Because of PATM/CSAM issues in raw-mode, we've split this up into the 2nd and
55 * 3rd IEM deployment steps. */
56#if ((defined(IN_RING3) || defined(IN_RING0)) && defined(VBOX_WITH_2ND_IEM_STEP)) \
57 || defined(VBOX_WITH_3RD_IEM_STEP)
58# define IEM_USE_IEM_INSTEAD
59#endif
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
65
66/**
67 * Array for fast conversion of an operand size (1/2/4/8 bytes) to a bit shift value.
68 */
69static const unsigned g_aSize2Shift[] =
70{
71 ~0U, /* 0 - invalid */
72 0, /* *1 == 2^0 */
73 1, /* *2 == 2^1 */
74 ~0U, /* 3 - invalid */
75 2, /* *4 == 2^2 */
76 ~0U, /* 5 - invalid */
77 ~0U, /* 6 - invalid */
78 ~0U, /* 7 - invalid */
79 3 /* *8 == 2^3 */
80};
81
82/**
83 * Macro for fast conversion of an operand size (1/2/4/8 bytes) to a bit shift value.
84 */
85#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
86
87
88/**
89 * Returns the contents of the register or the immediate data of the instruction's parameter.
90 *
91 * @returns true on success.
92 *
93 * @todo Get rid of this code. Use DISQueryParamVal instead
94 *
95 * @param pCpu Pointer to current disassembler context.
96 * @param pParam Pointer to parameter of instruction to process.
97 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
98 * @param pu64Data Where to store retrieved data.
99 * @param pcbSize Where to store the size of data (1, 2, 4, 8).
100 */
101bool iomGetRegImmData(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t *pu64Data, unsigned *pcbSize)
102{
103 NOREF(pCpu);
104 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32))
105 {
106 *pcbSize = 0;
107 *pu64Data = 0;
108 return false;
109 }
110
111 /* divide and conquer */
112 if (pParam->fUse & (DISUSE_REG_GEN64 | DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8))
113 {
114 if (pParam->fUse & DISUSE_REG_GEN32)
115 {
116 *pcbSize = 4;
117 DISFetchReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t *)pu64Data);
118 return true;
119 }
120
121 if (pParam->fUse & DISUSE_REG_GEN16)
122 {
123 *pcbSize = 2;
124 DISFetchReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t *)pu64Data);
125 return true;
126 }
127
128 if (pParam->fUse & DISUSE_REG_GEN8)
129 {
130 *pcbSize = 1;
131 DISFetchReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t *)pu64Data);
132 return true;
133 }
134
135 Assert(pParam->fUse & DISUSE_REG_GEN64);
136 *pcbSize = 8;
137 DISFetchReg64(pRegFrame, pParam->Base.idxGenReg, pu64Data);
138 return true;
139 }
140 else
141 {
142 if (pParam->fUse & (DISUSE_IMMEDIATE64 | DISUSE_IMMEDIATE64_SX8))
143 {
144 *pcbSize = 8;
145 *pu64Data = pParam->uValue;
146 return true;
147 }
148
149 if (pParam->fUse & (DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8))
150 {
151 *pcbSize = 4;
152 *pu64Data = (uint32_t)pParam->uValue;
153 return true;
154 }
155
156 if (pParam->fUse & (DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE16_SX8))
157 {
158 *pcbSize = 2;
159 *pu64Data = (uint16_t)pParam->uValue;
160 return true;
161 }
162
163 if (pParam->fUse & DISUSE_IMMEDIATE8)
164 {
165 *pcbSize = 1;
166 *pu64Data = (uint8_t)pParam->uValue;
167 return true;
168 }
169
170 if (pParam->fUse & DISUSE_REG_SEG)
171 {
172 *pcbSize = 2;
173 DISFetchRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL *)pu64Data);
174 return true;
175 } /* Else - error. */
176
177 AssertFailed();
178 *pcbSize = 0;
179 *pu64Data = 0;
180 return false;
181 }
182}
183
184
185/**
186 * Saves data to the 8/16/32/64-bit general purpose or segment register defined
187 * by the instruction's parameter.
188 *
189 * @returns true on success.
190 * @param pCpu Pointer to current disassembler context.
191 * @param pParam Pointer to parameter of instruction to process.
192 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
193 * @param u64Data 8/16/32/64 bit data to store.
194 */
195bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u64Data)
196{
197 NOREF(pCpu);
198 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32 | DISUSE_DISPLACEMENT64 | DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE16_SX8))
199 {
200 return false;
201 }
202
203 if (pParam->fUse & DISUSE_REG_GEN32)
204 {
205 DISWriteReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t)u64Data);
206 return true;
207 }
208
209 if (pParam->fUse & DISUSE_REG_GEN64)
210 {
211 DISWriteReg64(pRegFrame, pParam->Base.idxGenReg, u64Data);
212 return true;
213 }
214
215 if (pParam->fUse & DISUSE_REG_GEN16)
216 {
217 DISWriteReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t)u64Data);
218 return true;
219 }
220
221 if (pParam->fUse & DISUSE_REG_GEN8)
222 {
223 DISWriteReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t)u64Data);
224 return true;
225 }
226
227 if (pParam->fUse & DISUSE_REG_SEG)
228 {
229 DISWriteRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL)u64Data);
230 return true;
231 }
232
233 /* Else - error. */
234 return false;
235}
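/* Usage sketch (added illustration; the surrounding code is hypothetical): the
 * two helpers above are used as a pair by the interpreters below -- fetch an
 * operand with iomGetRegImmData(), transform it, then store the result with
 * iomSaveDataToReg():
 *
 *     unsigned cb      = 0;
 *     uint64_t u64Data = 0;
 *     if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb))
 *     {
 *         // ... operate on u64Data; cb is 1, 2, 4 or 8 ...
 *         bool fOk = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
 *         Assert(fOk);
 *     }
 */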
236
237
238/**
239 * Deals with complicated MMIO writes.
240 *
241 * Complicated means unaligned or non-dword/qword sized accesses depending on
242 * the MMIO region's access mode flags.
243 *
244 * @returns Strict VBox status code. Any EM scheduling status code,
245 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
246 * VINF_IOM_R3_MMIO_READ may be returned.
247 *
248 * @param pVM Pointer to the VM.
249 * @param pRange The range to write to.
250 * @param GCPhys The physical address to start writing.
251 * @param pvValue The value to write.
252 * @param cbValue The size of the value to write.
253 */
254static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
255{
256 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
257 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
258 VERR_IOM_MMIO_IPE_1);
259 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
260 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
261 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
262 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
263
264 /*
265 * Do debug stop if requested.
266 */
267 int rc = VINF_SUCCESS; NOREF(pVM);
268#ifdef VBOX_STRICT
269 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
270 {
271# ifdef IN_RING3
272 LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
273 R3STRING(pRange->pszDesc)));
274 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
275 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
276 if (rc == VERR_DBGF_NOT_ATTACHED)
277 rc = VINF_SUCCESS;
278# else
279 return VINF_IOM_R3_MMIO_WRITE;
280# endif
281 }
282#endif
283
284 /*
285 * Check if we should ignore the write.
286 */
287 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
288 {
289 Assert(cbValue != 4 || (GCPhys & 3));
290 return VINF_SUCCESS;
291 }
292 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
293 {
294 Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
295 return VINF_SUCCESS;
296 }
297
298 /*
299 * Split and conquer.
300 */
301 for (;;)
302 {
303 unsigned const offAccess = GCPhys & 3;
304 unsigned cbThisPart = 4 - offAccess;
305 if (cbThisPart > cbValue)
306 cbThisPart = cbValue;
307
308 /*
309 * Get the missing bits (if any).
310 */
311 uint32_t u32MissingValue = 0;
312 if (fReadMissing && cbThisPart != 4)
313 {
314 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
315 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
316 switch (rc2)
317 {
318 case VINF_SUCCESS:
319 break;
320 case VINF_IOM_MMIO_UNUSED_FF:
321 u32MissingValue = UINT32_C(0xffffffff);
322 break;
323 case VINF_IOM_MMIO_UNUSED_00:
324 u32MissingValue = 0;
325 break;
326 case VINF_IOM_R3_MMIO_READ:
327 case VINF_IOM_R3_MMIO_READ_WRITE:
328 case VINF_IOM_R3_MMIO_WRITE:
329 /** @todo What if we've split a transfer and already read
330 * something? Since writes generally have side effects, we
331 * could be kind of screwed here...
332 *
333 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
334 * to REM for MMIO accesses (as it currently may do). */
335
336 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
337 return rc2;
338 default:
339 if (RT_FAILURE(rc2))
340 {
341 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
342 return rc2;
343 }
344 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
345 if (rc == VINF_SUCCESS || rc2 < rc)
346 rc = rc2;
347 break;
348 }
349 }
350
351 /*
352 * Merge missing and given bits.
353 */
354 uint32_t u32GivenMask;
355 uint32_t u32GivenValue;
356 switch (cbThisPart)
357 {
358 case 1:
359 u32GivenValue = *(uint8_t const *)pvValue;
360 u32GivenMask = UINT32_C(0x000000ff);
361 break;
362 case 2:
363 u32GivenValue = *(uint16_t const *)pvValue;
364 u32GivenMask = UINT32_C(0x0000ffff);
365 break;
366 case 3:
367 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
368 ((uint8_t const *)pvValue)[2], 0);
369 u32GivenMask = UINT32_C(0x00ffffff);
370 break;
371 case 4:
372 u32GivenValue = *(uint32_t const *)pvValue;
373 u32GivenMask = UINT32_C(0xffffffff);
374 break;
375 default:
376 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
377 }
378 if (offAccess)
379 {
380 u32GivenValue <<= offAccess * 8;
381 u32GivenMask <<= offAccess * 8;
382 }
383
384 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
385 | (u32GivenValue & u32GivenMask);
386
387 /*
388 * Do DWORD write to the device.
389 */
390 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
391 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
392 switch (rc2)
393 {
394 case VINF_SUCCESS:
395 break;
396 case VINF_IOM_R3_MMIO_READ:
397 case VINF_IOM_R3_MMIO_READ_WRITE:
398 case VINF_IOM_R3_MMIO_WRITE:
399 /** @todo What if we've split a transfer and already read
400 * something? Since reads can have side effects, we could be
401 * kind of screwed here...
402 *
403 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
404 * to REM for MMIO accesses (as it currently may do). */
405 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
406 return rc2;
407 default:
408 if (RT_FAILURE(rc2))
409 {
410 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
411 return rc2;
412 }
413 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
414 if (rc == VINF_SUCCESS || rc2 < rc)
415 rc = rc2;
416 break;
417 }
418
419 /*
420 * Advance.
421 */
422 cbValue -= cbThisPart;
423 if (!cbValue)
424 break;
425 GCPhys += cbThisPart;
426 pvValue = (uint8_t const *)pvValue + cbThisPart;
427 }
428
429 return rc;
430}
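/* Worked example (added illustration): a 2-byte write of 0xBBAA to base + 3 in
 * a WRITE_DWORD_READ_MISSING range is split into two dword accesses:
 *
 *   1. dword at base + 0: read the current value, merge byte 0xAA into byte 3
 *      (u32GivenMask = 0x000000ff << 24 = 0xff000000), write it back.
 *   2. dword at base + 4: read the current value, merge byte 0xBB into byte 0
 *      (u32GivenMask = 0x000000ff), write it back.
 *
 * When the write mode does not request reading the missing bytes, the merge
 * uses zero for them instead (u32MissingValue stays 0). */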
431
432
433
434
435/**
436 * Wrapper which does the write and updates range statistics when such are enabled.
437 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
438 */
439static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
440 const void *pvData, unsigned cb)
441{
442#ifdef VBOX_WITH_STATISTICS
443 int rcSem = IOM_LOCK_SHARED(pVM);
444 if (rcSem == VERR_SEM_BUSY)
445 return VINF_IOM_R3_MMIO_WRITE;
446 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
447 if (!pStats)
448# ifdef IN_RING3
449 return VERR_NO_MEMORY;
450# else
451 return VINF_IOM_R3_MMIO_WRITE;
452# endif
453 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
454#endif
455
456 VBOXSTRICTRC rcStrict;
457 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
458 {
459 if ( (cb == 4 && !(GCPhysFault & 3))
460 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
461 || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
462 rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
463 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
464 else
465 rcStrict = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
466 }
467 else
468 rcStrict = VINF_SUCCESS;
469
470 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
471 STAM_COUNTER_INC(&pStats->Accesses);
472 return rcStrict;
473}
474
475
476/**
477 * Deals with complicated MMIO reads.
478 *
479 * Complicated means unaligned or non-dword/qword sized accesses depending on
480 * the MMIO region's access mode flags.
481 *
482 * @returns Strict VBox status code. Any EM scheduling status code,
483 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
484 * VINF_IOM_R3_MMIO_WRITE may be returned.
485 *
486 * @param pVM Pointer to the VM.
487 * @param pRange The range to read from.
488 * @param GCPhys The physical address to start reading.
489 * @param pvValue Where to store the value.
490 * @param cbValue The size of the value to read.
491 */
492static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
493{
494 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
495 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
496 VERR_IOM_MMIO_IPE_1);
497 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
498 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
499
500 /*
501 * Do debug stop if requested.
502 */
503 int rc = VINF_SUCCESS; NOREF(pVM);
504#ifdef VBOX_STRICT
505 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
506 {
507# ifdef IN_RING3
508 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
509 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
510 if (rc == VERR_DBGF_NOT_ATTACHED)
511 rc = VINF_SUCCESS;
512# else
513 return VINF_IOM_R3_MMIO_READ;
514# endif
515 }
516#endif
517
518 /*
519 * Split and conquer.
520 */
521 for (;;)
522 {
523 /*
524 * Do DWORD read from the device.
525 */
526 uint32_t u32Value;
527 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
528 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
529 switch (rc2)
530 {
531 case VINF_SUCCESS:
532 break;
533 case VINF_IOM_MMIO_UNUSED_FF:
534 u32Value = UINT32_C(0xffffffff);
535 break;
536 case VINF_IOM_MMIO_UNUSED_00:
537 u32Value = 0;
538 break;
539 case VINF_IOM_R3_MMIO_READ:
540 case VINF_IOM_R3_MMIO_READ_WRITE:
541 case VINF_IOM_R3_MMIO_WRITE:
542 /** @todo What if we've split a transfer and already read
543 * something? Since reads can have side effects, we could be
544 * kind of screwed here... */
545 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
546 return rc2;
547 default:
548 if (RT_FAILURE(rc2))
549 {
550 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
551 return rc2;
552 }
553 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
554 if (rc == VINF_SUCCESS || rc2 < rc)
555 rc = rc2;
556 break;
557 }
558 u32Value >>= (GCPhys & 3) * 8;
559
560 /*
561 * Write what we've read.
562 */
563 unsigned cbThisPart = 4 - (GCPhys & 3);
564 if (cbThisPart > cbValue)
565 cbThisPart = cbValue;
566
567 switch (cbThisPart)
568 {
569 case 1:
570 *(uint8_t *)pvValue = (uint8_t)u32Value;
571 break;
572 case 2:
573 *(uint16_t *)pvValue = (uint16_t)u32Value;
574 break;
575 case 3:
576 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
577 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
578 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
579 break;
580 case 4:
581 *(uint32_t *)pvValue = u32Value;
582 break;
583 }
584
585 /*
586 * Advance.
587 */
588 cbValue -= cbThisPart;
589 if (!cbValue)
590 break;
591 GCPhys += cbThisPart;
592 pvValue = (uint8_t *)pvValue + cbThisPart;
593 }
594
595 return rc;
596}
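/* Worked example (added illustration): a 2-byte read at base + 3 in a
 * READ_DWORD range becomes two dword reads:
 *
 *   1. dword at base + 0, shifted right 24 bits, supplies byte 0 of the result.
 *   2. dword at base + 4, low byte only, supplies byte 1 of the result.
 *
 * Each part is stored via the 1/2/3/4 byte switch above, and rc ends up as the
 * lowest (most urgent) informational EM status seen along the way. */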
597
598
599/**
600 * Implements VINF_IOM_MMIO_UNUSED_FF.
601 *
602 * @returns VINF_SUCCESS.
603 * @param pvValue Where to store the 0xff bytes.
604 * @param cbValue How many bytes to fill.
605 */
606static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
607{
608 switch (cbValue)
609 {
610 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
611 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
612 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
613 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
614 default:
615 {
616 uint8_t *pb = (uint8_t *)pvValue;
617 while (cbValue--)
618 *pb++ = UINT8_C(0xff);
619 break;
620 }
621 }
622 return VINF_SUCCESS;
623}
624
625
626/**
627 * Implements VINF_IOM_MMIO_UNUSED_00.
628 *
629 * @returns VINF_SUCCESS.
630 * @param pvValue Where to store the zeros.
631 * @param cbValue How many bytes to fill.
632 */
633static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
634{
635 switch (cbValue)
636 {
637 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
638 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
639 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
640 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
641 default:
642 {
643 uint8_t *pb = (uint8_t *)pvValue;
644 while (cbValue--)
645 *pb++ = UINT8_C(0x00);
646 break;
647 }
648 }
649 return VINF_SUCCESS;
650}
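/* Example (hypothetical device callback; isImplementedReg() is made up): a
 * read handler signals "nothing here" by returning one of the
 * VINF_IOM_MMIO_UNUSED_* statuses instead of filling the buffer itself, and
 * iomMMIODoRead() below then applies the two helpers above:
 *
 *     static DECLCALLBACK(int) myMmioRead(PPDMDEVINS pDevIns, void *pvUser,
 *                                         RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
 *     {
 *         if (!isImplementedReg(GCPhysAddr))
 *             return VINF_IOM_MMIO_UNUSED_FF; // reads as all ones
 *         ...
 *     }
 */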
651
652
653/**
654 * Wrapper which does the read and updates range statistics when such are enabled.
655 */
656DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
657 void *pvValue, unsigned cbValue)
658{
659#ifdef VBOX_WITH_STATISTICS
660 int rcSem = IOM_LOCK_SHARED(pVM);
661 if (rcSem == VERR_SEM_BUSY)
662 return VINF_IOM_R3_MMIO_READ;
663 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
664 if (!pStats)
665# ifdef IN_RING3
666 return VERR_NO_MEMORY;
667# else
668 return VINF_IOM_R3_MMIO_READ;
669# endif
670 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
671#endif
672
673 VBOXSTRICTRC rcStrict;
674 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
675 {
676 if ( ( cbValue == 4
677 && !(GCPhys & 3))
678 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
679 || ( cbValue == 8
680 && !(GCPhys & 7)
681 && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
682 rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
683 pvValue, cbValue);
684 else
685 rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
686 }
687 else
688 rcStrict = VINF_IOM_MMIO_UNUSED_FF;
689 if (rcStrict != VINF_SUCCESS)
690 {
691 switch (VBOXSTRICTRC_VAL(rcStrict))
692 {
693 case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
694 case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
695 }
696 }
697
698 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
699 STAM_COUNTER_INC(&pStats->Accesses);
700 return rcStrict;
701}
702
703
704/**
705 * Internal - statistics only.
706 */
707DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
708{
709#ifdef VBOX_WITH_STATISTICS
710 switch (cb)
711 {
712 case 1:
713 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
714 break;
715 case 2:
716 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
717 break;
718 case 4:
719 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
720 break;
721 case 8:
722 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
723 break;
724 default:
725 /* No way. */
726 AssertMsgFailed(("Invalid data length %d\n", cb));
727 break;
728 }
729#else
730 NOREF(pVM); NOREF(cb);
731#endif
732}
733
734
735#ifndef IEM_USE_IEM_INSTEAD
736
737/**
738 * MOV reg, mem (read)
739 * MOVZX reg, mem (read)
740 * MOVSX reg, mem (read)
741 *
742 * @returns VBox status code.
743 *
744 * @param pVM The virtual machine.
745 * @param pVCpu Pointer to the virtual CPU structure of the caller.
746 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
747 * @param pCpu Disassembler CPU state.
748 * @param pRange Pointer to the MMIO range.
749 * @param GCPhysFault The GC physical address corresponding to pvFault.
750 */
751static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
752 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
753{
754 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
755
756 /*
757 * Get the data size from parameter 2,
758 * and call the handler function to get the data.
759 */
760 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
761 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
762
763 uint64_t u64Data = 0;
764 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
765 if (rc == VINF_SUCCESS)
766 {
767 /*
768 * Do sign extension for MOVSX.
769 */
770 /** @todo check up on the MOVSX implementation! */
771 if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
772 {
773 if (cb == 1)
774 {
775 /* Sign extend byte to the 64-bit intermediate. */
776 int64_t iData = (int8_t)u64Data;
777 u64Data = (uint64_t)iData;
778 }
779 else
780 {
781 /* Sign extend word to the 64-bit intermediate. */
782 int64_t iData = (int16_t)u64Data;
783 u64Data = (uint64_t)iData;
784 }
785 }
786
787 /*
788 * Store the result to register (parameter 1).
789 */
790 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
791 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
792 }
793
794 if (rc == VINF_SUCCESS)
795 iomMMIOStatLength(pVM, cb);
796 return rc;
797}
798
799
800/**
801 * MOV mem, reg|imm (write)
802 *
803 * @returns VBox status code.
804 *
805 * @param pVM The virtual machine.
806 * @param pVCpu Pointer to the virtual CPU structure of the caller.
807 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
808 * @param pCpu Disassembler CPU state.
809 * @param pRange Pointer to the MMIO range.
810 * @param GCPhysFault The GC physical address corresponding to pvFault.
811 */
812static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
813 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
814{
815 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
816
817 /*
818 * Get data to write from second parameter,
819 * and call the callback to write it.
820 */
821 unsigned cb = 0;
822 uint64_t u64Data = 0;
823 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
824 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
825
826 int rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
827 if (rc == VINF_SUCCESS)
828 iomMMIOStatLength(pVM, cb);
829 return rc;
830}
831
832
833/** Wrapper for reading virtual memory. */
834DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
835{
836 /* Note: This will fail in R0 or RC if it hits an access handler. That
837 isn't a problem though since the operation can be restarted in REM. */
838#ifdef IN_RC
839 NOREF(pVCpu);
840 int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
841 /* Page may be protected and not directly accessible. */
842 if (rc == VERR_ACCESS_DENIED)
843 rc = VINF_IOM_R3_IOPORT_WRITE;
844 return rc;
845#else
846 return VBOXSTRICTRC_VAL(PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM));
847#endif
848}
849
850
851/** Wrapper for writing virtual memory. */
852DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
853{
854 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
855 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
856 * as well since we're not behind the PGM lock and the handler may change between calls.
857 *
858 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
859 * the state of some shadowed structures. */
860#if defined(IN_RING0) || defined(IN_RC)
861 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
862#else
863 NOREF(pCtxCore);
864 return VBOXSTRICTRC_VAL(PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM));
865#endif
866}
867
868
869#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
870/**
871 * [REP] MOVSB
872 * [REP] MOVSW
873 * [REP] MOVSD
874 *
875 * Restricted implementation.
876 *
877 *
878 * @returns VBox status code.
879 *
880 * @param pVM The virtual machine.
881 * @param uErrorCode CPU Error code.
882 * @param pRegFrame Trap register frame.
883 * @param GCPhysFault The GC physical address corresponding to pvFault.
884 * @param pCpu Disassembler CPU state.
885 * @param pRange Pointer to the MMIO range.
886 * @param ppStat Which sub-sample to attribute this call to.
887 */
888static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
889 PSTAMPROFILE *ppStat)
890{
891 /*
892 * We do not support segment prefixes or REPNE.
893 */
894 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
895 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
896
897 PVMCPU pVCpu = VMMGetCpu(pVM);
898
899 /*
900 * Get bytes/words/dwords/qword count to copy.
901 */
902 uint32_t cTransfers = 1;
903 if (pCpu->fPrefix & DISPREFIX_REP)
904 {
905#ifndef IN_RC
906 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
907 && pRegFrame->rcx >= _4G)
908 return VINF_EM_RAW_EMULATE_INSTR;
909#endif
910
911 cTransfers = pRegFrame->ecx;
912 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
913 cTransfers &= 0xffff;
914
915 if (!cTransfers)
916 return VINF_SUCCESS;
917 }
918
919 /* Get the current privilege level. */
920 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
921
922 /*
923 * Get data size.
924 */
925 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
926 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
927 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
928
929#ifdef VBOX_WITH_STATISTICS
930 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
931 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
932#endif
933
934/** @todo re-evaluate on page boundaries. */
935
936 RTGCPHYS Phys = GCPhysFault;
937 int rc;
938 if (fWriteAccess)
939 {
940 /*
941 * Write operation: [Mem] -> [MMIO]
942 * ds:esi (Virt Src) -> es:edi (Phys Dst)
943 */
944 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
945
946 /* Check callback. */
947 if (!pRange->CTX_SUFF(pfnWriteCallback))
948 return VINF_IOM_R3_MMIO_WRITE;
949
950 /* Convert source address ds:esi. */
951 RTGCUINTPTR pu8Virt;
952 rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
953 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
954 (PRTGCPTR)&pu8Virt);
955 if (RT_SUCCESS(rc))
956 {
957
958 /* Access verification first; we currently can't recover properly from traps inside this instruction */
959 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
960 if (rc != VINF_SUCCESS)
961 {
962 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
963 return VINF_EM_RAW_EMULATE_INSTR;
964 }
965
966#ifdef IN_RC
967 MMGCRamRegisterTrapHandler(pVM);
968#endif
969
970 /* copy loop. */
971 while (cTransfers)
972 {
973 uint32_t u32Data = 0;
974 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
975 if (rc != VINF_SUCCESS)
976 break;
977 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb));
978 if (rc != VINF_SUCCESS)
979 break;
980
981 pu8Virt += offIncrement;
982 Phys += offIncrement;
983 pRegFrame->rsi += offIncrement;
984 pRegFrame->rdi += offIncrement;
985 cTransfers--;
986 }
987#ifdef IN_RC
988 MMGCRamDeregisterTrapHandler(pVM);
989#endif
990 /* Update ecx. */
991 if (pCpu->fPrefix & DISPREFIX_REP)
992 pRegFrame->ecx = cTransfers;
993 }
994 else
995 rc = VINF_IOM_R3_MMIO_READ_WRITE;
996 }
997 else
998 {
999 /*
1000 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
1001 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
1002 */
1003 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
1004
1005 /* Check callback. */
1006 if (!pRange->CTX_SUFF(pfnReadCallback))
1007 return VINF_IOM_R3_MMIO_READ;
1008
1009 /* Convert destination address. */
1010 RTGCUINTPTR pu8Virt;
1011 rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1012 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1013 (RTGCPTR *)&pu8Virt);
1014 if (RT_FAILURE(rc))
1015 return VINF_IOM_R3_MMIO_READ;
1016
1017 /* Check if destination address is MMIO. */
1018 PIOMMMIORANGE pMMIODst;
1019 RTGCPHYS PhysDst;
1020 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
1021 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
1022 if ( RT_SUCCESS(rc)
1023 && (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
1024 {
1025 /** @todo implement per-device locks for MMIO access. */
1026 Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1027
1028 /*
1029 * Extra: [MMIO] -> [MMIO]
1030 */
1031 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
1032 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
1033 {
1034 iomMmioReleaseRange(pVM, pRange);
1035 return VINF_IOM_R3_MMIO_READ_WRITE;
1036 }
1037
1038 /* copy loop. */
1039 while (cTransfers)
1040 {
1041 uint32_t u32Data;
1042 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1043 if (rc != VINF_SUCCESS)
1044 break;
1045 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb));
1046 if (rc != VINF_SUCCESS)
1047 break;
1048
1049 Phys += offIncrement;
1050 PhysDst += offIncrement;
1051 pRegFrame->rsi += offIncrement;
1052 pRegFrame->rdi += offIncrement;
1053 cTransfers--;
1054 }
1055 iomMmioReleaseRange(pVM, pRange);
1056 }
1057 else
1058 {
1059 /*
1060 * Normal: [MMIO] -> [Mem]
1061 */
1062 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1063 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1064 if (rc != VINF_SUCCESS)
1065 {
1066 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
1067 return VINF_EM_RAW_EMULATE_INSTR;
1068 }
1069
1070 /* copy loop. */
1071#ifdef IN_RC
1072 MMGCRamRegisterTrapHandler(pVM);
1073#endif
1074 while (cTransfers)
1075 {
1076 uint32_t u32Data;
1077 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1078 if (rc != VINF_SUCCESS)
1079 break;
1080 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
1081 if (rc != VINF_SUCCESS)
1082 {
1083 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
1084 break;
1085 }
1086
1087 pu8Virt += offIncrement;
1088 Phys += offIncrement;
1089 pRegFrame->rsi += offIncrement;
1090 pRegFrame->rdi += offIncrement;
1091 cTransfers--;
1092 }
1093#ifdef IN_RC
1094 MMGCRamDeregisterTrapHandler(pVM);
1095#endif
1096 }
1097
1098 /* Update ecx on exit. */
1099 if (pCpu->fPrefix & DISPREFIX_REP)
1100 pRegFrame->ecx = cTransfers;
1101 }
1102
1103 /* work statistics. */
1104 if (rc == VINF_SUCCESS)
1105 iomMMIOStatLength(pVM, cb);
1106 NOREF(ppStat);
1107 return rc;
1108}
1109#endif /* IOM_WITH_MOVS_SUPPORT */
1110
1111
1112/**
1113 * Gets the address / opcode mask corresponding to the given CPU mode.
1114 *
1115 * @returns Mask.
1116 * @param enmCpuMode CPU mode.
1117 */
1118static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
1119{
1120 switch (enmCpuMode)
1121 {
1122 case DISCPUMODE_16BIT: return UINT16_MAX;
1123 case DISCPUMODE_32BIT: return UINT32_MAX;
1124 case DISCPUMODE_64BIT: return UINT64_MAX;
1125 default:
1126 AssertFailedReturn(UINT32_MAX);
1127 }
1128}
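/* Example (added illustration): the mask is used to update only the part of a
 * pointer register that the address size covers. With a 16-bit address size
 * (fAddrMask == 0xffff), stepping DI leaves the upper bits of RDI untouched:
 *
 *     uint64_t const fAddrMask = iomDisModeToMask(DISCPUMODE_16BIT);
 *     pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
 *                    | (pRegFrame->rdi & ~fAddrMask);
 */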
1129
1130
1131/**
1132 * [REP] STOSB
1133 * [REP] STOSW
1134 * [REP] STOSD
1135 *
1136 * Restricted implementation.
1137 *
1138 *
1139 * @returns VBox status code.
1140 *
1141 * @param pVM The virtual machine.
1142 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1143 * @param pRegFrame Trap register frame.
1144 * @param GCPhysFault The GC physical address corresponding to pvFault.
1145 * @param pCpu Disassembler CPU state.
1146 * @param pRange Pointer to the MMIO range.
1147 */
1148static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
1149 PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1150{
1151 /*
1152 * We do not support segment prefixes or REPNE.
1153 */
1154 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
1155 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1156
1157 /*
1158 * Get bytes/words/dwords/qwords count to copy.
1159 */
1160 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1161 RTGCUINTREG cTransfers = 1;
1162 if (pCpu->fPrefix & DISPREFIX_REP)
1163 {
1164#ifndef IN_RC
1165 if ( CPUMIsGuestIn64BitCode(pVCpu)
1166 && pRegFrame->rcx >= _4G)
1167 return VINF_EM_RAW_EMULATE_INSTR;
1168#endif
1169
1170 cTransfers = pRegFrame->rcx & fAddrMask;
1171 if (!cTransfers)
1172 return VINF_SUCCESS;
1173 }
1174
1175/** @todo r=bird: bounds checks! */
1176
1177 /*
1178 * Get data size.
1179 */
1180 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
1181 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1182 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1183
1184#ifdef VBOX_WITH_STATISTICS
1185 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
1186 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
1187#endif
1188
1189
1190 RTGCPHYS Phys = GCPhysFault;
1191 int rc;
1192 if ( pRange->CTX_SUFF(pfnFillCallback)
1193 && cb <= 4 /* can only fill 32-bit values */)
1194 {
1195 /*
1196 * Use the fill callback.
1197 */
1198 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
1199 if (offIncrement > 0)
1200 {
1201 /* addr++ variant. */
1202 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
1203 pRegFrame->eax, cb, cTransfers);
1204 if (rc == VINF_SUCCESS)
1205 {
1206 /* Update registers. */
1207 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1208 | (pRegFrame->rdi & ~fAddrMask);
1209 if (pCpu->fPrefix & DISPREFIX_REP)
1210 pRegFrame->rcx &= ~fAddrMask;
1211 }
1212 }
1213 else
1214 {
1215 /* addr-- variant. */
1216 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1217 Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
1218 pRegFrame->eax, cb, cTransfers);
1219 if (rc == VINF_SUCCESS)
1220 {
1221 /* Update registers. */
1222 pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1223 | (pRegFrame->rdi & ~fAddrMask);
1224 if (pCpu->fPrefix & DISPREFIX_REP)
1225 pRegFrame->rcx &= ~fAddrMask;
1226 }
1227 }
1228 }
1229 else
1230 {
1231 /*
1232 * Use the write callback.
1233 */
1234 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
1235 uint64_t u64Data = pRegFrame->rax;
1236
1237 /* fill loop. */
1238 do
1239 {
1240 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
1241 if (rc != VINF_SUCCESS)
1242 break;
1243
1244 Phys += offIncrement;
1245 pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
1246 | (pRegFrame->rdi & ~fAddrMask);
1247 cTransfers--;
1248 } while (cTransfers);
1249
1250 /* Update rcx on exit. */
1251 if (pCpu->fPrefix & DISPREFIX_REP)
1252 pRegFrame->rcx = (cTransfers & fAddrMask)
1253 | (pRegFrame->rcx & ~fAddrMask);
1254 }
1255
1256 /*
1257 * Work statistics and return.
1258 */
1259 if (rc == VINF_SUCCESS)
1260 iomMMIOStatLength(pVM, cb);
1261 return rc;
1262}
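/* Worked example (added illustration) for the addr-- fill variant above: with
 * EFLAGS.DF set, GCPhysFault is the highest element STOS touches, while the
 * fill callback wants the lowest address of the block. For cb == 4,
 * cTransfers == 3 and a fault at 0x1008:
 *
 *     0x1008 - ((3 - 1) << SIZE_2_SHIFT(4)) = 0x1008 - 8 = 0x1000
 *
 * so the callback fills 0x1000..0x100b and RDI is wound back by 12 bytes. */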
1263
1264
1265/**
1266 * [REP] LODSB
1267 * [REP] LODSW
1268 * [REP] LODSD
1269 *
1270 * Restricted implementation.
1271 *
1272 *
1273 * @returns VBox status code.
1274 *
1275 * @param pVM The virtual machine.
1276 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1277 * @param pRegFrame Trap register frame.
1278 * @param GCPhysFault The GC physical address corresponding to pvFault.
1279 * @param pCpu Disassembler CPU state.
1280 * @param pRange Pointer to the MMIO range.
1281 */
1282static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1283 PIOMMMIORANGE pRange)
1284{
1285 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1286
1287 /*
1288 * We do not support segment prefixes or REP*.
1289 */
1290 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
1291 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1292
1293 /*
1294 * Get data size.
1295 */
1296 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
1297 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1298 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1299
1300 /*
1301 * Perform read.
1302 */
1303 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb));
1304 if (rc == VINF_SUCCESS)
1305 {
1306 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1307 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
1308 | (pRegFrame->rsi & ~fAddrMask);
1309 }
1310
1311 /*
1312 * Work statistics and return.
1313 */
1314 if (rc == VINF_SUCCESS)
1315 iomMMIOStatLength(pVM, cb);
1316 return rc;
1317}
1318
1319
1320/**
1321 * CMP [MMIO], reg|imm
1322 * CMP reg|imm, [MMIO]
1323 *
1324 * Restricted implementation.
1325 *
1326 *
1327 * @returns VBox status code.
1328 *
1329 * @param pVM The virtual machine.
1330 * @param pRegFrame Trap register frame.
1331 * @param GCPhysFault The GC physical address corresponding to pvFault.
1332 * @param pCpu Disassembler CPU state.
1333 * @param pRange Pointer to the MMIO range.
1334 */
1335static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1336 PIOMMMIORANGE pRange)
1337{
1338 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1339
1340 /*
1341 * Get the operands.
1342 */
1343 unsigned cb = 0;
1344 uint64_t uData1 = 0;
1345 uint64_t uData2 = 0;
1346 int rc;
1347 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1348 /* cmp reg, [MMIO]. */
1349 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1350 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1351 /* cmp [MMIO], reg|imm. */
1352 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1353 else
1354 {
1355 AssertMsgFailed(("Disassember CMP problem..\n"));
1356 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1357 }
1358
1359 if (rc == VINF_SUCCESS)
1360 {
1361#if HC_ARCH_BITS == 32
1362 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1363 if (cb > 4)
1364 return VINF_IOM_R3_MMIO_READ_WRITE;
1365#endif
1366 /* Emulate CMP and update guest flags. */
1367 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
1368 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1369 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1370 iomMMIOStatLength(pVM, cb);
1371 }
1372
1373 return rc;
1374}
1375
1376
1377/**
1378 * AND [MMIO], reg|imm
1379 * AND reg, [MMIO]
1380 * OR [MMIO], reg|imm
1381 * OR reg, [MMIO]
1382 *
1383 * Restricted implementation.
1384 *
1385 *
1386 * @returns VBox status code.
1387 *
1388 * @param pVM The virtual machine.
1389 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1390 * @param pRegFrame Trap register frame.
1391 * @param GCPhysFault The GC physical address corresponding to pvFault.
1392 * @param pCpu Disassembler CPU state.
1393 * @param pRange Pointer to the MMIO range.
1394 * @param pfnEmulate Instruction emulation function.
1395 */
1396static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1397 PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
1398{
1399 unsigned cb = 0;
1400 uint64_t uData1 = 0;
1401 uint64_t uData2 = 0;
1402 bool fAndWrite;
1403 int rc;
1404
1405#ifdef LOG_ENABLED
1406 const char *pszInstr;
1407
1408 if (pCpu->pCurInstr->uOpcode == OP_XOR)
1409 pszInstr = "Xor";
1410 else if (pCpu->pCurInstr->uOpcode == OP_OR)
1411 pszInstr = "Or";
1412 else if (pCpu->pCurInstr->uOpcode == OP_AND)
1413 pszInstr = "And";
1414 else
1415 pszInstr = "OrXorAnd??";
1416#endif
1417
1418 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1419 {
1420#if HC_ARCH_BITS == 32
1421 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1422 if (cb > 4)
1423 return VINF_IOM_R3_MMIO_READ_WRITE;
1424#endif
1425 /* and reg, [MMIO]. */
1426 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1427 fAndWrite = false;
1428 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1429 }
1430 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1431 {
1432#if HC_ARCH_BITS == 32
1433 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1434 if (cb > 4)
1435 return VINF_IOM_R3_MMIO_READ_WRITE;
1436#endif
1437 /* and [MMIO], reg|imm. */
1438 fAndWrite = true;
1439 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
1440 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
1441 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1442 else
1443 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1444 }
1445 else
1446 {
1447 AssertMsgFailed(("Disassember AND problem..\n"));
1448 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1449 }
1450
1451 if (rc == VINF_SUCCESS)
1452 {
1453 /* Emulate the AND/OR/XOR and update guest flags. */
1454 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
1455
1456 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
1457
1458 if (fAndWrite)
1459 /* Store result to MMIO. */
1460 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1461 else
1462 {
1463 /* Store result to register. */
1464 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
1465 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1466 }
1467 if (rc == VINF_SUCCESS)
1468 {
1469 /* Update guest's eflags and finish. */
1470 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1471 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1472 iomMMIOStatLength(pVM, cb);
1473 }
1474 }
1475
1476 return rc;
1477}
1478
1479
1480/**
1481 * TEST [MMIO], reg|imm
1482 * TEST reg, [MMIO]
1483 *
1484 * Restricted implementation.
1485 *
1486 *
1487 * @returns VBox status code.
1488 *
1489 * @param pVM The virtual machine.
1490 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1491 * @param pRegFrame Trap register frame.
1492 * @param GCPhysFault The GC physical address corresponding to pvFault.
1493 * @param pCpu Disassembler CPU state.
1494 * @param pRange Pointer to the MMIO range.
1495 */
1496static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1497 PIOMMMIORANGE pRange)
1498{
1499 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1500
1501 unsigned cb = 0;
1502 uint64_t uData1 = 0;
1503 uint64_t uData2 = 0;
1504 int rc;
1505
1506 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1507 {
1508 /* test reg, [MMIO]. */
1509 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1510 }
1511 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1512 {
1513 /* test [MMIO], reg|imm. */
1514 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1515 }
1516 else
1517 {
1518 AssertMsgFailed(("Disassember TEST problem..\n"));
1519 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1520 }
1521
1522 if (rc == VINF_SUCCESS)
1523 {
1524#if HC_ARCH_BITS == 32
1525 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1526 if (cb > 4)
1527 return VINF_IOM_R3_MMIO_READ_WRITE;
1528#endif
1529
1530 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
1531 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
1532 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1533 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1534 iomMMIOStatLength(pVM, cb);
1535 }
1536
1537 return rc;
1538}
1539
1540
1541/**
1542 * BT [MMIO], reg|imm
1543 *
1544 * Restricted implementation.
1545 *
1546 *
1547 * @returns VBox status code.
1548 *
1549 * @param pVM The virtual machine.
1550 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1551 * @param pRegFrame Trap register frame.
1552 * @param GCPhysFault The GC physical address corresponding to pvFault.
1553 * @param pCpu Disassembler CPU state.
1554 * @param pRange Pointer to the MMIO range.
1555 */
1556static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1557 PIOMMMIORANGE pRange)
1558{
1559 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1560
1561 uint64_t uBit = 0;
1562 uint64_t uData = 0;
1563 unsigned cbIgnored;
1564
1565 if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
1566 {
1567 AssertMsgFailed(("Disassember BT problem..\n"));
1568 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1569 }
1570 /* Only the size of the memory operand matters here. */
1571 unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
1572
1573 /* bt [MMIO], reg|imm. */
1574 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData));
1575 if (rc == VINF_SUCCESS)
1576 {
1577 /* Extract the tested bit into CF (the 1-bit field truncates the shifted value). */
1578 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
1579 iomMMIOStatLength(pVM, cbData);
1580 }
1581
1582 return rc;
1583}
1584
1585/**
1586 * XCHG [MMIO], reg
1587 * XCHG reg, [MMIO]
1588 *
1589 * Restricted implementation.
1590 *
1591 *
1592 * @returns VBox status code.
1593 *
1594 * @param pVM The virtual machine.
1595 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1596 * @param pRegFrame Trap register frame.
1597 * @param GCPhysFault The GC physical address corresponding to pvFault.
1598 * @param pCpu Disassembler CPU state.
1599 * @param pRange Pointer to the MMIO range.
1600 */
1601static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1602 PIOMMMIORANGE pRange)
1603{
1604 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1605 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1606 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1607 return VINF_IOM_R3_MMIO_READ_WRITE;
1608
1609 int rc;
1610 unsigned cb = 0;
1611 uint64_t uData1 = 0;
1612 uint64_t uData2 = 0;
1613 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1614 {
1615 /* xchg reg, [MMIO]. */
1616 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1617 if (rc == VINF_SUCCESS)
1618 {
1619 /* Store result to MMIO. */
1620 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1621
1622 if (rc == VINF_SUCCESS)
1623 {
1624 /* Store result to register. */
1625 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
1626 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1627 }
1628 else
1629 Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1630 }
1631 else
1632 Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1633 }
1634 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1635 {
1636 /* xchg [MMIO], reg. */
1637 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1638 if (rc == VINF_SUCCESS)
1639 {
1640 /* Store result to MMIO. */
1641 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1642 if (rc == VINF_SUCCESS)
1643 {
1644 /* Store result to register. */
1645 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
1646 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1647 }
1648 else
1649 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1650 }
1651 else
1652 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1653 }
1654 else
1655 {
1656 AssertMsgFailed(("Disassember XCHG problem..\n"));
1657 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1658 }
1659 return rc;
1660}
1661
1662#endif /* !IEM_USE_IEM_INSTEAD */
1663
1664/**
1665 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
1666 *
1667 * @returns VBox status code (appropriate for GC return).
1668 * @param pVM Pointer to the VM.
1669 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1670 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1671 * any error code (the EPT misconfig hack).
1672 * @param pCtxCore Trap register frame.
1673 * @param GCPhysFault The GC physical address corresponding to pvFault.
1674 * @param pvUser Pointer to the MMIO ring-3 range entry.
1675 */
1676static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
1677 RTGCPHYS GCPhysFault, void *pvUser)
1678{
1679 int rc = IOM_LOCK_SHARED(pVM);
1680#ifndef IN_RING3
1681 if (rc == VERR_SEM_BUSY)
1682 return VINF_IOM_R3_MMIO_READ_WRITE;
1683#endif
1684 AssertRC(rc);
1685
1686 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1687 Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1688
1689 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1690 Assert(pRange);
1691 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1692 iomMmioRetainRange(pRange);
1693#ifndef VBOX_WITH_STATISTICS
1694 IOM_UNLOCK_SHARED(pVM);
1695
1696#else
1697 /*
1698 * Locate the statistics.
1699 */
1700 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
1701 if (!pStats)
1702 {
1703 iomMmioReleaseRange(pVM, pRange);
1704# ifdef IN_RING3
1705 return VERR_NO_MEMORY;
1706# else
1707 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1708 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1709 return VINF_IOM_R3_MMIO_READ_WRITE;
1710# endif
1711 }
1712#endif
1713
1714#ifndef IN_RING3
1715 /*
1716 * Should we defer the request right away? This isn't usually the case, so
1717 * do the simple test first and the try deal with uErrorCode being N/A.
1718 */
1719 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1720 || !pRange->CTX_SUFF(pfnReadCallback))
1721 && ( uErrorCode == UINT32_MAX
1722 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1723 : uErrorCode & X86_TRAP_PF_RW
1724 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1725 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1726 )
1727 )
1728 )
1729 {
1730 if (uErrorCode & X86_TRAP_PF_RW)
1731 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1732 else
1733 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1734
1735 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1736 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1737 iomMmioReleaseRange(pVM, pRange);
1738 return VINF_IOM_R3_MMIO_READ_WRITE;
1739 }
1740#endif /* !IN_RING3 */
1741
1742 /*
1743 * The range is already retained; now enter the device's critical section.
1744 */
1745 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1746 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1747 if (rc != VINF_SUCCESS)
1748 {
1749 iomMmioReleaseRange(pVM, pRange);
1750 return rc;
1751 }
1752
1753#ifdef IEM_USE_IEM_INSTEAD
1754
1755 /*
1756 * Let IEM call us back via iomMmioHandler.
1757 */
1758 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
1759
1760 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1761 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1762 iomMmioReleaseRange(pVM, pRange);
1763 if (RT_SUCCESS(rcStrict))
1764 return rcStrict;
1765 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1766 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1767 {
1768 Log(("IOM: Hit unsupported IEM feature!\n"));
1769 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
1770 }
1771 return rcStrict;
1772
1773#else
1774
1775 /*
1776 * Disassemble the instruction and interpret it.
1777 */
1778 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1779 unsigned cbOp;
1780 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1781 if (RT_FAILURE(rc))
1782 {
1783 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1784 iomMmioReleaseRange(pVM, pRange);
1785 return rc;
1786 }
1787 switch (pDis->pCurInstr->uOpcode)
1788 {
1789 case OP_MOV:
1790 case OP_MOVZX:
1791 case OP_MOVSX:
1792 {
1793 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1794 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1795 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1796 ? uErrorCode & X86_TRAP_PF_RW
1797 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1798 rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1799 else
1800 rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1801 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1802 break;
1803 }
1804
1805
1806# ifdef IOM_WITH_MOVS_SUPPORT
1807 case OP_MOVSB:
1808 case OP_MOVSWD:
1809 {
1810 if (uErrorCode == UINT32_MAX)
1811 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1812 else
1813 {
1814 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1815 PSTAMPROFILE pStat = NULL;
1816 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1817 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1818 }
1819 break;
1820 }
1821# endif
1822
1823 case OP_STOSB:
1824 case OP_STOSWD:
1825 Assert(uErrorCode & X86_TRAP_PF_RW);
1826 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1827 rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1828 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1829 break;
1830
1831 case OP_LODSB:
1832 case OP_LODSWD:
1833 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1834 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1835 rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1836 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1837 break;
1838
1839 case OP_CMP:
1840 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1841 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1842 rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1843 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1844 break;
1845
1846 case OP_AND:
1847 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1848 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1849 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1850 break;
1851
1852 case OP_OR:
1853 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1854 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1855 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1856 break;
1857
1858 case OP_XOR:
1859 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1860 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1861 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1862 break;
1863
1864 case OP_TEST:
1865 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1866 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1867 rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1868 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1869 break;
1870
1871 case OP_BT:
1872 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1873 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1874 rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1875 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1876 break;
1877
1878 case OP_XCHG:
1879 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1880 rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1881 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1882 break;
1883
1884
1885 /*
1886 * The instruction isn't supported. Hand it on to ring-3.
1887 */
1888 default:
1889 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1890 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1891 break;
1892 }
1893
1894 /*
1895 * On success, advance RIP.
1896 */
1897 if (rc == VINF_SUCCESS)
1898 pCtxCore->rip += cbOp;
1899 else
1900 {
1901 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1902# if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1903 switch (rc)
1904 {
1905 case VINF_IOM_R3_MMIO_READ:
1906 case VINF_IOM_R3_MMIO_READ_WRITE:
1907 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1908 break;
1909 case VINF_IOM_R3_MMIO_WRITE:
1910 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1911 break;
1912 }
1913# endif
1914 }
1915
1916 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1917 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1918 iomMmioReleaseRange(pVM, pRange);
1919 return rc;
1920#endif /* !IEM_USE_IEM_INSTEAD */
1921}
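/*
 * A condensed sketch of the ring-0/raw-mode deferral test above (the helper
 * name is hypothetical; the fields and macros are the ones used in this file).
 * The access is pushed to ring-3 whenever the current context lacks a
 * callback that a ring-3 registration provides:
 *
 * @code
 *     static bool iomSketchDeferToRing3(PIOMMMIORANGE pRange, uint32_t uErrorCode)
 *     {
 *         // Cheap test first: nothing to defer if this context has both callbacks.
 *         if (pRange->CTX_SUFF(pfnWriteCallback) && pRange->CTX_SUFF(pfnReadCallback))
 *             return false;
 *         if (uErrorCode == UINT32_MAX) // direction unknown (EPT+MMIO optimization)
 *             return pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3;
 *         if (uErrorCode & X86_TRAP_PF_RW) // write fault
 *             return !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3;
 *         return !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3;
 *     }
 * @endcode
 */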
1922
1923
1924/**
1925 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1926 * \#PF access handler callback for MMIO pages.}
1927 *
1928 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
1929 */
1930DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
1931 RTGCPHYS GCPhysFault, void *pvUser)
1932{
1933 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1934 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1935 return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1936}
1937
1938
1939/**
1940 * Physical access handler for MMIO ranges.
1941 *
1942 * @returns VBox status code (appropriate for GC return).
1943 * @param pVM Pointer to the VM.
1944 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1945 * @param uErrorCode CPU Error code.
1946 * @param pCtxCore Trap register frame.
1947 * @param GCPhysFault The GC physical address.
1948 */
1949VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1950{
1951 /*
1952 * We don't have a range here, so look it up before calling the common function.
1953 */
1954 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1955#ifndef IN_RING3
1956 if (rc2 == VERR_SEM_BUSY)
1957 return VINF_IOM_R3_MMIO_READ_WRITE;
1958#endif
1959 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1960 if (RT_UNLIKELY(!pRange))
1961 {
1962 IOM_UNLOCK_SHARED(pVM);
1963 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1964 }
1965 iomMmioRetainRange(pRange);
1966 IOM_UNLOCK_SHARED(pVM);
1967
1968 VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
1969
1970 iomMmioReleaseRange(pVM, pRange);
1971 return VBOXSTRICTRC_VAL(rcStrict);
1972}
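/*
 * The lookup above follows the retain/unlock/release discipline used
 * throughout this file: take the shared IOM lock only for the range lookup,
 * reference the range so it cannot be destroyed, and drop the lock before
 * invoking anything that may take other locks.  A minimal sketch of the
 * pattern, using the same primitives:
 *
 * @code
 *     int rc = IOM_LOCK_SHARED(pVM); NOREF(rc);   // shared lock for the lookup only
 *     PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
 *     if (pRange)
 *         iomMmioRetainRange(pRange);             // ref-count keeps the range alive
 *     IOM_UNLOCK_SHARED(pVM);                     // drop the lock before any callback
 *     // ... use pRange ...
 *     iomMmioReleaseRange(pVM, pRange);           // balance the retain when done
 * @endcode
 */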
1973
1974
1975/**
1976 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
1977 *
1978 * @remarks The @a pvUser argument points to the MMIO range entry.
1979 */
1980PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
1981 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
1982{
1983 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1984 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1985
1986 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
1987 AssertPtr(pRange);
1988 NOREF(pvPhys); NOREF(enmOrigin);
1989
1990 /*
1991 * Validate the range.
1992 */
1993 int rc = IOM_LOCK_SHARED(pVM);
1994#ifndef IN_RING3
1995 if (rc == VERR_SEM_BUSY)
1996 return VINF_IOM_R3_MMIO_READ_WRITE;
1997#endif
1998 AssertRC(rc);
1999 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
2000
2001 /*
2002 * Perform locking.
2003 */
2004 iomMmioRetainRange(pRange);
2005 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2006 IOM_UNLOCK_SHARED(pVM);
2007 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
2008 if (rcStrict == VINF_SUCCESS)
2009 {
2010 /*
2011 * Perform the access.
2012 */
2013 if (enmAccessType == PGMACCESSTYPE_READ)
2014 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2015 else
2016 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2017
2018 /* Check the return code. */
2019#ifdef IN_RING3
2020 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
2021#else
2022 AssertMsg( rcStrict == VINF_SUCCESS
2023 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
2024 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
2025 || rcStrict == VINF_EM_DBG_STOP
2026 || rcStrict == VINF_EM_DBG_BREAKPOINT
2027 || rcStrict == VINF_EM_OFF
2028 || rcStrict == VINF_EM_SUSPEND
2029 || rcStrict == VINF_EM_RESET
2030 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
2031 //|| rcStrict == VINF_EM_HALT /* ?? */
2032 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
2033 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
2034#endif
2035
2036 iomMmioReleaseRange(pVM, pRange);
2037 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2038 }
2039 else
2040 iomMmioReleaseRange(pVM, pRange);
2041 return rcStrict;
2042}
2043
2044
2045#ifdef IN_RING3 /* Only used by REM. */
2046
2047/**
2048 * Reads a MMIO register.
2049 *
2050 * @returns VBox status code.
2051 *
2052 * @param pVM Pointer to the VM.
2053 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2054 * @param GCPhys The physical address to read.
2055 * @param pu32Value Where to store the value read.
2056 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
2057 */
2058VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
2059{
2060 /* Take the IOM lock before performing any MMIO. */
2061 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2062#ifndef IN_RING3
2063 if (rc == VERR_SEM_BUSY)
2064 return VINF_IOM_R3_MMIO_READ;
2065#endif
2066 AssertRC(VBOXSTRICTRC_VAL(rc));
2067#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2068 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
2069#endif
2070
2071 /*
2072 * Lookup the current context range node and statistics.
2073 */
2074 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2075 if (!pRange)
2076 {
2077 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2078 IOM_UNLOCK_SHARED(pVM);
2079 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2080 }
2081 iomMmioRetainRange(pRange);
2082#ifndef VBOX_WITH_STATISTICS
2083 IOM_UNLOCK_SHARED(pVM);
2084
2085#else /* VBOX_WITH_STATISTICS */
2086 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2087 if (!pStats)
2088 {
2089 iomMmioReleaseRange(pVM, pRange);
2090# ifdef IN_RING3
2091 return VERR_NO_MEMORY;
2092# else
2093 return VINF_IOM_R3_MMIO_READ;
2094# endif
2095 }
2096 STAM_COUNTER_INC(&pStats->Accesses);
2097#endif /* VBOX_WITH_STATISTICS */
2098
2099 if (pRange->CTX_SUFF(pfnReadCallback))
2100 {
2101 /*
2102 * Perform locking.
2103 */
2104 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2105 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
2106 if (rc != VINF_SUCCESS)
2107 {
2108 iomMmioReleaseRange(pVM, pRange);
2109 return rc;
2110 }
2111
2112 /*
2113 * Perform the read and deal with the result.
2114 */
2115 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
2116 if ( (cbValue == 4 && !(GCPhys & 3))
2117 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
2118 || (cbValue == 8 && !(GCPhys & 7)) )
2119 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
2120 pu32Value, (unsigned)cbValue);
2121 else
2122 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
2123 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2124 switch (VBOXSTRICTRC_VAL(rc))
2125 {
2126 case VINF_SUCCESS:
2127 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2128 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2129 iomMmioReleaseRange(pVM, pRange);
2130 return rc;
2131#ifndef IN_RING3
2132 case VINF_IOM_R3_MMIO_READ:
2133 case VINF_IOM_R3_MMIO_READ_WRITE:
2134 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2135#endif
2136 default:
2137 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2138 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2139 iomMmioReleaseRange(pVM, pRange);
2140 return rc;
2141
2142 case VINF_IOM_MMIO_UNUSED_00:
2143 iomMMIODoRead00s(pu32Value, cbValue);
2144 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2145 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2146 iomMmioReleaseRange(pVM, pRange);
2147 return VINF_SUCCESS;
2148
2149 case VINF_IOM_MMIO_UNUSED_FF:
2150 iomMMIODoReadFFs(pu32Value, cbValue);
2151 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2152 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2153 iomMmioReleaseRange(pVM, pRange);
2154 return VINF_SUCCESS;
2155 }
2156 /* not reached */
2157 }
2158#ifndef IN_RING3
2159 if (pRange->pfnReadCallbackR3)
2160 {
2161 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2162 iomMmioReleaseRange(pVM, pRange);
2163 return VINF_IOM_R3_MMIO_READ;
2164 }
2165#endif
2166
2167 /*
2168 * Unassigned memory - this is actually not supposed to happen...
2169 */
2170 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
2171 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2172 iomMMIODoReadFFs(pu32Value, cbValue);
2173 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2174 iomMmioReleaseRange(pVM, pRange);
2175 return VINF_SUCCESS;
2176}
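/*
 * A minimal usage sketch (the physical address is hypothetical): a ring-3
 * caller such as REM reading a 32-bit device register:
 *
 * @code
 *     uint32_t u32Value = 0;
 *     VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, UINT64_C(0xe0000000), &u32Value, sizeof(u32Value));
 *     if (rcStrict == VINF_SUCCESS)
 *         Log(("reg=%#RX32\n", u32Value));
 * @endcode
 */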
2177
2178
2179/**
2180 * Writes to a MMIO register.
2181 *
2182 * @returns VBox status code.
2183 *
2184 * @param pVM Pointer to the VM.
2185 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2186 * @param GCPhys The physical address to write to.
2187 * @param u32Value The value to write.
2188 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
2189 */
2190VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
2191{
2192 /* Take the IOM lock before performing any MMIO. */
2193 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2194#ifndef IN_RING3
2195 if (rc == VERR_SEM_BUSY)
2196 return VINF_IOM_R3_MMIO_WRITE;
2197#endif
2198 AssertRC(VBOXSTRICTRC_VAL(rc));
2199#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2200 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2201#endif
2202
2203 /*
2204 * Lookup the current context range node.
2205 */
2206 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2207 if (!pRange)
2208 {
2209 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2210 IOM_UNLOCK_SHARED(pVM);
2211 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2212 }
2213 iomMmioRetainRange(pRange);
2214#ifndef VBOX_WITH_STATISTICS
2215 IOM_UNLOCK_SHARED(pVM);
2216
2217#else /* VBOX_WITH_STATISTICS */
2218 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2219 if (!pStats)
2220 {
2221 iomMmioReleaseRange(pVM, pRange);
2222# ifdef IN_RING3
2223 return VERR_NO_MEMORY;
2224# else
2225 return VINF_IOM_R3_MMIO_WRITE;
2226# endif
2227 }
2228 STAM_COUNTER_INC(&pStats->Accesses);
2229#endif /* VBOX_WITH_STATISTICS */
2230
2231 if (pRange->CTX_SUFF(pfnWriteCallback))
2232 {
2233 /*
2234 * Perform locking.
2235 */
2236 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2237 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2238 if (rc != VINF_SUCCESS)
2239 {
2240 iomMmioReleaseRange(pVM, pRange);
2241 return rc;
2242 }
2243
2244 /*
2245 * Perform the write.
2246 */
2247 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2248 if ( (cbValue == 4 && !(GCPhys & 3))
2249 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2250 || (cbValue == 8 && !(GCPhys & 7)) )
2251 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2252 GCPhys, &u32Value, (unsigned)cbValue);
2253 else
2254 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2255 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2256#ifndef IN_RING3
2257 if ( rc == VINF_IOM_R3_MMIO_WRITE
2258 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2259 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2260#endif
2261 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2262 iomMmioReleaseRange(pVM, pRange);
2263 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2264 return rc;
2265 }
2266#ifndef IN_RING3
2267 if (pRange->pfnWriteCallbackR3)
2268 {
2269 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2270 iomMmioReleaseRange(pVM, pRange);
2271 return VINF_IOM_R3_MMIO_WRITE;
2272 }
2273#endif
2274
2275 /*
2276 * No write handler, nothing to do.
2277 */
2278 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2279 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2280 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2281 iomMmioReleaseRange(pVM, pRange);
2282 return VINF_SUCCESS;
2283}
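/*
 * The corresponding write-side usage sketch (address and value hypothetical):
 *
 * @code
 *     VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, pVCpu, UINT64_C(0xe0000000), UINT32_C(0x1), sizeof(uint32_t));
 *     AssertRC(VBOXSTRICTRC_VAL(rcStrict));
 * @endcode
 */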
2284
2285#endif /* IN_RING3 - only used by REM. */
2286#ifndef IEM_USE_IEM_INSTEAD
2287
2288/**
2289 * [REP*] INSB/INSW/INSD
2290 * ES:EDI,DX[,ECX]
2291 *
2292 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2293 *
2294 * @returns Strict VBox status code. Informational status codes other than the ones documented
2295 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2296 * @retval VINF_SUCCESS Success.
2297 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2298 * status code must be passed on to EM.
2299 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2300 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2301 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2302 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2303 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2304 *
2305 * @param pVM The virtual machine.
2306 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2307 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2308 * @param uPort IO Port
2309 * @param uPrefix IO instruction prefix
2310 * @param enmAddrMode The address mode.
2311 * @param cbTransfer Size of transfer unit
2312 */
2313VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2314 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2315{
2316 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2317
2318 /*
2319 * We do not support REPNE or a decrementing destination
2320 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2321 */
2322 if ( (uPrefix & DISPREFIX_REPNE)
2323 || pRegFrame->eflags.Bits.u1DF)
2324 return VINF_EM_RAW_EMULATE_INSTR;
2325
2326 /*
2327 * Get bytes/words/dwords count to transfer.
2328 */
2329 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2330 RTGCUINTREG cTransfers = 1;
2331 if (uPrefix & DISPREFIX_REP)
2332 {
2333#ifndef IN_RC
2334 if ( CPUMIsGuestIn64BitCode(pVCpu)
2335 && pRegFrame->rcx >= _4G)
2336 return VINF_EM_RAW_EMULATE_INSTR;
2337#endif
2338 cTransfers = pRegFrame->rcx & fAddrMask;
2339 if (!cTransfers)
2340 return VINF_SUCCESS;
2341 }
2342
2343 /* Convert destination address es:edi. */
2344 RTGCPTR GCPtrDst;
2345 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2346 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2347 &GCPtrDst);
2348 if (RT_FAILURE(rc2))
2349 {
2350 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2351 return VINF_EM_RAW_EMULATE_INSTR;
2352 }
2353
2354 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2355 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2356 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2357 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2358 if (rc2 != VINF_SUCCESS)
2359 {
2360 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2361 return VINF_EM_RAW_EMULATE_INSTR;
2362 }
2363
2364 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2365 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2366 if (cTransfers > 1)
2367 {
2368 /*
2369 * Work the string page by page, letting the device handle as much
2370 * as it likes via the string I/O interface.
2371 */
2372 for (;;)
2373 {
2374 PGMPAGEMAPLOCK Lock;
2375 void *pvDst;
2376 rc2 = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2377 if (RT_SUCCESS(rc2))
2378 {
2379 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK)) / cbTransfer;
2380 if (cMaxThisTime > cTransfers)
2381 cMaxThisTime = cTransfers;
2382 if (!cMaxThisTime)
2383 break;
2384 uint32_t cThisTime = cMaxThisTime;
2385
2386 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, pvDst, &cThisTime, cbTransfer);
2387 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2388 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2389
2390 uint32_t const cActual = cMaxThisTime - cThisTime;
2391 if (cActual)
2392 { /* Must dirty the page. */
2393 uint8_t b = *(uint8_t *)pvDst;
2394 iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &b, 1);
2395 }
2396
2397 PGMPhysReleasePageMappingLock(pVM, &Lock);
2398
2399 uint32_t const cbActual = cActual * cbTransfer;
2400 cTransfers -= cActual;
2401 pRegFrame->rdi = ((pRegFrame->rdi + cbActual) & fAddrMask)
2402 | (pRegFrame->rdi & ~fAddrMask);
2403 GCPtrDst += cbActual;
2404
2405 if ( cThisTime
2406 || !cTransfers
2407 || rcStrict != VINF_SUCCESS
2408 || (GCPtrDst & PAGE_OFFSET_MASK))
2409 break;
2410 }
2411 else
2412 {
2413 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtr %#RGv -> %Rrc\n", GCPtrDst, rc2));
2414 break;
2415 }
2416 }
2417 }
2418
2419 /*
2420 * Single transfer / unmapped memory fallback.
2421 */
2422#ifdef IN_RC
2423 MMGCRamRegisterTrapHandler(pVM);
2424#endif
2425 while (cTransfers && rcStrict == VINF_SUCCESS)
2426 {
2427 uint32_t u32Value;
2428 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2429 if (!IOM_SUCCESS(rcStrict))
2430 break;
2431 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2432 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2433 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2434 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2435 | (pRegFrame->rdi & ~fAddrMask);
2436 cTransfers--;
2437 }
2438#ifdef IN_RC
2439 MMGCRamDeregisterTrapHandler(pVM);
2440#endif
2441
2442 /* Update rcx on exit. */
2443 if (uPrefix & DISPREFIX_REP)
2444 pRegFrame->rcx = (cTransfers & fAddrMask)
2445 | (pRegFrame->rcx & ~fAddrMask);
2446
2447 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2448 return rcStrict;
2449}
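/*
 * Worked example of the fAddrMask update arithmetic above (illustrative
 * values): with a 16-bit address size the mask is 0xffff, so the pointer
 * advances inside the low 16 bits while the high RDI bits are preserved:
 *
 * @code
 *     uint64_t const fAddrMask = UINT64_C(0xffff); // iomDisModeToMask(DISCPUMODE_16BIT)
 *     uint64_t       rdi       = UINT64_C(0x1234fff8);
 *     rdi = ((rdi + 0x10) & fAddrMask) | (rdi & ~fAddrMask);
 *     // rdi == 0x12340008: the offset wrapped within the 16-bit window.
 * @endcode
 */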
2450
2451
2452/**
2453 * [REP*] OUTSB/OUTSW/OUTSD
2454 * DS:ESI,DX[,ECX]
2455 *
2456 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2457 *
2458 * @returns Strict VBox status code. Informational status codes other than the ones documented
2459 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2460 * @retval VINF_SUCCESS Success.
2461 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2462 * status code must be passed on to EM.
2463 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2464 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2465 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2466 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2467 *
2468 * @param pVM The virtual machine.
2469 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2470 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2471 * @param uPort IO Port
2472 * @param uPrefix IO instruction prefix
2473 * @param enmAddrMode The address mode.
2474 * @param cbTransfer Size of transfer unit
2475 *
2476 * @remarks This API will probably be replaced by IEM before long, so there is
2477 * little point in optimizing or fixing it much here.
2478 */
2479VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2480 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2481{
2482 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2483
2484 /*
2485 * We do not support segment prefixes, REPNE or
2486 * a decrementing source pointer.
2487 */
2488 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2489 || pRegFrame->eflags.Bits.u1DF)
2490 return VINF_EM_RAW_EMULATE_INSTR;
2491
2492 /*
2493 * Get bytes/words/dwords count to transfer.
2494 */
2495 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2496 RTGCUINTREG cTransfers = 1;
2497 if (uPrefix & DISPREFIX_REP)
2498 {
2499#ifndef IN_RC
2500 if ( CPUMIsGuestIn64BitCode(pVCpu)
2501 && pRegFrame->rcx >= _4G)
2502 return VINF_EM_RAW_EMULATE_INSTR;
2503#endif
2504 cTransfers = pRegFrame->rcx & fAddrMask;
2505 if (!cTransfers)
2506 return VINF_SUCCESS;
2507 }
2508
2509 /* Convert source address ds:esi. */
2510 RTGCPTR GCPtrSrc;
2511 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2512 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2513 &GCPtrSrc);
2514 if (RT_FAILURE(rc2))
2515 {
2516 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2517 return VINF_EM_RAW_EMULATE_INSTR;
2518 }
2519
2520 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2521 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2522 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2523 (cpl == 3) ? X86_PTE_US : 0);
2524 if (rc2 != VINF_SUCCESS)
2525 {
2526 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2527 return VINF_EM_RAW_EMULATE_INSTR;
2528 }
2529
2530 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2531 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2532 if (cTransfers > 1)
2533 {
2534 /*
2535 * Work the string page by page, letting the device handle as much
2536 * as it likes via the string I/O interface.
2537 */
2538 for (;;)
2539 {
2540 PGMPAGEMAPLOCK Lock;
2541 void const *pvSrc;
2542 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2543 if (RT_SUCCESS(rc2))
2544 {
2545 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK)) / cbTransfer;
2546 if (cMaxThisTime > cTransfers)
2547 cMaxThisTime = cTransfers;
2548 if (!cMaxThisTime)
2549 break;
2550 uint32_t cThisTime = cMaxThisTime;
2551
2552 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc, &cThisTime, cbTransfer);
2553 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2554 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2555
2556 PGMPhysReleasePageMappingLock(pVM, &Lock);
2557
2558 uint32_t const cActual = cMaxThisTime - cThisTime;
2559 uint32_t const cbActual = cActual * cbTransfer;
2560 cTransfers -= cActual;
2561 pRegFrame->rsi = ((pRegFrame->rsi + cbActual) & fAddrMask)
2562 | (pRegFrame->rsi & ~fAddrMask);
2563 GCPtrSrc += cbActual;
2564
2565 if ( cThisTime
2566 || !cTransfers
2567 || rcStrict != VINF_SUCCESS
2568 || (GCPtrSrc & PAGE_OFFSET_MASK))
2569 break;
2570 }
2571 else
2572 {
2573 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtrReadOnly %#RGv -> %Rrc\n", GCPtrSrc, rc2));
2574 break;
2575 }
2576 }
2577 }
2578
2579 /*
2580 * Single transfer / unmapped memory fallback.
2581 */
2582#ifdef IN_RC
2583 MMGCRamRegisterTrapHandler(pVM);
2584#endif
2585
2586 while (cTransfers && rcStrict == VINF_SUCCESS)
2587 {
2588 uint32_t u32Value = 0;
2589 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2590 if (rcStrict != VINF_SUCCESS)
2591 break;
2592 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2593 if (!IOM_SUCCESS(rcStrict))
2594 break;
2595 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
2596 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2597 | (pRegFrame->rsi & ~fAddrMask);
2598 cTransfers--;
2599 }
2600
2601#ifdef IN_RC
2602 MMGCRamDeregisterTrapHandler(pVM);
2603#endif
2604
2605 /* Update rcx on exit. */
2606 if (uPrefix & DISPREFIX_REP)
2607 pRegFrame->rcx = (cTransfers & fAddrMask)
2608 | (pRegFrame->rcx & ~fAddrMask);
2609
2610 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2611 return rcStrict;
2612}
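/*
 * Note on the string I/O loop semantics shared by the INS and OUTS
 * interpreters above: the count passed by reference tells the device how
 * many units it may handle, and on return it holds how many are left, so
 * the units actually transferred this round are the difference.  A
 * condensed sketch:
 *
 * @code
 *     uint32_t cThisTime = cMaxThisTime;
 *     rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc, &cThisTime, cbTransfer);
 *     uint32_t const cActual = cMaxThisTime - cThisTime; // units the device consumed
 * @endcode
 */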
2613
2614#endif /* !IEM_USE_IEM_INSTEAD */
2615
2616
2617#ifndef IN_RC
2618
2619/**
2620 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2621 *
2622 * (This is a special optimization used by the VGA device.)
2623 *
2624 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2625 * remapping is made.
2626 *
2627 * @param pVM The virtual machine.
2628 * @param GCPhys The address of the MMIO page to be changed.
2629 * @param GCPhysRemapped The address of the MMIO2 page.
2630 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2631 * for the time being.
2632 */
2633VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2634{
2635# ifndef IEM_VERIFICATION_MODE_FULL
2636 /* Currently only called from the VGA device during MMIO. */
2637 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2638 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2639 PVMCPU pVCpu = VMMGetCpu(pVM);
2640
2641 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2642 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2643 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2644 && !HMIsNestedPagingActive(pVM)))
2645 return VINF_SUCCESS; /* ignore */
2646
2647 int rc = IOM_LOCK_SHARED(pVM);
2648 if (RT_FAILURE(rc))
2649 return VINF_SUCCESS; /* better luck the next time around */
2650
2651 /*
2652 * Lookup the context range node the page belongs to.
2653 */
2654 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2655 AssertMsgReturn(pRange,
2656 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2657
2658 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2659 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2660
2661 /*
2662 * Do the aliasing; page align the addresses since PGM is picky.
2663 */
2664 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2665 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2666
2667 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2668
2669 IOM_UNLOCK_SHARED(pVM);
2670 AssertRCReturn(rc, rc);
2671
2672 /*
2673 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2674 * can simply prefetch it.
2675 *
2676 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2677 */
2678# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2679# ifdef VBOX_STRICT
2680 uint64_t fFlags;
2681 RTHCPHYS HCPhys;
2682 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2683 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2684# endif
2685# endif
2686 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2687 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2688# endif /* !IEM_VERIFICATION_MODE_FULL */
2689 return VINF_SUCCESS;
2690}
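/*
 * A minimal usage sketch (hypothetical page addresses): a VGA-style device
 * granting the guest direct access to an MMIO2-backed page would call:
 *
 * @code
 *     int rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMmioPage,      // guest-visible MMIO page
 *                                  GCPhysMmio2Page,          // backing MMIO2 page
 *                                  X86_PTE_RW | X86_PTE_P);  // required flags
 * @endcode
 */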
2691
2692
2693# ifndef IEM_VERIFICATION_MODE_FULL
2694/**
2695 * Mapping a HC page in place of an MMIO page for direct access.
2696 *
2697 * (This is a special optimization used by the APIC in the VT-x case.)
2698 *
2699 * @returns VBox status code.
2700 *
2701 * @param pVM Pointer to the VM.
2702 * @param pVCpu Pointer to the VMCPU.
2703 * @param GCPhys The address of the MMIO page to be changed.
2704 * @param HCPhys The address of the host physical page.
2705 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2706 * for the time being.
2707 */
2708VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2709{
2710 /* Currently only called from VT-x code during a page fault. */
2711 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2712
2713 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2714 Assert(HMIsEnabled(pVM));
2715
2716 /*
2717 * Lookup the context range node the page belongs to.
2718 */
2719# ifdef VBOX_STRICT
2720 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2721 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2722 AssertMsgReturn(pRange,
2723 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2724 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2725 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2726# endif
2727
2728 /*
2729 * Do the aliasing; page align the addresses since PGM is picky.
2730 */
2731 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2732 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2733
2734 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2735 AssertRCReturn(rc, rc);
2736
2737 /*
2738 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2739 * can simply prefetch it.
2740 *
2741 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2742 */
2743 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2744 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2745 return VINF_SUCCESS;
2746}
2747# endif /* !IEM_VERIFICATION_MODE_FULL */
2748
2749
2750/**
2751 * Reset a previously modified MMIO region; restore the access flags.
2752 *
2753 * @returns VBox status code.
2754 *
2755 * @param pVM The virtual machine.
2756 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2757 */
2758VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2759{
2760 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2761
2762 PVMCPU pVCpu = VMMGetCpu(pVM);
2763
2764 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2765 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2766 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2767 && !HMIsNestedPagingActive(pVM)))
2768 return VINF_SUCCESS; /* ignore */
2769
2770 /*
2771 * Lookup the context range node the page belongs to.
2772 */
2773# ifdef VBOX_STRICT
2774 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2775 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2776 AssertMsgReturn(pRange,
2777 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2778 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2779 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2780# endif
2781
2782 /*
2783 * Call PGM to do the work.
2784 *
2785 * After the call, all the pages should be non-present... unless there is
2786 * a page pool flush pending (unlikely).
2787 */
2788 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2789 AssertRC(rc);
2790
2791# ifdef VBOX_STRICT
2792 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2793 {
2794 uint32_t cb = pRange->cb;
2795 GCPhys = pRange->GCPhys;
2796 while (cb)
2797 {
2798 uint64_t fFlags;
2799 RTHCPHYS HCPhys;
2800 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2801 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2802 cb -= PAGE_SIZE;
2803 GCPhys += PAGE_SIZE;
2804 }
2805 }
2806# endif
2807 return rc;
2808}
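/*
 * A minimal usage sketch: to revoke the direct mappings again (e.g. on a
 * video mode change), a single reset restores the access handlers for the
 * whole region; any address inside the region will do:
 *
 * @code
 *     int rc = IOMMMIOResetRegion(pVM, GCPhysMmioRegion); // hypothetical region address
 * @endcode
 */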
2809
2810#endif /* !IN_RC */
2811