VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 60854

Last change on this file since 60854 was 60854, checked in by vboxsync, 9 years ago

iomMMIODoComplicatedWrite: Use VINF_IOM_R3_IOPORT_COMMIT_WRITE to deal correctly with VINF_IOM_R3_MMIO_READ/WRITE status codes handling complicated stuff.

1/* $Id: IOMAllMMIO.cpp 60854 2016-05-05 18:18:02Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#include <VBox/vmm/iem.h>
32#include "IOMInternal.h"
33#include <VBox/vmm/vm.h>
34#include <VBox/vmm/vmm.h>
35#include <VBox/vmm/hm.h>
36#include "IOMInline.h"
37
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52/** @def IEM_USE_IEM_INSTEAD
53 * Use IEM instead of IOM for interpreting MMIO accesses.
54 * Because of PATM/CSAM issues in raw-mode, we've split this up into 2nd and 3rd
55 * IEM deployment steps. */
56#if ((defined(IN_RING3) || defined(IN_RING0)) && defined(VBOX_WITH_2ND_IEM_STEP)) \
57 || defined(VBOX_WITH_3RD_IEM_STEP) || defined(DOXYGEN_RUNNING)
58# define IEM_USE_IEM_INSTEAD
59#endif
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
65
66/**
67 * Array for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
68 */
69static const unsigned g_aSize2Shift[] =
70{
71 ~0U, /* 0 - invalid */
72 0, /* *1 == 2^0 */
73 1, /* *2 == 2^1 */
74 ~0U, /* 3 - invalid */
75 2, /* *4 == 2^2 */
76 ~0U, /* 5 - invalid */
77 ~0U, /* 6 - invalid */
78 ~0U, /* 7 - invalid */
79 3 /* *8 == 2^3 */
80};
81
82/**
83 * Macro for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
84 */
85#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
86
87
88/**
89 * Returns the contents of the register or the immediate data of the instruction's parameter.
90 *
91 * @returns true on success.
92 *
93 * @todo Get rid of this code. Use DISQueryParamVal instead
94 *
95 * @param pCpu Pointer to current disassembler context.
96 * @param pParam Pointer to parameter of instruction to process.
97 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
98 * @param pu64Data Where to store retrieved data.
99 * @param pcbSize Where to store the size of data (1, 2, 4, 8).
100 */
101bool iomGetRegImmData(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t *pu64Data, unsigned *pcbSize)
102{
103 NOREF(pCpu);
104 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32))
105 {
106 *pcbSize = 0;
107 *pu64Data = 0;
108 return false;
109 }
110
111 /* divide and conquer */
112 if (pParam->fUse & (DISUSE_REG_GEN64 | DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8))
113 {
114 if (pParam->fUse & DISUSE_REG_GEN32)
115 {
116 *pcbSize = 4;
117 DISFetchReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t *)pu64Data);
118 return true;
119 }
120
121 if (pParam->fUse & DISUSE_REG_GEN16)
122 {
123 *pcbSize = 2;
124 DISFetchReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t *)pu64Data);
125 return true;
126 }
127
128 if (pParam->fUse & DISUSE_REG_GEN8)
129 {
130 *pcbSize = 1;
131 DISFetchReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t *)pu64Data);
132 return true;
133 }
134
135 Assert(pParam->fUse & DISUSE_REG_GEN64);
136 *pcbSize = 8;
137 DISFetchReg64(pRegFrame, pParam->Base.idxGenReg, pu64Data);
138 return true;
139 }
140 else
141 {
142 if (pParam->fUse & (DISUSE_IMMEDIATE64 | DISUSE_IMMEDIATE64_SX8))
143 {
144 *pcbSize = 8;
145 *pu64Data = pParam->uValue;
146 return true;
147 }
148
149 if (pParam->fUse & (DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8))
150 {
151 *pcbSize = 4;
152 *pu64Data = (uint32_t)pParam->uValue;
153 return true;
154 }
155
156 if (pParam->fUse & (DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE16_SX8))
157 {
158 *pcbSize = 2;
159 *pu64Data = (uint16_t)pParam->uValue;
160 return true;
161 }
162
163 if (pParam->fUse & DISUSE_IMMEDIATE8)
164 {
165 *pcbSize = 1;
166 *pu64Data = (uint8_t)pParam->uValue;
167 return true;
168 }
169
170 if (pParam->fUse & DISUSE_REG_SEG)
171 {
172 *pcbSize = 2;
173 DISFetchRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL *)pu64Data);
174 return true;
175 } /* Else - error. */
176
177 AssertFailed();
178 *pcbSize = 0;
179 *pu64Data = 0;
180 return false;
181 }
182}
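/*
 * Usage sketch (hypothetical operand): for an instruction like
 * 'mov [mmio], eax', resolving &pCpu->Param2 stores the EAX contents in
 * *pu64Data and 4 in *pcbSize.  Memory operands (anything using a base,
 * index, scale or displacement) are rejected up front with false, as only
 * register and immediate sources can be fetched here.
 */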
183
184
185/**
186 * Saves data to the 8/16/32/64-bit general purpose or segment register defined
187 * by the instruction's parameter.
188 *
189 * @returns true on success.
190 * @param pCpu Pointer to current disassembler context.
191 * @param pParam Pointer to parameter of instruction to process.
192 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
193 * @param u64Data 8/16/32/64 bit data to store.
194 */
195bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u64Data)
196{
197 NOREF(pCpu);
198 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32 | DISUSE_DISPLACEMENT64 | DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE16_SX8))
199 {
200 return false;
201 }
202
203 if (pParam->fUse & DISUSE_REG_GEN32)
204 {
205 DISWriteReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t)u64Data);
206 return true;
207 }
208
209 if (pParam->fUse & DISUSE_REG_GEN64)
210 {
211 DISWriteReg64(pRegFrame, pParam->Base.idxGenReg, u64Data);
212 return true;
213 }
214
215 if (pParam->fUse & DISUSE_REG_GEN16)
216 {
217 DISWriteReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t)u64Data);
218 return true;
219 }
220
221 if (pParam->fUse & DISUSE_REG_GEN8)
222 {
223 DISWriteReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t)u64Data);
224 return true;
225 }
226
227 if (pParam->fUse & DISUSE_REG_SEG)
228 {
229 DISWriteRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL)u64Data);
230 return true;
231 }
232
233 /* Else - error. */
234 return false;
235}
236
237
238#ifndef IN_RING3
239/**
240 * Defers a pending MMIO write to ring-3.
241 *
242 * @returns VINF_IOM_R3_MMIO_COMMIT_WRITE
243 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
244 * @param GCPhys The write address.
245 * @param pvBuf The bytes being written.
246 * @param cbBuf How many bytes.
247 * @param pRange The range, if resolved.
248 */
249static VBOXSTRICTRC iomMmioRing3WritePending(PVMCPU pVCpu, RTGCPHYS GCPhys, void const *pvBuf, size_t cbBuf, PIOMMMIORANGE pRange)
250{
251 Log5(("iomMmioRing3WritePending: %RGp LB %#x\n", GCPhys, cbBuf));
252 AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
253 pVCpu->iom.s.PendingMmioWrite.GCPhys = GCPhys;
254 AssertReturn(cbBuf <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
255 pVCpu->iom.s.PendingMmioWrite.cbValue = (uint32_t)cbBuf;
256 memcpy(pVCpu->iom.s.PendingMmioWrite.abValue, pvBuf, cbBuf);
257 VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
258 return VINF_IOM_R3_MMIO_COMMIT_WRITE;
259}
260#endif
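/*
 * The deferral above works by stashing the bytes in the per-VCPU pending
 * write buffer and raising VMCPU_FF_IOM; the ring-3 force-flag processing
 * (IOMR3ProcessForceFlag, not shown here) then replays the write against
 * the ring-3 callback.  Only one write can be pending at a time, hence the
 * cbValue == 0 assertion.
 */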
261
262
263/**
264 * Deals with complicated MMIO writes.
265 *
266 * Complicated means unaligned or non-dword/qword sized accesses depending on
267 * the MMIO region's access mode flags.
268 *
269 * @returns Strict VBox status code. Any EM scheduling status code,
270 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
271 * VINF_IOM_R3_MMIO_READ may be returned.
272 *
273 * @param pVM The cross context VM structure.
274 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
275 * @param pRange The range to write to.
276 * @param GCPhys The physical address to start writing.
277 * @param pvValue Pointer to the value being written.
278 * @param cbValue The size of the value to write.
279 */
280static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
281 void const *pvValue, unsigned cbValue)
282{
283 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
284 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
285 VERR_IOM_MMIO_IPE_1);
286 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
287 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
288 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
289 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
290
291 /*
292 * Do debug stop if requested.
293 */
294 int rc = VINF_SUCCESS; NOREF(pVM);
295#ifdef VBOX_STRICT
296 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
297 {
298# ifdef IN_RING3
299 LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
300 R3STRING(pRange->pszDesc)));
301 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
302 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
303 if (rc == VERR_DBGF_NOT_ATTACHED)
304 rc = VINF_SUCCESS;
305# else
306 return VINF_IOM_R3_MMIO_WRITE;
307# endif
308 }
309#endif
310
311 /*
312 * Check if we should ignore the write.
313 */
314 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
315 {
316 Assert(cbValue != 4 || (GCPhys & 3));
317 return VINF_SUCCESS;
318 }
319 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
320 {
321 Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
322 return VINF_SUCCESS;
323 }
324
325 /*
326 * Split and conquer.
327 */
328 for (;;)
329 {
330 unsigned const offAccess = GCPhys & 3;
331 unsigned cbThisPart = 4 - offAccess;
332 if (cbThisPart > cbValue)
333 cbThisPart = cbValue;
334
335 /*
336 * Get the missing bits (if any).
337 */
338 uint32_t u32MissingValue = 0;
339 if (fReadMissing && cbThisPart != 4)
340 {
341 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
342 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
343 switch (rc2)
344 {
345 case VINF_SUCCESS:
346 break;
347 case VINF_IOM_MMIO_UNUSED_FF:
348 u32MissingValue = UINT32_C(0xffffffff);
349 break;
350 case VINF_IOM_MMIO_UNUSED_00:
351 u32MissingValue = 0;
352 break;
353#ifndef IN_RING3
354 case VINF_IOM_R3_MMIO_READ:
355 case VINF_IOM_R3_MMIO_READ_WRITE:
356 case VINF_IOM_R3_MMIO_WRITE:
357 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
358 rc2 = VBOXSTRICTRC_TODO(iomMmioRing3WritePending(pVCpu, GCPhys, pvValue, cbValue, pRange));
359 if (rc == VINF_SUCCESS || rc2 < rc)
360 rc = rc2;
361 return rc;
362#endif
363 default:
364 if (RT_FAILURE(rc2))
365 {
366 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
367 return rc2;
368 }
369 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
370 if (rc == VINF_SUCCESS || rc2 < rc)
371 rc = rc2;
372 break;
373 }
374 }
375
376 /*
377 * Merge missing and given bits.
378 */
379 uint32_t u32GivenMask;
380 uint32_t u32GivenValue;
381 switch (cbThisPart)
382 {
383 case 1:
384 u32GivenValue = *(uint8_t const *)pvValue;
385 u32GivenMask = UINT32_C(0x000000ff);
386 break;
387 case 2:
388 u32GivenValue = *(uint16_t const *)pvValue;
389 u32GivenMask = UINT32_C(0x0000ffff);
390 break;
391 case 3:
392 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
393 ((uint8_t const *)pvValue)[2], 0);
394 u32GivenMask = UINT32_C(0x00ffffff);
395 break;
396 case 4:
397 u32GivenValue = *(uint32_t const *)pvValue;
398 u32GivenMask = UINT32_C(0xffffffff);
399 break;
400 default:
401 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
402 }
403 if (offAccess)
404 {
405 u32GivenValue <<= offAccess * 8;
406 u32GivenMask <<= offAccess * 8;
407 }
408
409 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
410 | (u32GivenValue & u32GivenMask);
411
412 /*
413 * Do DWORD write to the device.
414 */
415 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
416 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
417 switch (rc2)
418 {
419 case VINF_SUCCESS:
420 break;
421#ifndef IN_RING3
422 case VINF_IOM_R3_MMIO_READ:
423 case VINF_IOM_R3_MMIO_READ_WRITE:
424 case VINF_IOM_R3_MMIO_WRITE:
425 Log3(("iomMMIODoComplicatedWrite: deferring GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
426 AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
427 AssertReturn(cbValue + (GCPhys & 3) <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
428 pVCpu->iom.s.PendingMmioWrite.GCPhys = GCPhys & ~(RTGCPHYS)3;
429 pVCpu->iom.s.PendingMmioWrite.cbValue = cbValue + (GCPhys & 3);
430 *(uint32_t *)pVCpu->iom.s.PendingMmioWrite.abValue = u32Value;
431 if (cbValue > cbThisPart)
432 memcpy(&pVCpu->iom.s.PendingMmioWrite.abValue[4],
433 (uint8_t const *)pvValue + cbThisPart, cbValue - cbThisPart);
434 VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
435 if (rc == VINF_SUCCESS)
436 rc = VINF_IOM_R3_MMIO_COMMIT_WRITE;
437 return rc2;
438#endif
439 default:
440 if (RT_FAILURE(rc2))
441 {
442 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
443 return rc2;
444 }
445 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
446 if (rc == VINF_SUCCESS || rc2 < rc)
447 rc = rc2;
448 break;
449 }
450
451 /*
452 * Advance.
453 */
454 cbValue -= cbThisPart;
455 if (!cbValue)
456 break;
457 GCPhys += cbThisPart;
458 pvValue = (uint8_t const *)pvValue + cbThisPart;
459 }
460
461 return rc;
462}
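/*
 * Worked example for the loop above: a 2-byte write to an address with
 * (GCPhys & 3) == 1 on a DWORD_READ_MISSING range first reads the
 * containing dword, then merges with u32GivenMask = 0x0000ffff << 8 =
 * 0x00ffff00, so only bits 8..23 come from the guest before the full
 * dword is written back to the device.
 */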
463
464
465
466
467/**
468 * Wrapper which does the write and updates range statistics when such are enabled.
469 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
470 */
471static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
472 const void *pvData, unsigned cb)
473{
474#ifdef VBOX_WITH_STATISTICS
475 int rcSem = IOM_LOCK_SHARED(pVM);
476 if (rcSem == VERR_SEM_BUSY)
477 return VINF_IOM_R3_MMIO_WRITE;
478 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
479 if (!pStats)
480# ifdef IN_RING3
481 return VERR_NO_MEMORY;
482# else
483 return VINF_IOM_R3_MMIO_WRITE;
484# endif
485 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
486#else
487 NOREF(pVCpu);
488#endif
489
490 VBOXSTRICTRC rcStrict;
491 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
492 {
493 if ( (cb == 4 && !(GCPhysFault & 3))
494 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
495 || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
496 rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
497 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
498 else
499 rcStrict = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhysFault, pvData, cb);
500 }
501 else
502 rcStrict = VINF_SUCCESS;
503
504 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
505 STAM_COUNTER_INC(&pStats->Accesses);
506 return rcStrict;
507}
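/*
 * Dispatch note: an aligned dword write, any write to a PASSTHRU range, or
 * an aligned qword write on a qword-capable range goes straight to the
 * device callback; everything else (e.g. a byte write to a WRITE_DWORD
 * range) is routed through iomMMIODoComplicatedWrite for read-merge-write
 * treatment.
 */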
508
509
510/**
511 * Deals with complicated MMIO reads.
512 *
513 * Complicated means unaligned or non-dword/qword sized accesses depending on
514 * the MMIO region's access mode flags.
515 *
516 * @returns Strict VBox status code. Any EM scheduling status code,
517 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
518 * VINF_IOM_R3_MMIO_WRITE may be returned.
519 *
520 * @param pVM The cross context VM structure.
521 * @param pRange The range to read from.
522 * @param GCPhys The physical address to start reading.
523 * @param pvValue Where to store the value.
524 * @param cbValue The size of the value to read.
525 */
526static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
527{
528 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
529 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
530 VERR_IOM_MMIO_IPE_1);
531 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
532 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
533
534 /*
535 * Do debug stop if requested.
536 */
537 int rc = VINF_SUCCESS; NOREF(pVM);
538#ifdef VBOX_STRICT
539 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
540 {
541# ifdef IN_RING3
542 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
543 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
544 if (rc == VERR_DBGF_NOT_ATTACHED)
545 rc = VINF_SUCCESS;
546# else
547 return VINF_IOM_R3_MMIO_READ;
548# endif
549 }
550#endif
551
552 /*
553 * Split and conquer.
554 */
555 for (;;)
556 {
557 /*
558 * Do DWORD read from the device.
559 */
560 uint32_t u32Value;
561 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
562 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
563 switch (rc2)
564 {
565 case VINF_SUCCESS:
566 break;
567 case VINF_IOM_MMIO_UNUSED_FF:
568 u32Value = UINT32_C(0xffffffff);
569 break;
570 case VINF_IOM_MMIO_UNUSED_00:
571 u32Value = 0;
572 break;
573 case VINF_IOM_R3_MMIO_READ:
574 case VINF_IOM_R3_MMIO_READ_WRITE:
575 case VINF_IOM_R3_MMIO_WRITE:
576 /** @todo What if we've split a transfer and already read
578 * something? Since reads can have side effects we could be
578 * kind of screwed here... */
579 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
580 return rc2;
581 default:
582 if (RT_FAILURE(rc2))
583 {
584 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
585 return rc2;
586 }
587 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
588 if (rc == VINF_SUCCESS || rc2 < rc)
589 rc = rc2;
590 break;
591 }
592 u32Value >>= (GCPhys & 3) * 8;
593
594 /*
595 * Write what we've read.
596 */
597 unsigned cbThisPart = 4 - (GCPhys & 3);
598 if (cbThisPart > cbValue)
599 cbThisPart = cbValue;
600
601 switch (cbThisPart)
602 {
603 case 1:
604 *(uint8_t *)pvValue = (uint8_t)u32Value;
605 break;
606 case 2:
607 *(uint16_t *)pvValue = (uint16_t)u32Value;
608 break;
609 case 3:
610 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
611 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
612 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
613 break;
614 case 4:
615 *(uint32_t *)pvValue = u32Value;
616 break;
617 }
618
619 /*
620 * Advance.
621 */
622 cbValue -= cbThisPart;
623 if (!cbValue)
624 break;
625 GCPhys += cbThisPart;
626 pvValue = (uint8_t *)pvValue + cbThisPart;
627 }
628
629 return rc;
630}
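/*
 * Worked example: a 2-byte read at an address with (GCPhys & 3) == 3 takes
 * two iterations of the loop above - the first dword read is shifted right
 * by 24 bits to recover one byte, then the next dword supplies its low
 * byte.  A read callback with side effects may thus fire twice for one
 * guest access.
 */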
631
632
633/**
634 * Implements VINF_IOM_MMIO_UNUSED_FF.
635 *
636 * @returns VINF_SUCCESS.
637 * @param pvValue Where to store the 0xff bytes.
638 * @param cbValue How many bytes to fill.
639 */
640static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
641{
642 switch (cbValue)
643 {
644 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
645 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
646 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
647 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
648 default:
649 {
650 uint8_t *pb = (uint8_t *)pvValue;
651 while (cbValue--)
652 *pb++ = UINT8_C(0xff);
653 break;
654 }
655 }
656 return VINF_SUCCESS;
657}
658
659
660/**
661 * Implements VINF_IOM_MMIO_UNUSED_00.
662 *
663 * @returns VINF_SUCCESS.
664 * @param pvValue Where to store the zeros.
665 * @param cbValue How many bytes to fill.
666 */
667static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
668{
669 switch (cbValue)
670 {
671 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
672 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
673 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
674 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
675 default:
676 {
677 uint8_t *pb = (uint8_t *)pvValue;
678 while (cbValue--)
679 *pb++ = UINT8_C(0x00);
680 break;
681 }
682 }
683 return VINF_SUCCESS;
684}
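/*
 * These two helpers materialize the VINF_IOM_MMIO_UNUSED_FF/_00 statuses
 * that a read callback (or a missing one) can produce: e.g. a 2-byte read
 * yields 0xffff for the former and 0x0000 for the latter.
 */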
685
686
687/**
688 * Wrapper which does the read and updates range statistics when such are enabled.
689 */
690DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
691 void *pvValue, unsigned cbValue)
692{
693#ifdef VBOX_WITH_STATISTICS
694 int rcSem = IOM_LOCK_SHARED(pVM);
695 if (rcSem == VERR_SEM_BUSY)
696 return VINF_IOM_R3_MMIO_READ;
697 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
698 if (!pStats)
699# ifdef IN_RING3
700 return VERR_NO_MEMORY;
701# else
702 return VINF_IOM_R3_MMIO_READ;
703# endif
704 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
705#else
706 NOREF(pVCpu);
707#endif
708
709 VBOXSTRICTRC rcStrict;
710 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
711 {
712 if ( ( cbValue == 4
713 && !(GCPhys & 3))
714 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
715 || ( cbValue == 8
716 && !(GCPhys & 7)
717 && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
718 rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
719 pvValue, cbValue);
720 else
721 rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
722 }
723 else
724 rcStrict = VINF_IOM_MMIO_UNUSED_FF;
725 if (rcStrict != VINF_SUCCESS)
726 {
727 switch (VBOXSTRICTRC_VAL(rcStrict))
728 {
729 case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
730 case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
731 }
732 }
733
734 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
735 STAM_COUNTER_INC(&pStats->Accesses);
736 return rcStrict;
737}
738
739
740/**
741 * Internal - statistics only.
742 */
743DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
744{
745#ifdef VBOX_WITH_STATISTICS
746 switch (cb)
747 {
748 case 1:
749 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
750 break;
751 case 2:
752 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
753 break;
754 case 4:
755 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
756 break;
757 case 8:
758 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
759 break;
760 default:
761 /* No way. */
762 AssertMsgFailed(("Invalid data length %d\n", cb));
763 break;
764 }
765#else
766 NOREF(pVM); NOREF(cb);
767#endif
768}
769
770
771#ifndef IEM_USE_IEM_INSTEAD
772
773/**
774 * MOV reg, mem (read)
775 * MOVZX reg, mem (read)
776 * MOVSX reg, mem (read)
777 *
778 * @returns VBox status code.
779 *
780 * @param pVM The cross context VM structure.
781 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
782 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
783 * @param pCpu Disassembler CPU state.
785 * @param pRange Pointer to the MMIO range.
785 * @param GCPhysFault The GC physical address corresponding to pvFault.
786 */
787static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
788 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
789{
790 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
791
792 /*
793 * Get the data size from parameter 2,
794 * and call the handler function to get the data.
795 */
796 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
797 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
798
799 uint64_t u64Data = 0;
800 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
801 if (rc == VINF_SUCCESS)
802 {
803 /*
804 * Do sign extension for MOVSX.
805 */
806 /** @todo checkup MOVSX implementation! */
807 if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
808 {
809 if (cb == 1)
810 {
811 /* DWORD <- BYTE */
812 int64_t iData = (int8_t)u64Data;
813 u64Data = (uint64_t)iData;
814 }
815 else
816 {
817 /* DWORD <- WORD */
818 int64_t iData = (int16_t)u64Data;
819 u64Data = (uint64_t)iData;
820 }
821 }
822
823 /*
824 * Store the result to register (parameter 1).
825 */
826 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
827 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
828 }
829
830 if (rc == VINF_SUCCESS)
831 iomMMIOStatLength(pVM, cb);
832 return rc;
833}
834
835
836/**
837 * MOV mem, reg|imm (write)
838 *
839 * @returns VBox status code.
840 *
841 * @param pVM The cross context VM structure.
842 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
843 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
844 * @param pCpu Disassembler CPU state.
845 * @param pRange Pointer to the MMIO range.
846 * @param GCPhysFault The GC physical address corresponding to pvFault.
847 */
848static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
849 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
850{
851 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
852
853 /*
854 * Get data to write from second parameter,
855 * and call the callback to write it.
856 */
857 unsigned cb = 0;
858 uint64_t u64Data = 0;
859 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
860 AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);
861
862 int rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
863 if (rc == VINF_SUCCESS)
864 iomMMIOStatLength(pVM, cb);
865 return rc;
866}
867
868
869/** Wrapper for reading virtual memory. */
870DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
871{
872 /* Note: This will fail in R0 or RC if it hits an access handler. That
873 isn't a problem though since the operation can be restarted in REM. */
874#ifdef IN_RC
875 NOREF(pVCpu);
876 int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
877 /* Page may be protected and not directly accessible. */
878 if (rc == VERR_ACCESS_DENIED)
879 rc = VINF_IOM_R3_IOPORT_WRITE;
880 return rc;
881#else
882 return VBOXSTRICTRC_VAL(PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM));
883#endif
884}
885
886
887/** Wrapper for writing virtual memory. */
888DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
889{
890 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
891 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
892 * as well, since we're not behind the PGM lock and the handler may change between calls.
893 *
894 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
895 * the state of some shadowed structures. */
896#if defined(IN_RING0) || defined(IN_RC)
897 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
898#else
899 NOREF(pCtxCore);
900 return VBOXSTRICTRC_VAL(PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM));
901#endif
902}
903
904
905#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* Locking prevents this from working; it also has buggy ECX handling. */
906/**
907 * [REP] MOVSB
908 * [REP] MOVSW
909 * [REP] MOVSD
910 *
911 * Restricted implementation.
912 *
913 *
914 * @returns VBox status code.
915 *
916 * @param pVM The cross context VM structure.
917 * @param fWriteAccess Whether this is a write (true) or read (false) access.
918 * @param pRegFrame Trap register frame.
919 * @param GCPhysFault The GC physical address corresponding to pvFault.
920 * @param pCpu Disassembler CPU state.
921 * @param pRange Pointer to the MMIO range.
922 * @param ppStat Which sub-sample to attribute this call to.
923 */
924static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
925 PSTAMPROFILE *ppStat)
926{
927 /*
928 * We do not support segment prefixes or REPNE.
929 */
930 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
931 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
932
933 PVMCPU pVCpu = VMMGetCpu(pVM);
934
935 /*
936 * Get the number of bytes/words/dwords/qwords to copy.
937 */
938 uint32_t cTransfers = 1;
939 if (pCpu->fPrefix & DISPREFIX_REP)
940 {
941#ifndef IN_RC
942 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
943 && pRegFrame->rcx >= _4G)
944 return VINF_EM_RAW_EMULATE_INSTR;
945#endif
946
947 cTransfers = pRegFrame->ecx;
948 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
949 cTransfers &= 0xffff;
950
951 if (!cTransfers)
952 return VINF_SUCCESS;
953 }
954
955 /* Get the current privilege level. */
956 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
957
958 /*
959 * Get data size.
960 */
961 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
962 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
963 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
964
965#ifdef VBOX_WITH_STATISTICS
966 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
967 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
968#endif
969
970/** @todo re-evaluate on page boundaries. */
971
972 RTGCPHYS Phys = GCPhysFault;
973 int rc;
974 if (fWriteAccess)
975 {
976 /*
977 * Write operation: [Mem] -> [MMIO]
978 * ds:esi (Virt Src) -> es:edi (Phys Dst)
979 */
980 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
981
982 /* Check callback. */
983 if (!pRange->CTX_SUFF(pfnWriteCallback))
984 return VINF_IOM_R3_MMIO_WRITE;
985
986 /* Convert source address ds:esi. */
987 RTGCUINTPTR pu8Virt;
988 rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
989 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
990 (PRTGCPTR)&pu8Virt);
991 if (RT_SUCCESS(rc))
992 {
993
994 /* Access verification first; we currently can't recover properly from traps inside this instruction */
995 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
996 if (rc != VINF_SUCCESS)
997 {
998 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
999 return VINF_EM_RAW_EMULATE_INSTR;
1000 }
1001
1002#ifdef IN_RC
1003 MMGCRamRegisterTrapHandler(pVM);
1004#endif
1005
1006 /* copy loop. */
1007 while (cTransfers)
1008 {
1009 uint32_t u32Data = 0;
1010 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
1011 if (rc != VINF_SUCCESS)
1012 break;
1013 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb));
1014 if (rc != VINF_SUCCESS)
1015 break;
1016
1017 pu8Virt += offIncrement;
1018 Phys += offIncrement;
1019 pRegFrame->rsi += offIncrement;
1020 pRegFrame->rdi += offIncrement;
1021 cTransfers--;
1022 }
1023#ifdef IN_RC
1024 MMGCRamDeregisterTrapHandler(pVM);
1025#endif
1026 /* Update ecx. */
1027 if (pCpu->fPrefix & DISPREFIX_REP)
1028 pRegFrame->ecx = cTransfers;
1029 }
1030 else
1031 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1032 }
1033 else
1034 {
1035 /*
1036 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
1037 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
1038 */
1039 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
1040
1041 /* Check callback. */
1042 if (!pRange->CTX_SUFF(pfnReadCallback))
1043 return VINF_IOM_R3_MMIO_READ;
1044
1045 /* Convert destination address. */
1046 RTGCUINTPTR pu8Virt;
1047 rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1048 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1049 (RTGCPTR *)&pu8Virt);
1050 if (RT_FAILURE(rc))
1051 return VINF_IOM_R3_MMIO_READ;
1052
1053 /* Check if destination address is MMIO. */
1054 PIOMMMIORANGE pMMIODst;
1055 RTGCPHYS PhysDst;
1056 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
1057 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
1058 if ( RT_SUCCESS(rc)
1059 && (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
1060 {
1061 /** @todo implement per-device locks for MMIO access. */
1062 Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1063
1064 /*
1065 * Extra: [MMIO] -> [MMIO]
1066 */
1067 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
1068 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
1069 {
1070 iomMmioReleaseRange(pVM, pRange);
1071 return VINF_IOM_R3_MMIO_READ_WRITE;
1072 }
1073
1074 /* copy loop. */
1075 while (cTransfers)
1076 {
1077 uint32_t u32Data;
1078 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1079 if (rc != VINF_SUCCESS)
1080 break;
1081 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb));
1082 if (rc != VINF_SUCCESS)
1083 break;
1084
1085 Phys += offIncrement;
1086 PhysDst += offIncrement;
1087 pRegFrame->rsi += offIncrement;
1088 pRegFrame->rdi += offIncrement;
1089 cTransfers--;
1090 }
1091 iomMmioReleaseRange(pVM, pRange);
1092 }
1093 else
1094 {
1095 /*
1096 * Normal: [MMIO] -> [Mem]
1097 */
1098 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1099 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1100 if (rc != VINF_SUCCESS)
1101 {
1102 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
1103 return VINF_EM_RAW_EMULATE_INSTR;
1104 }
1105
1106 /* copy loop. */
1107#ifdef IN_RC
1108 MMGCRamRegisterTrapHandler(pVM);
1109#endif
1110 while (cTransfers)
1111 {
1112 uint32_t u32Data;
1113 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1114 if (rc != VINF_SUCCESS)
1115 break;
1116 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
1117 if (rc != VINF_SUCCESS)
1118 {
1119 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
1120 break;
1121 }
1122
1123 pu8Virt += offIncrement;
1124 Phys += offIncrement;
1125 pRegFrame->rsi += offIncrement;
1126 pRegFrame->rdi += offIncrement;
1127 cTransfers--;
1128 }
1129#ifdef IN_RC
1130 MMGCRamDeregisterTrapHandler(pVM);
1131#endif
1132 }
1133
1134 /* Update ecx on exit. */
1135 if (pCpu->fPrefix & DISPREFIX_REP)
1136 pRegFrame->ecx = cTransfers;
1137 }
1138
1139 /* work statistics. */
1140 if (rc == VINF_SUCCESS)
1141 iomMMIOStatLength(pVM, cb);
1142 NOREF(ppStat);
1143 return rc;
1144}
1145#endif /* IOM_WITH_MOVS_SUPPORT */
1146
1147
1148/**
1149 * Gets the address / opcode mask corresponding to the given CPU mode.
1150 *
1151 * @returns Mask.
1152 * @param enmCpuMode CPU mode.
1153 */
1154static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
1155{
1156 switch (enmCpuMode)
1157 {
1158 case DISCPUMODE_16BIT: return UINT16_MAX;
1159 case DISCPUMODE_32BIT: return UINT32_MAX;
1160 case DISCPUMODE_64BIT: return UINT64_MAX;
1161 default:
1162 AssertFailedReturn(UINT32_MAX);
1163 }
1164}
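/*
 * Example: in 16-bit address mode the mask is 0xffff, so register updates
 * of the form
 *   pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
 *                  | (pRegFrame->rdi & ~fAddrMask);
 * wrap only the low 16 bits and leave the upper bits untouched, matching
 * real a16 addressing behaviour.
 */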
1165
1166
1167/**
1168 * [REP] STOSB
1169 * [REP] STOSW
1170 * [REP] STOSD
1171 *
1172 * Restricted implementation.
1173 *
1174 *
1175 * @returns VBox status code.
1176 *
1177 * @param pVM The cross context VM structure.
1178 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1179 * @param pRegFrame Trap register frame.
1180 * @param GCPhysFault The GC physical address corresponding to pvFault.
1181 * @param pCpu Disassembler CPU state.
1182 * @param pRange Pointer to the MMIO range.
1183 */
1184static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
1185 PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1186{
1187 /*
1188 * We do not support segment prefixes or REPNE.
1189 */
1190 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
1191 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1192
1193 /*
1194 * Get the number of bytes/words/dwords/qwords to copy.
1195 */
1196 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1197 RTGCUINTREG cTransfers = 1;
1198 if (pCpu->fPrefix & DISPREFIX_REP)
1199 {
1200#ifndef IN_RC
1201 if ( CPUMIsGuestIn64BitCode(pVCpu)
1202 && pRegFrame->rcx >= _4G)
1203 return VINF_EM_RAW_EMULATE_INSTR;
1204#endif
1205
1206 cTransfers = pRegFrame->rcx & fAddrMask;
1207 if (!cTransfers)
1208 return VINF_SUCCESS;
1209 }
1210
1211/** @todo r=bird: bounds checks! */
1212
1213 /*
1214 * Get data size.
1215 */
1216 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
1217 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1218 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1219
1220#ifdef VBOX_WITH_STATISTICS
1221 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
1222 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
1223#endif
1224
1225
1226 RTGCPHYS Phys = GCPhysFault;
1227 int rc;
1228 if ( pRange->CTX_SUFF(pfnFillCallback)
1229 && cb <= 4 /* can only fill 32-bit values */)
1230 {
1231 /*
1232 * Use the fill callback.
1233 */
1234 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
1235 if (offIncrement > 0)
1236 {
1237 /* addr++ variant. */
1238 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
1239 pRegFrame->eax, cb, cTransfers);
1240 if (rc == VINF_SUCCESS)
1241 {
1242 /* Update registers. */
1243 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1244 | (pRegFrame->rdi & ~fAddrMask);
1245 if (pCpu->fPrefix & DISPREFIX_REP)
1246 pRegFrame->rcx &= ~fAddrMask;
1247 }
1248 }
1249 else
1250 {
1251 /* addr-- variant. */
1252 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1253 Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
1254 pRegFrame->eax, cb, cTransfers);
1255 if (rc == VINF_SUCCESS)
1256 {
1257 /* Update registers. */
1258 pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1259 | (pRegFrame->rdi & ~fAddrMask);
1260 if (pCpu->fPrefix & DISPREFIX_REP)
1261 pRegFrame->rcx &= ~fAddrMask;
1262 }
1263 }
1264 }
1265 else
1266 {
1267 /*
1268 * Use the write callback.
1269 */
1270 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
1271 uint64_t u64Data = pRegFrame->rax;
1272
1273 /* fill loop. */
1274 do
1275 {
1276 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
1277 if (rc != VINF_SUCCESS)
1278 break;
1279
1280 Phys += offIncrement;
1281 pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
1282 | (pRegFrame->rdi & ~fAddrMask);
1283 cTransfers--;
1284 } while (cTransfers);
1285
1286 /* Update rcx on exit. */
1287 if (pCpu->fPrefix & DISPREFIX_REP)
1288 pRegFrame->rcx = (cTransfers & fAddrMask)
1289 | (pRegFrame->rcx & ~fAddrMask);
1290 }
1291
1292 /*
1293 * Work statistics and return.
1294 */
1295 if (rc == VINF_SUCCESS)
1296 iomMMIOStatLength(pVM, cb);
1297 return rc;
1298}
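/*
 * Example for the addr-- (DF=1) fill path above: GCPhysFault is the first
 * element stored, i.e. the highest address, so the callback is handed the
 * lowest address instead: Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)).
 * Three dword stores faulting at 0xf0000008 thus fill 0xf0000000 through
 * 0xf000000b.
 */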
1299
1300
1301/**
1302 * [REP] LODSB
1303 * [REP] LODSW
1304 * [REP] LODSD
1305 *
1306 * Restricted implementation.
1307 *
1308 *
1309 * @returns VBox status code.
1310 *
1311 * @param pVM The cross context VM structure.
1312 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1313 * @param pRegFrame Trap register frame.
1314 * @param GCPhysFault The GC physical address corresponding to pvFault.
1315 * @param pCpu Disassembler CPU state.
1316 * @param pRange Pointer to the MMIO range.
1317 */
1318static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1319 PIOMMMIORANGE pRange)
1320{
1321 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1322
1323 /*
1324 * We do not support segment prefixes or REP*.
1325 */
1326 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
1327 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1328
1329 /*
1330 * Get data size.
1331 */
1332 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
1333 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1334 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1335
1336 /*
1337 * Perform read.
1338 */
1339 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb));
1340 if (rc == VINF_SUCCESS)
1341 {
1342 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1343 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
1344 | (pRegFrame->rsi & ~fAddrMask);
1345 }
1346
1347 /*
1348 * Work statistics and return.
1349 */
1350 if (rc == VINF_SUCCESS)
1351 iomMMIOStatLength(pVM, cb);
1352 return rc;
1353}
1354
1355
1356/**
1357 * CMP [MMIO], reg|imm
1358 * CMP reg|imm, [MMIO]
1359 *
1360 * Restricted implementation.
1361 *
1362 *
1363 * @returns VBox status code.
1364 *
1365 * @param pVM The cross context VM structure.
1366 * @param pRegFrame Trap register frame.
1367 * @param GCPhysFault The GC physical address corresponding to pvFault.
1368 * @param pCpu Disassembler CPU state.
1369 * @param pRange Pointer to the MMIO range.
1370 */
1371static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1372 PIOMMMIORANGE pRange)
1373{
1374 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1375
1376 /*
1377 * Get the operands.
1378 */
1379 unsigned cb = 0;
1380 uint64_t uData1 = 0;
1381 uint64_t uData2 = 0;
1382 int rc;
1383 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1384 /* cmp reg, [MMIO]. */
1385 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1386 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1387 /* cmp [MMIO], reg|imm. */
1388 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1389 else
1390 {
1391 AssertMsgFailed(("Disassembler CMP problem...\n"));
1392 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1393 }
1394
1395 if (rc == VINF_SUCCESS)
1396 {
1397#if HC_ARCH_BITS == 32
1398 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1399 if (cb > 4)
1400 return VINF_IOM_R3_MMIO_READ_WRITE;
1401#endif
1402 /* Emulate CMP and update guest flags. */
1403 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
1404 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1405 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1406 iomMMIOStatLength(pVM, cb);
1407 }
1408
1409 return rc;
1410}
1411
1412
1413/**
1414 * AND [MMIO], reg|imm
1415 * AND reg, [MMIO]
1416 * OR [MMIO], reg|imm
1417 * OR reg, [MMIO]
1418 *
1419 * Restricted implementation.
1420 *
1421 *
1422 * @returns VBox status code.
1423 *
1424 * @param pVM The cross context VM structure.
1425 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1426 * @param pRegFrame Trap register frame.
1427 * @param GCPhysFault The GC physical address corresponding to pvFault.
1428 * @param pCpu Disassembler CPU state.
1429 * @param pRange Pointer to the MMIO range.
1430 * @param pfnEmulate Instruction emulation function.
1431 */
1432static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1433 PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
1434{
1435 unsigned cb = 0;
1436 uint64_t uData1 = 0;
1437 uint64_t uData2 = 0;
1438 bool fAndWrite;
1439 int rc;
1440
1441#ifdef LOG_ENABLED
1442 const char *pszInstr;
1443
1444 if (pCpu->pCurInstr->uOpcode == OP_XOR)
1445 pszInstr = "Xor";
1446 else if (pCpu->pCurInstr->uOpcode == OP_OR)
1447 pszInstr = "Or";
1448 else if (pCpu->pCurInstr->uOpcode == OP_AND)
1449 pszInstr = "And";
1450 else
1451 pszInstr = "OrXorAnd??";
1452#endif
1453
1454 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1455 {
1456#if HC_ARCH_BITS == 32
1457 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1458 if (cb > 4)
1459 return VINF_IOM_R3_MMIO_READ_WRITE;
1460#endif
1461 /* and/or/xor reg, [MMIO]. */
1462 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1463 fAndWrite = false;
1464 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1465 }
1466 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1467 {
1468#if HC_ARCH_BITS == 32
1469 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1470 if (cb > 4)
1471 return VINF_IOM_R3_MMIO_READ_WRITE;
1472#endif
1473 /* and/or/xor [MMIO], reg|imm. */
1474 fAndWrite = true;
1475 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
1476 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
1477 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1478 else
1479 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1480 }
1481 else
1482 {
1483 AssertMsgFailed(("Disassembler AND problem...\n"));
1484 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1485 }
1486
1487 if (rc == VINF_SUCCESS)
1488 {
1489 /* Emulate AND/OR/XOR and update guest flags. */
1490 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
1491
1492 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
1493
1494 if (fAndWrite)
1495 /* Store result to MMIO. */
1496 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1497 else
1498 {
1499 /* Store result to register. */
1500 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
1501 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1502 }
1503 if (rc == VINF_SUCCESS)
1504 {
1505 /* Update guest's eflags and finish. */
1506 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1507 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1508 iomMMIOStatLength(pVM, cb);
1509 }
1510 }
1511
1512 return rc;
1513}
1514
1515
1516/**
1517 * TEST [MMIO], reg|imm
1518 * TEST reg, [MMIO]
1519 *
1520 * Restricted implementation.
1521 *
1522 *
1523 * @returns VBox status code.
1524 *
1525 * @param pVM The cross context VM structure.
1526 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1527 * @param pRegFrame Trap register frame.
1528 * @param GCPhysFault The GC physical address corresponding to pvFault.
1529 * @param pCpu Disassembler CPU state.
1530 * @param pRange Pointer to the MMIO range.
1531 */
1532static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1533 PIOMMMIORANGE pRange)
1534{
1535 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1536
1537 unsigned cb = 0;
1538 uint64_t uData1 = 0;
1539 uint64_t uData2 = 0;
1540 int rc;
1541
1542 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1543 {
1544 /* test reg, [MMIO]. */
1545 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1546 }
1547 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1548 {
1549 /* test [MMIO], reg|imm. */
1550 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1551 }
1552 else
1553 {
1554 AssertMsgFailed(("Disassembler TEST problem...\n"));
1555 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1556 }
1557
1558 if (rc == VINF_SUCCESS)
1559 {
1560#if HC_ARCH_BITS == 32
1561 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1562 if (cb > 4)
1563 return VINF_IOM_R3_MMIO_READ_WRITE;
1564#endif
1565
1566 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
1567 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
1568 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1569 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1570 iomMMIOStatLength(pVM, cb);
1571 }
1572
1573 return rc;
1574}
1575
1576
1577/**
1578 * BT [MMIO], reg|imm
1579 *
1580 * Restricted implementation.
1581 *
1582 *
1583 * @returns VBox status code.
1584 *
1585 * @param pVM The cross context VM structure.
1586 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1587 * @param pRegFrame Trap register frame.
1588 * @param GCPhysFault The GC physical address corresponding to pvFault.
1589 * @param pCpu Disassembler CPU state.
1590 * @param pRange Pointer to the MMIO range.
1591 */
1592static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1593 PIOMMMIORANGE pRange)
1594{
1595 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1596
1597 uint64_t uBit = 0;
1598 uint64_t uData = 0;
1599 unsigned cbIgnored;
1600
1601 if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
1602 {
1603 AssertMsgFailed(("Disassembler BT problem...\n"));
1604 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1605 }
1606 /* Only the size of the memory operand matters here. */
1607 unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
1608
1609 /* bt [MMIO], reg|imm. */
1610 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData));
1611 if (rc == VINF_SUCCESS)
1612 {
1613 /* Test the specified bit in the value just read. */
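        /* u1CF is a single-bit bitfield, so this assignment implicitly keeps
           only bit 0 of the shifted value, i.e. (uData >> uBit) & 1. */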
1614 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
1615 iomMMIOStatLength(pVM, cbData);
1616 }
1617
1618 return rc;
1619}
1620
1621/**
1622 * XCHG [MMIO], reg
1623 * XCHG reg, [MMIO]
1624 *
1625 * Restricted implementation.
1626 *
1627 *
1628 * @returns VBox status code.
1629 *
1630 * @param pVM The cross context VM structure.
1631 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1632 * @param pRegFrame Trap register frame.
1633 * @param GCPhysFault The GC physical address corresponding to pvFault.
1634 * @param pCpu Disassembler CPU state.
1635 * @param pRange Pointer to the MMIO range.
1636 */
1637static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1638 PIOMMMIORANGE pRange)
1639{
1640 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1641 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1642 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1643 return VINF_IOM_R3_MMIO_READ_WRITE;
1644
1645 int rc;
1646 unsigned cb = 0;
1647 uint64_t uData1 = 0;
1648 uint64_t uData2 = 0;
1649 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1650 {
1651 /* xchg reg, [MMIO]. */
1652 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1653 if (rc == VINF_SUCCESS)
1654 {
1655 /* Store result to MMIO. */
1656 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1657
1658 if (rc == VINF_SUCCESS)
1659 {
1660 /* Store result to register. */
1661 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
1662 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1663 }
1664 else
1665 Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1666 }
1667 else
1668 Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1669 }
1670 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1671 {
1672 /* xchg [MMIO], reg. */
1673 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1674 if (rc == VINF_SUCCESS)
1675 {
1676 /* Store result to MMIO. */
1677 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1678 if (rc == VINF_SUCCESS)
1679 {
1680 /* Store result to register. */
1681 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
1682 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1683 }
1684 else
1685 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1686 }
1687 else
1688 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1689 }
1690 else
1691 {
1692 AssertMsgFailed(("Disassembler XCHG problem...\n"));
1693 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1694 }
1695 return rc;
1696}
1697
1698#endif /* !IEM_USE_IEM_INSTEAD */
1699
1700/**
1701 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
1702 *
1703 * @returns VBox status code (appropriate for GC return).
1704 * @param pVM The cross context VM structure.
1705 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1706 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1707 * any error code (the EPT misconfig hack).
1708 * @param pCtxCore Trap register frame.
1709 * @param GCPhysFault The GC physical address corresponding to pvFault.
1710 * @param pvUser Pointer to the MMIO ring-3 range entry.
1711 */
1712static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
1713 RTGCPHYS GCPhysFault, void *pvUser)
1714{
1715 int rc = IOM_LOCK_SHARED(pVM);
1716#ifndef IN_RING3
1717 if (rc == VERR_SEM_BUSY)
1718 return VINF_IOM_R3_MMIO_READ_WRITE;
1719#endif
1720 AssertRC(rc);
1721
1722 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1723 Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1724
1725 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1726 Assert(pRange);
1727 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1728 iomMmioRetainRange(pRange);
1729#ifndef VBOX_WITH_STATISTICS
1730 IOM_UNLOCK_SHARED(pVM);
1731
1732#else
1733 /*
1734 * Locate the statistics.
1735 */
1736 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
1737 if (!pStats)
1738 {
1739 iomMmioReleaseRange(pVM, pRange);
1740# ifdef IN_RING3
1741 return VERR_NO_MEMORY;
1742# else
1743 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1744 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1745 return VINF_IOM_R3_MMIO_READ_WRITE;
1746# endif
1747 }
1748#endif
1749
1750#ifndef IN_RING3
1751 /*
1752 * Should we defer the request right away? This isn't usually the case, so
1753 * do the simple test first and then try to deal with uErrorCode being N/A.
1754 */
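    /* Concretely: for a known write fault only the write callbacks are
       compared, for a known read fault only the read callbacks, and when
       uErrorCode is unavailable (UINT32_MAX, the EPT misconfig path) the
       access is deferred whenever either callback exists only in ring-3. */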
1755 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1756 || !pRange->CTX_SUFF(pfnReadCallback))
1757 && ( uErrorCode == UINT32_MAX
1758 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1759 : uErrorCode & X86_TRAP_PF_RW
1760 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1761 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1762 )
1763 )
1764 )
1765 {
1766 if (uErrorCode & X86_TRAP_PF_RW)
1767 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1768 else
1769 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1770
1771 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1772 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1773 iomMmioReleaseRange(pVM, pRange);
1774 return VINF_IOM_R3_MMIO_READ_WRITE;
1775 }
1776#endif /* !IN_RING3 */
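
/* Note: the predicate above is dense; an equivalent flattened form (same
 * flags, same result - a sketch for the reader, not compiled):
 *
 * @code
 *     bool fMissingHere = !pRange->CTX_SUFF(pfnWriteCallback) || !pRange->CTX_SUFF(pfnReadCallback);
 *     bool fDefer;
 *     if (uErrorCode == UINT32_MAX)           // EPT misconfig hack: direction unknown
 *         fDefer = fMissingHere && (pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3);
 *     else if (uErrorCode & X86_TRAP_PF_RW)   // write fault
 *         fDefer = !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3;
 *     else                                    // read fault
 *         fDefer = !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3;
 * @endcode
 *
 * I.e. bounce to ring-3 only when this context lacks the callback the access
 * needs but ring-3 provides it (fMissingHere is implied in the last two arms).
 */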
1777
1778 /*
1779 * The range was already retained above; now enter the device's critical section.
1780 */
1781 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1782 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1783 if (rc != VINF_SUCCESS)
1784 {
1785 iomMmioReleaseRange(pVM, pRange);
1786 return rc;
1787 }
1788
1789#ifdef IEM_USE_IEM_INSTEAD
1790
1791 /*
1792 * Let IEM call us back via iomMmioHandler.
1793 */
1794 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
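/* Note: IEMExecOne re-executes the faulting instruction in the interpreter;
 * its MMIO accesses travel back through PGM, which dispatches them to
 * iomMmioHandler further down in this file - hence "call us back" above. */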
1795
1796 NOREF(pCtxCore); NOREF(GCPhysFault);
1797 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1798 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1799 iomMmioReleaseRange(pVM, pRange);
1800 if (RT_SUCCESS(rcStrict))
1801 return rcStrict;
1802 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1803 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1804 {
1805 Log(("IOM: Hit unsupported IEM feature!\n"));
1806 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
1807 }
1808 return rcStrict;
1809
1810#else
1811
1812 /*
1813 * Disassemble the instruction and interpret it.
1814 */
1815 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1816 unsigned cbOp;
1817 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1818 if (RT_FAILURE(rc))
1819 {
1820 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1821 iomMmioReleaseRange(pVM, pRange);
1822 return rc;
1823 }
1824 switch (pDis->pCurInstr->uOpcode)
1825 {
1826 case OP_MOV:
1827 case OP_MOVZX:
1828 case OP_MOVSX:
1829 {
1830 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1831 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1832 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1833 ? uErrorCode & X86_TRAP_PF_RW
1834 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1835 rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1836 else
1837 rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1838 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1839 break;
1840 }
1841
1842
1843# ifdef IOM_WITH_MOVS_SUPPORT
1844 case OP_MOVSB:
1845 case OP_MOVSWD:
1846 {
1847 if (uErrorCode == UINT32_MAX)
1848 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1849 else
1850 {
1851 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1852 PSTAMPROFILE pStat = NULL;
1853 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1854 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1855 }
1856 break;
1857 }
1858# endif
1859
1860 case OP_STOSB:
1861 case OP_STOSWD:
1862 Assert(uErrorCode & X86_TRAP_PF_RW);
1863 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1864 rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1865 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1866 break;
1867
1868 case OP_LODSB:
1869 case OP_LODSWD:
1870 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1871 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1872 rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1873 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1874 break;
1875
1876 case OP_CMP:
1877 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1878 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1879 rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1880 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1881 break;
1882
1883 case OP_AND:
1884 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1885 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1886 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1887 break;
1888
1889 case OP_OR:
1890 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1891 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1892 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1893 break;
1894
1895 case OP_XOR:
1896 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1897 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1898 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1899 break;
1900
1901 case OP_TEST:
1902 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1903 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1904 rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1905 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1906 break;
1907
1908 case OP_BT:
1909 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1910 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1911 rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1912 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1913 break;
1914
1915 case OP_XCHG:
1916 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1917 rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1918 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1919 break;
1920
1921
1922 /*
1923 * The instruction isn't supported. Hand it over to ring-3.
1924 */
1925 default:
1926 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1927 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1928 break;
1929 }
1930
1931 /*
1932 * On success advance EIP.
1933 */
1934 if (rc == VINF_SUCCESS)
1935 pCtxCore->rip += cbOp;
1936 else
1937 {
1938 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1939# if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1940 switch (rc)
1941 {
1942 case VINF_IOM_R3_MMIO_READ:
1943 case VINF_IOM_R3_MMIO_READ_WRITE:
1944 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1945 break;
1946 case VINF_IOM_R3_MMIO_WRITE:
1947 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1948 break;
1949 }
1950# endif
1951 }
1952
1953 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1954 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1955 iomMmioReleaseRange(pVM, pRange);
1956 return rc;
1957#endif /* !IEM_USE_IEM_INSTEAD */
1958}
1959
1960
1961/**
1962 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1963 * \#PF access handler callback for MMIO pages.}
1964 *
1965 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
1966 */
1967DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
1968 RTGCPHYS GCPhysFault, void *pvUser)
1969{
1970 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1971 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); NOREF(pvFault);
1972 return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1973}
1974
1975
1976/**
1977 * Physical access handler for MMIO ranges.
1978 *
1979 * @returns VBox status code (appropriate for GC return).
1980 * @param pVM The cross context VM structure.
1981 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1982 * @param uErrorCode CPU Error code.
1983 * @param pCtxCore Trap register frame.
1984 * @param GCPhysFault The GC physical address.
1985 */
1986VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1987{
1988 /*
1989 * We don't have a range here, so look it up before calling the common function.
1990 */
1991 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1992#ifndef IN_RING3
1993 if (rc2 == VERR_SEM_BUSY)
1994 return VINF_IOM_R3_MMIO_READ_WRITE;
1995#endif
1996 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1997 if (RT_UNLIKELY(!pRange))
1998 {
1999 IOM_UNLOCK_SHARED(pVM);
2000 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2001 }
2002 iomMmioRetainRange(pRange);
2003 IOM_UNLOCK_SHARED(pVM);
2004
2005 VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
2006
2007 iomMmioReleaseRange(pVM, pRange);
2008 return VBOXSTRICTRC_VAL(rcStrict);
2009}
2010
2011
2012/**
2013 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
2014 *
2015 * @remarks The @a pvUser argument points to the MMIO range entry.
2016 */
2017PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
2018 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
2019{
2020 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
2021 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
2022
2023 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
2024 AssertPtr(pRange);
2025 NOREF(pvPhys); NOREF(enmOrigin);
2026
2027 /*
2028 * Validate the range.
2029 */
2030 int rc = IOM_LOCK_SHARED(pVM);
2031#ifndef IN_RING3
2032 if (rc == VERR_SEM_BUSY)
2033 {
2034 if (enmAccessType == PGMACCESSTYPE_READ)
2035 return VINF_IOM_R3_MMIO_READ;
2036 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2037 return iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, NULL /*pRange*/);
2038 }
2039#endif
2040 AssertRC(rc);
2041 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
2042
2043 /*
2044 * Perform locking.
2045 */
2046 iomMmioRetainRange(pRange);
2047 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2048 IOM_UNLOCK_SHARED(pVM);
2049 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
2050 if (rcStrict == VINF_SUCCESS)
2051 {
2052 /*
2053 * Perform the access.
2054 */
2055 if (enmAccessType == PGMACCESSTYPE_READ)
2056 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2057 else
2058 {
2059 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2060#ifndef IN_RING3
2061 if (rcStrict == VINF_IOM_R3_MMIO_WRITE)
2062 rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
2063#endif
2064 }
2065
2066 /* Check the return code. */
2067#ifdef IN_RING3
2068 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
2069#else
2070 AssertMsg( rcStrict == VINF_SUCCESS
2071 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
2072 || (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE && enmAccessType == PGMACCESSTYPE_WRITE)
2073 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
2074 || rcStrict == VINF_EM_DBG_STOP
2075 || rcStrict == VINF_EM_DBG_EVENT
2076 || rcStrict == VINF_EM_DBG_BREAKPOINT
2077 || rcStrict == VINF_EM_OFF
2078 || rcStrict == VINF_EM_SUSPEND
2079 || rcStrict == VINF_EM_RESET
2080 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
2081 //|| rcStrict == VINF_EM_HALT /* ?? */
2082 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
2083 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
2084#endif
2085
2086 iomMmioReleaseRange(pVM, pRange);
2087 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2088 }
2089#ifdef IN_RING3
2090 else
2091 iomMmioReleaseRange(pVM, pRange);
2092#else
2093 else
2094 {
2095 if (rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
2096 {
2097 if (enmAccessType == PGMACCESSTYPE_READ)
2098 rcStrict = VINF_IOM_R3_MMIO_READ;
2099 else
2100 {
2101 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2102 rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
2103 }
2104 }
2105 iomMmioReleaseRange(pVM, pRange);
2106 }
2107#endif
2108 return rcStrict;
2109}
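
/* Note: both paths above land in device callbacks of the FNIOMMMIOREAD /
 * FNIOMMMIOWRITE shape, invoked exactly like the pfnReadCallback calls
 * elsewhere in this file. A minimal sketch of a read callback (the device and
 * register layout are made up for illustration):
 *
 * @code
 * static DECLCALLBACK(int) demoMmioRead(PPDMDEVINS pDevIns, void *pvUser,
 *                                       RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
 * {
 *     if (cb == 4 && !(GCPhysAddr & 3))     // hypothetical dword status register
 *     {
 *         *(uint32_t *)pv = 0;              // device specific value
 *         return VINF_SUCCESS;
 *     }
 *     return VINF_IOM_MMIO_UNUSED_FF;       // tell IOM to fill the result with FFs
 * }
 * @endcode
 */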
2110
2111
2112#ifdef IN_RING3 /* Only used by REM. */
2113
2114/**
2115 * Reads an MMIO register.
2116 *
2117 * @returns VBox status code.
2118 *
2119 * @param pVM The cross context VM structure.
2120 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2121 * @param GCPhys The physical address to read.
2122 * @param pu32Value Where to store the value read.
2123 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
2124 */
2125VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
2126{
2127 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
2128 /* Take the IOM lock before performing any MMIO. */
2129 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2130#ifndef IN_RING3
2131 if (rc == VERR_SEM_BUSY)
2132 return VINF_IOM_R3_MMIO_READ;
2133#endif
2134 AssertRC(VBOXSTRICTRC_VAL(rc));
2135#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2136 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
2137#endif
2138
2139 /*
2140 * Lookup the current context range node and statistics.
2141 */
2142 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2143 if (!pRange)
2144 {
2145 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2146 IOM_UNLOCK_SHARED(pVM);
2147 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2148 }
2149 iomMmioRetainRange(pRange);
2150#ifndef VBOX_WITH_STATISTICS
2151 IOM_UNLOCK_SHARED(pVM);
2152
2153#else /* VBOX_WITH_STATISTICS */
2154 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2155 if (!pStats)
2156 {
2157 iomMmioReleaseRange(pVM, pRange);
2158# ifdef IN_RING3
2159 return VERR_NO_MEMORY;
2160# else
2161 return VINF_IOM_R3_MMIO_READ;
2162# endif
2163 }
2164 STAM_COUNTER_INC(&pStats->Accesses);
2165#endif /* VBOX_WITH_STATISTICS */
2166
2167 if (pRange->CTX_SUFF(pfnReadCallback))
2168 {
2169 /*
2170 * Perform locking.
2171 */
2172 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2173 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
2174 if (rc != VINF_SUCCESS)
2175 {
2176 iomMmioReleaseRange(pVM, pRange);
2177 return rc;
2178 }
2179
2180 /*
2181 * Perform the read and deal with the result.
2182 */
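 /* Note: the fast path below requires a naturally aligned 4 or 8 byte access,
    or a range in PASSTHRU read mode; anything else (sub-dword, unaligned or
    straddling accesses) is split up by iomMMIODoComplicatedRead according to
    the range's read mode flags. */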
2183 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
2184 if ( (cbValue == 4 && !(GCPhys & 3))
2185 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
2186 || (cbValue == 8 && !(GCPhys & 7)) )
2187 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
2188 pu32Value, (unsigned)cbValue);
2189 else
2190 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
2191 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2192 switch (VBOXSTRICTRC_VAL(rc))
2193 {
2194 case VINF_SUCCESS:
2195 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2196 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2197 iomMmioReleaseRange(pVM, pRange);
2198 return rc;
2199#ifndef IN_RING3
2200 case VINF_IOM_R3_MMIO_READ:
2201 case VINF_IOM_R3_MMIO_READ_WRITE:
2202 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2203#endif
2204 default:
2205 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2206 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2207 iomMmioReleaseRange(pVM, pRange);
2208 return rc;
2209
2210 case VINF_IOM_MMIO_UNUSED_00:
2211 iomMMIODoRead00s(pu32Value, cbValue);
2212 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2213 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2214 iomMmioReleaseRange(pVM, pRange);
2215 return VINF_SUCCESS;
2216
2217 case VINF_IOM_MMIO_UNUSED_FF:
2218 iomMMIODoReadFFs(pu32Value, cbValue);
2219 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2220 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2221 iomMmioReleaseRange(pVM, pRange);
2222 return VINF_SUCCESS;
2223 }
2224 /* not reached */
2225 }
2226#ifndef IN_RING3
2227 if (pRange->pfnReadCallbackR3)
2228 {
2229 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2230 iomMmioReleaseRange(pVM, pRange);
2231 return VINF_IOM_R3_MMIO_READ;
2232 }
2233#endif
2234
2235 /*
2236 * Unassigned memory - this is actually not supposed to happen...
2237 */
2238 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
2239 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2240 iomMMIODoReadFFs(pu32Value, cbValue);
2241 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2242 iomMmioReleaseRange(pVM, pRange);
2243 return VINF_SUCCESS;
2244}
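
/* Note: a rough sketch of a REM-style caller of the above (the register
 * address is purely illustrative):
 *
 * @code
 *     uint32_t u32 = 0;
 *     VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, UINT64_C(0xfee00080), &u32, sizeof(u32));
 *     if (rcStrict == VINF_SUCCESS)
 *         Log(("MMIO reg=%#RX32\n", u32));
 * @endcode
 */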
2245
2246
2247/**
2248 * Writes to an MMIO register.
2249 *
2250 * @returns VBox status code.
2251 *
2252 * @param pVM The cross context VM structure.
2253 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2254 * @param GCPhys The physical address to write to.
2255 * @param u32Value The value to write.
2256 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
2257 */
2258VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
2259{
2260 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
2261 /* Take the IOM lock before performing any MMIO. */
2262 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2263#ifndef IN_RING3
2264 if (rc == VERR_SEM_BUSY)
2265 return VINF_IOM_R3_MMIO_WRITE;
2266#endif
2267 AssertRC(VBOXSTRICTRC_VAL(rc));
2268#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2269 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2270#endif
2271
2272 /*
2273 * Lookup the current context range node.
2274 */
2275 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2276 if (!pRange)
2277 {
2278 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2279 IOM_UNLOCK_SHARED(pVM);
2280 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2281 }
2282 iomMmioRetainRange(pRange);
2283#ifndef VBOX_WITH_STATISTICS
2284 IOM_UNLOCK_SHARED(pVM);
2285
2286#else /* VBOX_WITH_STATISTICS */
2287 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2288 if (!pStats)
2289 {
2290 iomMmioReleaseRange(pVM, pRange);
2291# ifdef IN_RING3
2292 return VERR_NO_MEMORY;
2293# else
2294 return VINF_IOM_R3_MMIO_WRITE;
2295# endif
2296 }
2297 STAM_COUNTER_INC(&pStats->Accesses);
2298#endif /* VBOX_WITH_STATISTICS */
2299
2300 if (pRange->CTX_SUFF(pfnWriteCallback))
2301 {
2302 /*
2303 * Perform locking.
2304 */
2305 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2306 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2307 if (rc != VINF_SUCCESS)
2308 {
2309 iomMmioReleaseRange(pVM, pRange);
2310 return rc;
2311 }
2312
2313 /*
2314 * Perform the write.
2315 */
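 /* Note: the fast path below mirrors the read side - aligned dword/qword
    accesses and PASSTHRU ranges hit the device callback directly; everything
    else is split up by iomMMIODoComplicatedWrite per the range's write mode
    flags. */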
2316 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2317 if ( (cbValue == 4 && !(GCPhys & 3))
2318 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2319 || (cbValue == 8 && !(GCPhys & 7)) )
2320 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2321 GCPhys, &u32Value, (unsigned)cbValue);
2322 else
2323 rc = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2324 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2325#ifndef IN_RING3
2326 if ( rc == VINF_IOM_R3_MMIO_WRITE
2327 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2328 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2329#endif
2330 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2331 iomMmioReleaseRange(pVM, pRange);
2332 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2333 return rc;
2334 }
2335#ifndef IN_RING3
2336 if (pRange->pfnWriteCallbackR3)
2337 {
2338 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2339 iomMmioReleaseRange(pVM, pRange);
2340 return VINF_IOM_R3_MMIO_WRITE;
2341 }
2342#endif
2343
2344 /*
2345 * No write handler, nothing to do.
2346 */
2347 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2348 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2349 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2350 iomMmioReleaseRange(pVM, pRange);
2351 return VINF_SUCCESS;
2352}
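
/* Note: the write-side companion to the read sketch above, again with an
 * illustrative address and value:
 *
 * @code
 *     VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, pVCpu, UINT64_C(0xfee00080), UINT32_C(0x20), sizeof(uint32_t));
 *     AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */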
2353
2354#endif /* IN_RING3 - only used by REM. */
2355#ifndef IEM_USE_IEM_INSTEAD
2356
2357/**
2358 * [REP*] INSB/INSW/INSD
2359 * ES:EDI,DX[,ECX]
2360 *
2361 * @remark Assumes the caller has checked the access privileges (IOMInterpretCheckPortIOAccess)
2362 *
2363 * @returns Strict VBox status code. Informational status codes other than the one documented
2364 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2365 * @retval VINF_SUCCESS Success.
2366 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2367 * status code must be passed on to EM.
2368 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2369 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2370 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2371 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2372 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2373 *
2374 * @param pVM The cross context VM structure.
2375 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2376 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2377 * @param uPort IO Port
2378 * @param uPrefix IO instruction prefix
2379 * @param enmAddrMode The address mode.
2380 * @param cbTransfer Size of transfer unit
2381 */
2382VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2383 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2384{
2385 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2386 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
2387
2388 /*
2389 * We do not support REPNE or a decrementing destination
2390 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2391 */
2392 if ( (uPrefix & DISPREFIX_REPNE)
2393 || pRegFrame->eflags.Bits.u1DF)
2394 return VINF_EM_RAW_EMULATE_INSTR;
2395
2396 /*
2397 * Get bytes/words/dwords count to transfer.
2398 */
2399 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2400 RTGCUINTREG cTransfers = 1;
2401 if (uPrefix & DISPREFIX_REP)
2402 {
2403#ifndef IN_RC
2404 if ( CPUMIsGuestIn64BitCode(pVCpu)
2405 && pRegFrame->rcx >= _4G)
2406 return VINF_EM_RAW_EMULATE_INSTR;
2407#endif
2408 cTransfers = pRegFrame->rcx & fAddrMask;
2409 if (!cTransfers)
2410 return VINF_SUCCESS;
2411 }
2412
2413 /* Convert destination address es:edi. */
2414 RTGCPTR GCPtrDst;
2415 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2416 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2417 &GCPtrDst);
2418 if (RT_FAILURE(rc2))
2419 {
2420 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2421 return VINF_EM_RAW_EMULATE_INSTR;
2422 }
2423
2424 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2425 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2426 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2427 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2428 if (rc2 != VINF_SUCCESS)
2429 {
2430 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2431 return VINF_EM_RAW_EMULATE_INSTR;
2432 }
2433
2434 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2435 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2436 if (cTransfers > 1)
2437 {
2438 /*
2439 * Work the string page by page, letting the device handle as much
2440 * as it likes via the string I/O interface.
2441 */
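 /* Note: IOMIOPortReadString treats its count argument as in/out - cThisTime
    goes in as the number of units that fit in the current page and comes back
    as the number NOT transferred, which is why cActual is computed as
    cMaxThisTime - cThisTime below. */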
2442 for (;;)
2443 {
2444 PGMPAGEMAPLOCK Lock;
2445 void *pvDst;
2446 rc2 = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2447 if (RT_SUCCESS(rc2))
2448 {
2449 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK)) / cbTransfer;
2450 if (cMaxThisTime > cTransfers)
2451 cMaxThisTime = cTransfers;
2452 if (!cMaxThisTime)
2453 break;
2454 uint32_t cThisTime = cMaxThisTime;
2455
2456 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, pvDst, &cThisTime, cbTransfer);
2457 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2458 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2459
2460 uint32_t const cActual = cMaxThisTime - cThisTime;
2461 if (cActual)
2462 { /* Must dirty the page: the device stored into pvDst directly, so write one byte back through iomRamWrite to trigger PGM's dirty tracking. */
2463 uint8_t b = *(uint8_t *)pvDst;
2464 iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &b, 1);
2465 }
2466
2467 PGMPhysReleasePageMappingLock(pVM, &Lock);
2468
2469 uint32_t const cbActual = cActual * cbTransfer;
2470 cTransfers -= cActual;
2471 pRegFrame->rdi = ((pRegFrame->rdi + cbActual) & fAddrMask)
2472 | (pRegFrame->rdi & ~fAddrMask);
2473 GCPtrDst += cbActual;
2474
2475 if ( cThisTime
2476 || !cTransfers
2477 || rcStrict != VINF_SUCCESS
2478 || (GCPtrDst & PAGE_OFFSET_MASK))
2479 break;
2480 }
2481 else
2482 {
2483 Log(("IOMInterpretINSEx: PGMPhysGCPtr2CCPtr %#RGv -> %Rrc\n", GCPtrDst, rc2));
2484 break;
2485 }
2486 }
2487 }
2488
2489 /*
2490 * Single transfer / unmapped memory fallback.
2491 */
2492#ifdef IN_RC
2493 MMGCRamRegisterTrapHandler(pVM);
2494#endif
2495 while (cTransfers && rcStrict == VINF_SUCCESS)
2496 {
2497 uint32_t u32Value;
2498 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2499 if (!IOM_SUCCESS(rcStrict))
2500 break;
2501 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2502 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2503 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2504 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2505 | (pRegFrame->rdi & ~fAddrMask);
2506 cTransfers--;
2507 }
2508#ifdef IN_RC
2509 MMGCRamDeregisterTrapHandler(pVM);
2510#endif
2511
2512 /* Update rcx on exit. */
2513 if (uPrefix & DISPREFIX_REP)
2514 pRegFrame->rcx = (cTransfers & fAddrMask)
2515 | (pRegFrame->rcx & ~fAddrMask);
2516
2517 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2518 return rcStrict;
2519}
2520
2521
2522/**
2523 * [REP*] OUTSB/OUTSW/OUTSD
2524 * DS:ESI,DX[,ECX]
2525 *
2526 * @remark Assumes the caller has checked the access privileges (IOMInterpretCheckPortIOAccess)
2527 *
2528 * @returns Strict VBox status code. Informational status codes other than the one documented
2529 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2530 * @retval VINF_SUCCESS Success.
2531 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2532 * status code must be passed on to EM.
2533 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2534 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2535 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2536 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2537 *
2538 * @param pVM The cross context VM structure.
2539 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2540 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2541 * @param uPort IO Port
2542 * @param uPrefix IO instruction prefix
2543 * @param enmAddrMode The address mode.
2544 * @param cbTransfer Size of transfer unit
2545 *
2546 * @remarks This API will probably be replaced by IEM before long, so no use in
2547 * optimizing+fixing stuff too much here.
2548 */
2549VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2550 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2551{
2552 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2553 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
2554
2555 /*
2556 * We do not support segment prefixes, REPNE or a
2557 * decrementing source pointer.
2558 */
2559 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2560 || pRegFrame->eflags.Bits.u1DF)
2561 return VINF_EM_RAW_EMULATE_INSTR;
2562
2563 /*
2564 * Get bytes/words/dwords count to transfer.
2565 */
2566 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2567 RTGCUINTREG cTransfers = 1;
2568 if (uPrefix & DISPREFIX_REP)
2569 {
2570#ifndef IN_RC
2571 if ( CPUMIsGuestIn64BitCode(pVCpu)
2572 && pRegFrame->rcx >= _4G)
2573 return VINF_EM_RAW_EMULATE_INSTR;
2574#endif
2575 cTransfers = pRegFrame->rcx & fAddrMask;
2576 if (!cTransfers)
2577 return VINF_SUCCESS;
2578 }
2579
2580 /* Convert source address ds:esi. */
2581 RTGCPTR GCPtrSrc;
2582 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2583 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2584 &GCPtrSrc);
2585 if (RT_FAILURE(rc2))
2586 {
2587 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2588 return VINF_EM_RAW_EMULATE_INSTR;
2589 }
2590
2591 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2592 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2593 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2594 (cpl == 3) ? X86_PTE_US : 0);
2595 if (rc2 != VINF_SUCCESS)
2596 {
2597 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2598 return VINF_EM_RAW_EMULATE_INSTR;
2599 }
2600
2601 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2602 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2603 if (cTransfers > 1)
2604 {
2605 /*
2606 * Work the string page by page, letting the device handle as much
2607 * as it likes via the string I/O interface.
2608 */
2609 for (;;)
2610 {
2611 PGMPAGEMAPLOCK Lock;
2612 void const *pvSrc;
2613 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2614 if (RT_SUCCESS(rc2))
2615 {
2616 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK)) / cbTransfer;
2617 if (cMaxThisTime > cTransfers)
2618 cMaxThisTime = cTransfers;
2619 if (!cMaxThisTime)
2620 break;
2621 uint32_t cThisTime = cMaxThisTime;
2622
2623 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc, &cThisTime, cbTransfer);
2624 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2625 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2626
2627 PGMPhysReleasePageMappingLock(pVM, &Lock);
2628
2629 uint32_t const cActual = cMaxThisTime - cThisTime;
2630 uint32_t const cbActual = cActual * cbTransfer;
2631 cTransfers -= cActual;
2632 pRegFrame->rsi = ((pRegFrame->rsi + cbActual) & fAddrMask)
2633 | (pRegFrame->rsi & ~fAddrMask);
2634 GCPtrSrc += cbActual;
2635
2636 if ( cThisTime
2637 || !cTransfers
2638 || rcStrict != VINF_SUCCESS
2639 || (GCPtrSrc & PAGE_OFFSET_MASK))
2640 break;
2641 }
2642 else
2643 {
2644 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtrReadOnly %#RGv -> %Rrc\n", GCPtrSrc, rc2));
2645 break;
2646 }
2647 }
2648 }
2649
2650 /*
2651 * Single transfer / unmapped memory fallback.
2652 */
2653#ifdef IN_RC
2654 MMGCRamRegisterTrapHandler(pVM);
2655#endif
2656
2657 while (cTransfers && rcStrict == VINF_SUCCESS)
2658 {
2659 uint32_t u32Value = 0;
2660 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2661 if (rcStrict != VINF_SUCCESS)
2662 break;
2663 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2664 if (!IOM_SUCCESS(rcStrict))
2665 break;
2666 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
2667 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2668 | (pRegFrame->rsi & ~fAddrMask);
2669 cTransfers--;
2670 }
2671
2672#ifdef IN_RC
2673 MMGCRamDeregisterTrapHandler(pVM);
2674#endif
2675
2676 /* Update rcx on exit. */
2677 if (uPrefix & DISPREFIX_REP)
2678 pRegFrame->rcx = (cTransfers & fAddrMask)
2679 | (pRegFrame->rcx & ~fAddrMask);
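
/* Note: the mask arithmetic used for rSI/rDI/rCX in both string helpers
 * preserves the register bits above the current address size. E.g. with a
 * 16-bit address size (fAddrMask = 0xffff), rsi = 0x12345678 and an advance
 * of 0x10:
 *
 *     ((0x12345678 + 0x10) & 0xffff) | (0x12345678 & ~0xffff) = 0x12345688
 *
 * so only SI wraps, just like the real instruction. */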
2680
2681 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2682 return rcStrict;
2683}
2684
2685#endif /* !IEM_USE_IEM_INSTEAD */
2686
2687
2688#ifndef IN_RC
2689
2690/**
2691 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2692 *
2693 * (This is a special optimization used by the VGA device.)
2694 *
2695 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2696 * remapping is made.
2697 *
2698 * @param pVM The cross context VM structure.
2699 * @param GCPhys The address of the MMIO page to be changed.
2700 * @param GCPhysRemapped The address of the MMIO2 page.
2701 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2702 * for the time being.
2703 */
2704VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2705{
2706# ifndef IEM_VERIFICATION_MODE_FULL
2707 /* Currently only called from the VGA device during MMIO. */
2708 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2709 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2710 PVMCPU pVCpu = VMMGetCpu(pVM);
2711
2712 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2713 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2714 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2715 && !HMIsNestedPagingActive(pVM)))
2716 return VINF_SUCCESS; /* ignore */
2717
2718 int rc = IOM_LOCK_SHARED(pVM);
2719 if (RT_FAILURE(rc))
2720 return VINF_SUCCESS; /* better luck the next time around */
2721
2722 /*
2723 * Lookup the context range node the page belongs to.
2724 */
2725 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2726 AssertMsgReturn(pRange,
2727 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2728
2729 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2730 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2731
2732 /*
2733 * Do the aliasing; page align the addresses since PGM is picky.
2734 */
2735 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2736 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2737
2738 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2739
2740 IOM_UNLOCK_SHARED(pVM);
2741 AssertRCReturn(rc, rc);
2742
2743 /*
2744 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2745 * can simply prefetch it.
2746 *
2747 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2748 */
2749# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2750# ifdef VBOX_STRICT
2751 uint64_t fFlags;
2752 RTHCPHYS HCPhys;
2753 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2754 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2755# endif
2756# endif
2757 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2758 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2759# endif /* !IEM_VERIFICATION_MODE_FULL */
2760 return VINF_SUCCESS;
2761}
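
/* Note: a rough sketch of the VGA-style use this optimization exists for -
 * aliasing one page of the handled MMIO window straight onto its MMIO2
 * backing so later guest accesses bypass the handler (the addresses and the
 * GCPhysVRamPage variable are illustrative):
 *
 * @code
 *     // map a framebuffer page in for direct access ...
 *     rc = IOMMMIOMapMMIO2Page(pVM, UINT64_C(0x000a0000), GCPhysVRamPage,
 *                              X86_PTE_RW | X86_PTE_P);
 *     // ... and restore handled access again later:
 *     rc = IOMMMIOResetRegion(pVM, UINT64_C(0x000a0000));
 * @endcode
 */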
2762
2763
2764# ifndef IEM_VERIFICATION_MODE_FULL
2765/**
2766 * Mapping an HC page in place of an MMIO page for direct access.
2767 *
2768 * (This is a special optimization used by the APIC in the VT-x case.)
2769 *
2770 * @returns VBox status code.
2771 *
2772 * @param pVM The cross context VM structure.
2773 * @param pVCpu The cross context virtual CPU structure.
2774 * @param GCPhys The address of the MMIO page to be changed.
2775 * @param HCPhys The address of the host physical page.
2776 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2777 * for the time being.
2778 */
2779VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2780{
2781 /* Currently only called from VT-x code during a page fault. */
2782 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2783
2784 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2785 Assert(HMIsEnabled(pVM));
2786
2787 /*
2788 * Lookup the context range node the page belongs to.
2789 */
2790# ifdef VBOX_STRICT
2791 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2792 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2793 AssertMsgReturn(pRange,
2794 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2795 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2796 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2797# endif
2798
2799 /*
2800 * Do the aliasing; page align the addresses since PGM is picky.
2801 */
2802 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2803 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2804
2805 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2806 AssertRCReturn(rc, rc);
2807
2808 /*
2809 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2810 * can simply prefetch it.
2811 *
2812 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2813 */
2814 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2815 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2816 return VINF_SUCCESS;
2817}
2818# endif /* !IEM_VERIFICATION_MODE_FULL */
2819
2820
2821/**
2822 * Reset a previously modified MMIO region; restore the access flags.
2823 *
2824 * @returns VBox status code.
2825 *
2826 * @param pVM The cross context VM structure.
2827 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2828 */
2829VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2830{
2831 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2832
2833 PVMCPU pVCpu = VMMGetCpu(pVM);
2834
2835 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2836 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2837 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2838 && !HMIsNestedPagingActive(pVM)))
2839 return VINF_SUCCESS; /* ignore */
2840
2841 /*
2842 * Lookup the context range node the page belongs to.
2843 */
2844# ifdef VBOX_STRICT
2845 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2846 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2847 AssertMsgReturn(pRange,
2848 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2849 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2850 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2851# endif
2852
2853 /*
2854 * Call PGM to do the work.
2855 *
2856 * After the call, all the pages should be non-present... unless there is
2857 * a page pool flush pending (unlikely).
2858 */
2859 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2860 AssertRC(rc);
2861
2862# ifdef VBOX_STRICT
2863 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2864 {
2865 uint32_t cb = pRange->cb;
2866 GCPhys = pRange->GCPhys;
2867 while (cb)
2868 {
2869 uint64_t fFlags;
2870 RTHCPHYS HCPhys;
2871 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2872 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2873 cb -= PAGE_SIZE;
2874 GCPhys += PAGE_SIZE;
2875 }
2876 }
2877# endif
2878 return rc;
2879}
2880
2881#endif /* !IN_RC */
2882