VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@56873

Last change on this file since 56873 was 56660, checked in by vboxsync, 10 years ago

iomMmioCommonPfHandler: Deal with IEM failure.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 97.7 KB
/* $Id: IOMAllMMIO.cpp 56660 2015-06-26 14:21:23Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM
#include <VBox/vmm/iom.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm.h>
#include "IOMInline.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** @def IEM_USE_IEM_INSTEAD
 * Use IEM instead of IOM for interpreting MMIO accesses.
 * Because of PATM/CSAM issues in raw-mode, we've split this up into a 2nd and
 * a 3rd IEM deployment step. */
#if   ((defined(IN_RING3) || defined(IN_RING0)) && defined(VBOX_WITH_2ND_IEM_STEP)) \
   || defined(VBOX_WITH_3RD_IEM_STEP)
# define IEM_USE_IEM_INSTEAD
#endif
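/* Note (summary of the conditions above): with VBOX_WITH_2ND_IEM_STEP only
   ring-3 and ring-0 hand MMIO accesses to IEM while the raw-mode context keeps
   the local interpreter below; with VBOX_WITH_3RD_IEM_STEP all contexts
   use IEM. */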


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/**
 * Array for fast conversion of an operand size (1/2/4/8 bytes) to a bit shift value.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0U,   /* 0 - invalid */
    0,     /* *1 == 2^0 */
    1,     /* *2 == 2^1 */
    ~0U,   /* 3 - invalid */
    2,     /* *4 == 2^2 */
    ~0U,   /* 5 - invalid */
    ~0U,   /* 6 - invalid */
    ~0U,   /* 7 - invalid */
    3      /* *8 == 2^3 */
};

/**
 * Macro for fast conversion of an operand size (1/2/4/8 bytes) to a bit shift value.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
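/* Usage example: SIZE_2_SHIFT(4) == 2, so the string instruction emulation
   below converts a transfer count to a byte count with
   cTransfers << SIZE_2_SHIFT(cb) instead of a multiplication. The ~0U entries
   mark invalid sizes; callers are expected to pass only 1, 2, 4 or 8. */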


/**
 * Returns the contents of a register or the immediate data of an instruction
 * parameter.
 *
 * @returns true on success.
 *
 * @todo Get rid of this code. Use DISQueryParamVal instead.
 *
 * @param   pCpu        Pointer to the current disassembler context.
 * @param   pParam      Pointer to the parameter of the instruction to process.
 * @param   pRegFrame   Pointer to the CPUMCTXCORE guest structure.
 * @param   pu64Data    Where to store the retrieved data.
 * @param   pcbSize     Where to store the size of the data (1, 2, 4, 8).
 */
bool iomGetRegImmData(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t *pu64Data, unsigned *pcbSize)
{
    NOREF(pCpu);
    if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32))
    {
        *pcbSize  = 0;
        *pu64Data = 0;
        return false;
    }

    /* divide and conquer */
    if (pParam->fUse & (DISUSE_REG_GEN64 | DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8))
    {
        if (pParam->fUse & DISUSE_REG_GEN32)
        {
            *pcbSize = 4;
            DISFetchReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t *)pu64Data);
            return true;
        }

        if (pParam->fUse & DISUSE_REG_GEN16)
        {
            *pcbSize = 2;
            DISFetchReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t *)pu64Data);
            return true;
        }

        if (pParam->fUse & DISUSE_REG_GEN8)
        {
            *pcbSize = 1;
            DISFetchReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t *)pu64Data);
            return true;
        }

        Assert(pParam->fUse & DISUSE_REG_GEN64);
        *pcbSize = 8;
        DISFetchReg64(pRegFrame, pParam->Base.idxGenReg, pu64Data);
        return true;
    }
    else
    {
        if (pParam->fUse & (DISUSE_IMMEDIATE64 | DISUSE_IMMEDIATE64_SX8))
        {
            *pcbSize = 8;
            *pu64Data = pParam->uValue;
            return true;
        }

        if (pParam->fUse & (DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8))
        {
            *pcbSize = 4;
            *pu64Data = (uint32_t)pParam->uValue;
            return true;
        }

        if (pParam->fUse & (DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE16_SX8))
        {
            *pcbSize = 2;
            *pu64Data = (uint16_t)pParam->uValue;
            return true;
        }

        if (pParam->fUse & DISUSE_IMMEDIATE8)
        {
            *pcbSize = 1;
            *pu64Data = (uint8_t)pParam->uValue;
            return true;
        }

        if (pParam->fUse & DISUSE_REG_SEG)
        {
            *pcbSize = 2;
            DISFetchRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL *)pu64Data);
            return true;
        } /* Else - error. */

        AssertFailed();
        *pcbSize  = 0;
        *pu64Data = 0;
        return false;
    }
}


/**
 * Saves data to an 8/16/32/64-bit general purpose or segment register defined
 * by the instruction's parameter.
 *
 * @returns true on success.
 * @param   pCpu        Pointer to the current disassembler context.
 * @param   pParam      Pointer to the parameter of the instruction to process.
 * @param   pRegFrame   Pointer to the CPUMCTXCORE guest structure.
 * @param   u64Data     8/16/32/64 bit data to store.
 */
bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u64Data)
{
    NOREF(pCpu);
    if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32 | DISUSE_DISPLACEMENT64 | DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE16_SX8))
    {
        return false;
    }

    if (pParam->fUse & DISUSE_REG_GEN32)
    {
        DISWriteReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t)u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_GEN64)
    {
        DISWriteReg64(pRegFrame, pParam->Base.idxGenReg, u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_GEN16)
    {
        DISWriteReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t)u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_GEN8)
    {
        DISWriteReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t)u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_SEG)
    {
        DISWriteRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL)u64Data);
        return true;
    }

    /* Else - error. */
    return false;
}


/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     The value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing =    (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                                  || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
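    /* With fReadMissing set the device only accepts full dword accesses, so a
       partial write must be widened: the bytes of the surrounding dword that
       the guest did not supply are first read back from the device and merged
       in below (a read-modify-write) before the full dword is written. */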

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
                R3STRING(pRange->pszDesc)));
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
    for (;;)
    {
        unsigned const offAccess  = GCPhys & 3;
        unsigned       cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    /** @todo What if we've split a transfer and already read
                     *        something? Since writes generally have side effects we
                     *        could be kind of screwed here...
                     *
                     *        Fix: Save the current state and resume it in ring-3. Requires EM to not go
                     *        to REM for MMIO accesses (like it may currently do). */
                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue  &  u32GivenMask);
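        /* Worked example: a 2-byte write of 0xBEEF at an address ending in 1
           gives offAccess=1 and cbThisPart=2; after the shift above,
           u32GivenValue=0x00BEEF00 and u32GivenMask=0x00FFFF00, so bytes 0
           and 3 come from u32MissingValue and a whole aligned dword can be
           written to the device below. */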

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something? Since reads can have side effects we could be
                 *        kind of screwed here...
                 *
                 *        Fix: Save the current state and resume it in ring-3. Requires EM to not go
                 *        to REM for MMIO accesses (like it may currently do). */
                LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}




/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
                                   const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                          GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rcStrict = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
    }
    else
        rcStrict = VINF_SUCCESS;

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something? Since reads can have side effects we could be
                 *        kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8;
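        /* Example: a 1-byte read at an address ending in 3 fetches the whole
           aligned dword and the shift above moves the wanted byte into the
           low 8 bits before it is stored into the caller's buffer below. */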

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_FF.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the 0xff bytes.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0xff); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0xff);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_00.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the zeros.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0x00); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0x00);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                       void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (   cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                         pvValue, cbValue);
        else
            rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rcStrict = VINF_IOM_MMIO_UNUSED_FF;
    if (rcStrict != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rcStrict))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}


#ifndef IEM_USE_IEM_INSTEAD

/**
 * MOV reg, mem (read)
 * MOVZX reg, mem (read)
 * MOVSX reg, mem (read)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Pointer to the CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the data size from parameter 2,
     * and call the handler function to get the data.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));

    uint64_t u64Data = 0;
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
    if (rc == VINF_SUCCESS)
    {
        /*
         * Do sign extension for MOVSX.
         */
        /** @todo Check the MOVSX implementation! */
        if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
        {
            if (cb == 1)
            {
                /* DWORD <- BYTE */
                int64_t iData = (int8_t)u64Data;
                u64Data = (uint64_t)iData;
            }
            else
            {
                /* DWORD <- WORD */
                int64_t iData = (int16_t)u64Data;
                u64Data = (uint64_t)iData;
            }
        }

        /*
         * Store the result to register (parameter 1).
         */
        bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
        AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
    }

    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * MOV mem, reg|imm (write)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Pointer to the CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                  PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);

    /*
     * Get data to write from second parameter,
     * and call the callback to write it.
     */
    unsigned cb = 0;
    uint64_t u64Data = 0;
    bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
    AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);

    int rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/** Wrapper for reading virtual memory. */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    NOREF(pVCpu);
    int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
    /* Page may be protected and not directly accessible. */
    if (rc == VERR_ACCESS_DENIED)
        rc = VINF_IOM_R3_IOPORT_WRITE;
    return rc;
#else
    return VBOXSTRICTRC_VAL(PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM));
#endif
}


/** Wrapper for writing virtual memory. */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code. Some thought needs to be spent on theoretical concurrency issues as
     *        well since we're not behind the pgm lock and the handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return VBOXSTRICTRC_VAL(PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM));
#endif
}


#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working; has buggy ecx handling. */
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM          The virtual machine.
 * @param   fWriteAccess Whether the faulting access is a write.
 * @param   pRegFrame    Trap register frame.
 * @param   GCPhysFault  The GC physical address corresponding to pvFault.
 * @param   pCpu         Disassembler CPU state.
 * @param   pRange       Pointer to the MMIO range.
 * @param   ppStat       Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
                            PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get the byte/word/dword/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_R3_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->fPrefix & DISPREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_R3_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_R3_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
            {
                iomMmioReleaseRange(pVM, pRange);
                return VINF_IOM_R3_MMIO_READ_WRITE;
            }

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;
                rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
            iomMmioReleaseRange(pVM, pRange);
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */


/**
 * Gets the address / opcode mask corresponding to the given CPU mode.
 *
 * @returns Mask.
 * @param   enmCpuMode  CPU mode.
 */
static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
{
    switch (enmCpuMode)
    {
        case DISCPUMODE_16BIT: return UINT16_MAX;
        case DISCPUMODE_32BIT: return UINT32_MAX;
        case DISCPUMODE_64BIT: return UINT64_MAX;
        default:
            AssertFailedReturn(UINT32_MAX);
    }
}


/**
 * [REP] STOSB
 * [REP] STOSW
 * [REP] STOSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
                            PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get the byte/word/dword/qword count to copy.
     */
    uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->rcx & fAddrMask;
        if (!cTransfers)
            return VINF_SUCCESS;
    }

/** @todo r=bird: bounds checks! */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif


    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (   pRange->CTX_SUFF(pfnFillCallback)
        && cb <= 4 /* can only fill 32-bit values */)
    {
        /*
         * Use the fill callback.
         */
        /** @todo pfnFillCallback must return the number of bytes successfully written!!! */
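        /* The fill callback is handed the lowest address of the block and an
           ascending item count. With DF set (the addr-- variant below) the
           guest stores downwards from Phys, so the lowest address is
           Phys - (cTransfers - 1) * cb; e.g. three dword stores starting at an
           address ending in 8 pass the address ending in 0 to the device. */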
        if (offIncrement > 0)
        {
            /* addr++ variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
        else
        {
            /* addr-- variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                   Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
    }
    else
    {
        /*
         * Use the write callback.
         */
        Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
        uint64_t u64Data = pRegFrame->rax;

        /* fill loop. */
        do
        {
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
            if (rc != VINF_SUCCESS)
                break;

            Phys += offIncrement;
            pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
                           | (pRegFrame->rdi & ~fAddrMask);
            cTransfers--;
        } while (cTransfers);

        /* Update rcx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->rcx = (cTransfers & fAddrMask)
                           | (pRegFrame->rcx & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * [REP] LODSB
 * [REP] LODSW
 * [REP] LODSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * We do not support segment prefixes or REP*.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

    /*
     * Perform the read.
     */
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb));
    if (rc == VINF_SUCCESS)
    {
        uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
        pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
                       | (pRegFrame->rsi & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * CMP [MMIO], reg|imm
 * CMP reg|imm, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                           PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the operands.
     */
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
        /* cmp reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
        /* cmp [MMIO], reg|imm. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
    else
    {
        AssertMsgFailed(("Disassembler CMP problem...\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* Emulate CMP and update guest flags. */
        uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * AND [MMIO], reg|imm
 * AND reg, [MMIO]
 * OR [MMIO], reg|imm
 * OR reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   pfnEmulate  Instruction emulation function.
 */
static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                                PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
{
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    bool fAndWrite;
    int rc;

#ifdef LOG_ENABLED
    const char *pszInstr;

    if (pCpu->pCurInstr->uOpcode == OP_XOR)
        pszInstr = "Xor";
    else if (pCpu->pCurInstr->uOpcode == OP_OR)
        pszInstr = "Or";
    else if (pCpu->pCurInstr->uOpcode == OP_AND)
        pszInstr = "And";
    else
        pszInstr = "OrXorAnd??";
#endif

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and/or/xor reg, [MMIO]. */
        Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
        fAndWrite = false;
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and/or/xor [MMIO], reg|imm. */
        fAndWrite = true;
        if (    (pRange->CTX_SUFF(pfnReadCallback)  || !pRange->pfnReadCallbackR3)
            &&  (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
            rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        AssertMsgFailed(("Disassembler AND/OR/XOR problem...\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
        /* Emulate the instruction and update guest flags. */
        uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);

        LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));

        if (fAndWrite)
            /* Store the result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        else
        {
            /* Store the result to the register. */
            bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
            AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
        }
        if (rc == VINF_SUCCESS)
        {
            /* Update the guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
            iomMMIOStatLength(pVM, cb);
        }
    }

    return rc;
}


/**
 * TEST [MMIO], reg|imm
 * TEST reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* test reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* test [MMIO], reg|imm. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
    }
    else
    {
        AssertMsgFailed(("Disassembler TEST problem...\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif

        /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
        uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * BT [MMIO], reg|imm
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                          PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    uint64_t uBit  = 0;
    uint64_t uData = 0;
    unsigned cbIgnored;

    if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
    {
        AssertMsgFailed(("Disassembler BT problem...\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    /* Only the size of the memory operand matters here. */
    unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
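    /* Note: this restricted implementation assumes the bit offset selects a
       bit within the cbData bytes read below. u1CF is a one-bit bitfield, so
       assigning (uData >> uBit) to it keeps only bit 0, i.e. the tested bit. */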

    /* bt [MMIO], reg|imm. */
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData));
    if (rc == VINF_SUCCESS)
    {
        /* Test the requested bit and store it in the carry flag. */
        pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
        iomMMIOStatLength(pVM, cbData);
    }

    return rc;
}

/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_R3_MMIO_READ_WRITE;

    int rc;
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
        if (rc == VINF_SUCCESS)
        {
            /* Store the result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));

            if (rc == VINF_SUCCESS)
            {
                /* Store the result to the register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        if (rc == VINF_SUCCESS)
        {
            /* Store the result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
            if (rc == VINF_SUCCESS)
            {
                /* Store the result to the register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassembler XCHG problem...\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}

#endif /* !IEM_USE_IEM_INSTEAD */

/**
 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   uErrorCode  CPU Error code. This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
                                           RTGCPHYS GCPhysFault, void *pvUser)
{
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else
    /*
     * Locate the statistics.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away? This isn't usually the case, so
     * do the simple test first and then try to deal with uErrorCode being N/A.
     */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Enter the device's critical section (the range was retained above).
     */
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

#ifdef IEM_USE_IEM_INSTEAD

    /*
     * Let IEM call us back via iomMmioHandler.
     */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    if (RT_SUCCESS(rcStrict))
        return rcStrict;
    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
        Log(("IOM: Hit unsupported IEM feature!\n"));
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    }
    return rcStrict;

#else

    /*
     * Disassemble the instruction and interpret it.
     */
    PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
    unsigned cbOp;
    rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
    if (RT_FAILURE(rc))
    {
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }
    switch (pDis->pCurInstr->uOpcode)
    {
        case OP_MOV:
        case OP_MOVZX:
        case OP_MOVSX:
        {
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
            AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
            if (uErrorCode != UINT32_MAX    /* EPT+MMIO optimization */
                ? uErrorCode & X86_TRAP_PF_RW
                : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
                rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
            else
                rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
            break;
        }


# ifdef IOM_WITH_MOVS_SUPPORT
        case OP_MOVSB:
        case OP_MOVSWD:
        {
            if (uErrorCode == UINT32_MAX)
                rc = VINF_IOM_R3_MMIO_READ_WRITE;
            else
            {
                STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
                PSTAMPROFILE pStat = NULL;
                rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
                STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
            }
            break;
        }
# endif

        case OP_STOSB:
        case OP_STOSWD:
            Assert(uErrorCode & X86_TRAP_PF_RW);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
            rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
            break;

        case OP_LODSB:
        case OP_LODSWD:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
            rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
            break;

        case OP_CMP:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
            rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
            break;

        case OP_AND:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
            break;

        case OP_OR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
            break;

        case OP_XOR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
            break;

        case OP_TEST:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
            rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
            break;

        case OP_BT:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
            rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
            break;

        case OP_XCHG:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
            rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
            break;


        /*
         * The instruction isn't supported. Hand it on to ring-3.
         */
        default:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
            break;
    }

    /*
     * On success advance EIP.
     */
    if (rc == VINF_SUCCESS)
        pCtxCore->rip += cbOp;
    else
    {
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
# if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
        switch (rc)
        {
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1907 break;
1908 case VINF_IOM_R3_MMIO_WRITE:
1909 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1910 break;
1911 }
1912# endif
1913 }
1914
1915 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1916 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1917 iomMmioReleaseRange(pVM, pRange);
1918 return rc;
1919#endif /* !IEM_USE_IEM_INSTEAD */
1920}
1921
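/*
 * A minimal standalone sketch (hypothetical names, not VBox code) of the
 * ring-3 deferral test in iomMmioCommonPfHandler above: the access is
 * bounced to ring-3 when the needed callback is missing in the current
 * context but a ring-3 callback exists. MMIORANGESKETCH and
 * mustDeferToRing3 are invented simplifications of IOMMMIORANGE and the
 * RT_UNLIKELY() condition; SKETCH_ERRCD_NA models the UINT32_MAX
 * "uErrorCode not available" case of the EPT+MMIO optimization.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_TRAP_PF_RW UINT32_C(0x00000002) /* same bit as X86_TRAP_PF_RW */
#define SKETCH_ERRCD_NA   UINT32_MAX           /* fault direction unknown */

typedef struct MMIORANGESKETCH
{
    bool fWriteCbHere; /* write callback present in this context (R0/RC) */
    bool fReadCbHere;  /* read callback present in this context (R0/RC) */
    bool fWriteCbR3;   /* write callback registered for ring-3 */
    bool fReadCbR3;    /* read callback registered for ring-3 */
} MMIORANGESKETCH;

/* True when the real code would return VINF_IOM_R3_MMIO_READ_WRITE. */
static bool mustDeferToRing3(MMIORANGESKETCH const *pRange, uint32_t uErrCd)
{
    if (pRange->fWriteCbHere && pRange->fReadCbHere)
        return false;                   /* both directions handled here */
    if (uErrCd == SKETCH_ERRCD_NA)      /* direction unknown */
        return pRange->fWriteCbR3 || pRange->fReadCbR3;
    if (uErrCd & SKETCH_TRAP_PF_RW)     /* write fault */
        return !pRange->fWriteCbHere && pRange->fWriteCbR3;
    return !pRange->fReadCbHere && pRange->fReadCbR3; /* read fault */
}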
1922
1923/**
1924 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1925 * \#PF access handler callback for MMIO pages.}
1926 *
1927 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
1928 */
1929DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
1930 RTGCPHYS GCPhysFault, void *pvUser)
1931{
1932 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1933 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1934 return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1935}
1936
1937
1938/**
1939 * Physical access handler for MMIO ranges.
1940 *
1941 * @returns VBox status code (appropriate for GC return).
1942 * @param pVM Pointer to the VM.
1943 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1944 * @param uErrorCode CPU Error code.
1945 * @param pCtxCore Trap register frame.
1946 * @param GCPhysFault The GC physical address.
1947 */
1948VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1949{
1950 /*
1951 * We don't have a range here, so look it up before calling the common function.
1952 */
1953 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1954#ifndef IN_RING3
1955 if (rc2 == VERR_SEM_BUSY)
1956 return VINF_IOM_R3_MMIO_READ_WRITE;
1957#endif
1958 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1959 if (RT_UNLIKELY(!pRange))
1960 {
1961 IOM_UNLOCK_SHARED(pVM);
1962 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1963 }
1964 iomMmioRetainRange(pRange);
1965 IOM_UNLOCK_SHARED(pVM);
1966
1967 VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
1968
1969 iomMmioReleaseRange(pVM, pRange);
1970 return VBOXSTRICTRC_VAL(rcStrict);
1971}
1972
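/*
 * A minimal standalone sketch (hypothetical names, not VBox code) of the
 * lookup/retain/release pattern used above: the range is retained while the
 * shared IOM lock is held, so a concurrent deregistration cannot free it,
 * and the last release is what actually destroys it. RangeSketch,
 * rangeRetain and rangeRelease are invented stand-ins for IOMMMIORANGE,
 * iomMmioRetainRange and iomMmioReleaseRange, whose real versions carry
 * more state.
 */
#include <stdatomic.h>
#include <stdlib.h>

typedef struct RangeSketch
{
    atomic_uint cRefs;  /* reference count; the last release frees it */
    /* ... range data: base address, size, callbacks, ... */
} RangeSketch;

static void rangeRetain(RangeSketch *pRange)
{
    atomic_fetch_add_explicit(&pRange->cRefs, 1u, memory_order_relaxed);
}

static void rangeRelease(RangeSketch *pRange)
{
    /* Acquire/release ordering: prior accesses must complete before free. */
    if (atomic_fetch_sub_explicit(&pRange->cRefs, 1u, memory_order_acq_rel) == 1u)
        free(pRange); /* assumes the range was malloc()ed */
}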
1973
1974/**
1975 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
1976 *
1977 * @remarks The @a pvUser argument points to the MMIO range entry.
1978 */
1979PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
1980 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
1981{
1982 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1983 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1984
1985 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
1986 AssertPtr(pRange);
1987 NOREF(pvPhys); NOREF(enmOrigin);
1988
1989 /*
1990 * Validate the range.
1991 */
1992 int rc = IOM_LOCK_SHARED(pVM);
1993#ifndef IN_RING3
1994 if (rc == VERR_SEM_BUSY)
1995 return VINF_IOM_R3_MMIO_READ_WRITE;
1996#endif
1997 AssertRC(rc);
1998 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1999
2000 /*
2001 * Perform locking.
2002 */
2003 iomMmioRetainRange(pRange);
2004 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2005 IOM_UNLOCK_SHARED(pVM);
2006 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
2007 if (rcStrict == VINF_SUCCESS)
2008 {
2009 /*
2010 * Perform the access.
2011 */
2012 if (enmAccessType == PGMACCESSTYPE_READ)
2013 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2014 else
2015 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2016
2017 /* Check the return code. */
2018#ifdef IN_RING3
2019 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
2020#else
2021 AssertMsg( rcStrict == VINF_SUCCESS
2022 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
2023 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
2024 || rcStrict == VINF_EM_DBG_STOP
2025 || rcStrict == VINF_EM_DBG_BREAKPOINT
2026 || rcStrict == VINF_EM_OFF
2027 || rcStrict == VINF_EM_SUSPEND
2028 || rcStrict == VINF_EM_RESET
2029 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
2030 //|| rcStrict == VINF_EM_HALT /* ?? */
2031 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
2032 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
2033#endif
2034
2035 iomMmioReleaseRange(pVM, pRange);
2036 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2037 }
2038 else
2039 iomMmioReleaseRange(pVM, pRange);
2040 return rcStrict;
2041}
2042
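/*
 * A minimal standalone sketch (hypothetical names, not the PDM API) of the
 * rcBusy convention PDMCritSectEnter follows above: ring-3 callers may
 * block, while ring-0/raw-mode callers get the supplied status back on
 * contention (here VINF_IOM_R3_MMIO_READ_WRITE), which makes the access be
 * retried in ring-3 where blocking is allowed.
 */
#include <pthread.h>

#define SKETCH_VINF_SUCCESS 0

static int critSectEnterSketch(pthread_mutex_t *pCritSect, int rcBusy, int fRing3)
{
    if (fRing3)
    {
        pthread_mutex_lock(pCritSect);      /* ring-3: blocking is fine */
        return SKETCH_VINF_SUCCESS;
    }
    if (pthread_mutex_trylock(pCritSect) == 0)
        return SKETCH_VINF_SUCCESS;
    return rcBusy;                          /* contended: defer to ring-3 */
}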
2043
2044#ifdef IN_RING3 /* Only used by REM. */
2045
2046/**
2047 * Reads an MMIO register.
2048 *
2049 * @returns VBox status code.
2050 *
2051 * @param pVM Pointer to the VM.
2052 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2053 * @param GCPhys The physical address to read.
2054 * @param pu32Value Where to store the value read.
2055 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
2056 */
2057VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
2058{
2059 /* Take the IOM lock before performing any MMIO. */
2060 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2061#ifndef IN_RING3
2062 if (rc == VERR_SEM_BUSY)
2063 return VINF_IOM_R3_MMIO_READ;
2064#endif
2065 AssertRC(VBOXSTRICTRC_VAL(rc));
2066#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2067 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
2068#endif
2069
2070 /*
2071 * Look up the current context range node and statistics.
2072 */
2073 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2074 if (!pRange)
2075 {
2076 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2077 IOM_UNLOCK_SHARED(pVM);
2078 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2079 }
2080 iomMmioRetainRange(pRange);
2081#ifndef VBOX_WITH_STATISTICS
2082 IOM_UNLOCK_SHARED(pVM);
2083
2084#else /* VBOX_WITH_STATISTICS */
2085 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2086 if (!pStats)
2087 {
2088 iomMmioReleaseRange(pVM, pRange);
2089# ifdef IN_RING3
2090 return VERR_NO_MEMORY;
2091# else
2092 return VINF_IOM_R3_MMIO_READ;
2093# endif
2094 }
2095 STAM_COUNTER_INC(&pStats->Accesses);
2096#endif /* VBOX_WITH_STATISTICS */
2097
2098 if (pRange->CTX_SUFF(pfnReadCallback))
2099 {
2100 /*
2101 * Perform locking.
2102 */
2103 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2104 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
2105 if (rc != VINF_SUCCESS)
2106 {
2107 iomMmioReleaseRange(pVM, pRange);
2108 return rc;
2109 }
2110
2111 /*
2112 * Perform the read and deal with the result.
2113 */
2114 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
2115 if ( (cbValue == 4 && !(GCPhys & 3))
2116 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
2117 || (cbValue == 8 && !(GCPhys & 7)) )
2118 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
2119 pu32Value, (unsigned)cbValue);
2120 else
2121 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
2122 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2123 switch (VBOXSTRICTRC_VAL(rc))
2124 {
2125 case VINF_SUCCESS:
2126 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2127 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2128 iomMmioReleaseRange(pVM, pRange);
2129 return rc;
2130#ifndef IN_RING3
2131 case VINF_IOM_R3_MMIO_READ:
2132 case VINF_IOM_R3_MMIO_READ_WRITE:
2133 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2134#endif
2135 default:
2136 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2137 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2138 iomMmioReleaseRange(pVM, pRange);
2139 return rc;
2140
2141 case VINF_IOM_MMIO_UNUSED_00:
2142 iomMMIODoRead00s(pu32Value, cbValue);
2143 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2144 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2145 iomMmioReleaseRange(pVM, pRange);
2146 return VINF_SUCCESS;
2147
2148 case VINF_IOM_MMIO_UNUSED_FF:
2149 iomMMIODoReadFFs(pu32Value, cbValue);
2150 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2151 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2152 iomMmioReleaseRange(pVM, pRange);
2153 return VINF_SUCCESS;
2154 }
2155 /* not reached */
2156 }
2157#ifndef IN_RING3
2158 if (pRange->pfnReadCallbackR3)
2159 {
2160 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2161 iomMmioReleaseRange(pVM, pRange);
2162 return VINF_IOM_R3_MMIO_READ;
2163 }
2164#endif
2165
2166 /*
2167 * Unassigned memory - this is actually not supposed to happen...
2168 */
2169 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
2170 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2171 iomMMIODoReadFFs(pu32Value, cbValue);
2172 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2173 iomMmioReleaseRange(pVM, pRange);
2174 return VINF_SUCCESS;
2175}
2176
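/*
 * A minimal standalone sketch (hypothetical names, not VBox code) of two
 * details in IOMMMIORead above. canReadDirectSketch restates the fast-path
 * test: only naturally aligned dword/qword reads (or ranges whose read mode
 * is pass-through) go straight to the device callback, everything else via
 * iomMMIODoComplicatedRead. The fill helpers model what IOM does when a
 * callback answers VINF_IOM_MMIO_UNUSED_FF / VINF_IOM_MMIO_UNUSED_00
 * instead of producing data.
 */
#include <stdint.h>
#include <string.h>

static int canReadDirectSketch(uint64_t GCPhys, size_t cbValue, int fPassthru)
{
    return (cbValue == 4 && !(GCPhys & 3))
        || fPassthru
        || (cbValue == 8 && !(GCPhys & 7));
}

static void doReadFFsSketch(void *pvValue, size_t cbValue)
{
    memset(pvValue, 0xff, cbValue);     /* 0xff / 0xffff / 0xffffffff / ... */
}

static void doRead00sSketch(void *pvValue, size_t cbValue)
{
    memset(pvValue, 0x00, cbValue);     /* reads as zero */
}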
2177
2178/**
2179 * Writes to an MMIO register.
2180 *
2181 * @returns VBox status code.
2182 *
2183 * @param pVM Pointer to the VM.
2184 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2185 * @param GCPhys The physical address to write to.
2186 * @param u32Value The value to write.
2187 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
2188 */
2189VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
2190{
2191 /* Take the IOM lock before performing any MMIO. */
2192 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2193#ifndef IN_RING3
2194 if (rc == VERR_SEM_BUSY)
2195 return VINF_IOM_R3_MMIO_WRITE;
2196#endif
2197 AssertRC(VBOXSTRICTRC_VAL(rc));
2198#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2199 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2200#endif
2201
2202 /*
2203 * Look up the current context range node.
2204 */
2205 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2206 if (!pRange)
2207 {
2208 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2209 IOM_UNLOCK_SHARED(pVM);
2210 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2211 }
2212 iomMmioRetainRange(pRange);
2213#ifndef VBOX_WITH_STATISTICS
2214 IOM_UNLOCK_SHARED(pVM);
2215
2216#else /* VBOX_WITH_STATISTICS */
2217 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2218 if (!pStats)
2219 {
2220 iomMmioReleaseRange(pVM, pRange);
2221# ifdef IN_RING3
2222 return VERR_NO_MEMORY;
2223# else
2224 return VINF_IOM_R3_MMIO_WRITE;
2225# endif
2226 }
2227 STAM_COUNTER_INC(&pStats->Accesses);
2228#endif /* VBOX_WITH_STATISTICS */
2229
2230 if (pRange->CTX_SUFF(pfnWriteCallback))
2231 {
2232 /*
2233 * Perform locking.
2234 */
2235 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2236 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2237 if (rc != VINF_SUCCESS)
2238 {
2239 iomMmioReleaseRange(pVM, pRange);
2240 return rc;
2241 }
2242
2243 /*
2244 * Perform the write.
2245 */
2246 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2247 if ( (cbValue == 4 && !(GCPhys & 3))
2248 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2249 || (cbValue == 8 && !(GCPhys & 7)) )
2250 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2251 GCPhys, &u32Value, (unsigned)cbValue);
2252 else
2253 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2254 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2255#ifndef IN_RING3
2256 if ( rc == VINF_IOM_R3_MMIO_WRITE
2257 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2258 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2259#endif
2260 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2261 iomMmioReleaseRange(pVM, pRange);
2262 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2263 return rc;
2264 }
2265#ifndef IN_RING3
2266 if (pRange->pfnWriteCallbackR3)
2267 {
2268 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2269 iomMmioReleaseRange(pVM, pRange);
2270 return VINF_IOM_R3_MMIO_WRITE;
2271 }
2272#endif
2273
2274 /*
2275 * No write handler, nothing to do.
2276 */
2277 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2278 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2279 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2280 iomMmioReleaseRange(pVM, pRange);
2281 return VINF_SUCCESS;
2282}
2283
2284#endif /* IN_RING3 - only used by REM. */
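
/*
 * A minimal standalone sketch (hypothetical, not VBox code) of the slow
 * path taken by IOMMMIORead/IOMMMIOWrite above when an access is not a
 * naturally aligned dword/qword: the iomMMIODoComplicatedRead/Write helpers
 * they call carve the access into pieces the device callback can take.
 * This sketch only shows the carving into dword-bounded chunks; the real
 * helpers also merge partial writes according to the range's write mode.
 */
#include <stdint.h>
#include <stdio.h>

static void carveIntoDwordsSketch(uint64_t GCPhys, size_t cb)
{
    while (cb)
    {
        size_t cbThis = 4 - (size_t)(GCPhys & 3); /* room left in this dword */
        if (cbThis > cb)
            cbThis = cb;
        printf("unit access %#llx LB %zu\n", (unsigned long long)GCPhys, cbThis);
        GCPhys += cbThis;
        cb     -= cbThis;
    }
}
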
2285#ifndef IEM_USE_IEM_INSTEAD
2286
2287/**
2288 * [REP*] INSB/INSW/INSD
2289 * ES:EDI,DX[,ECX]
2290 *
2291 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2292 *
2293 * @returns Strict VBox status code. Informational status codes other than the one documented
2294 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2295 * @retval VINF_SUCCESS Success.
2296 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2297 * status code must be passed on to EM.
2298 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2299 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2300 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2301 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2302 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2303 *
2304 * @param pVM The virtual machine.
2305 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2306 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2307 * @param uPort IO Port
2308 * @param uPrefix IO instruction prefix
2309 * @param enmAddrMode The address mode.
2310 * @param cbTransfer Size of transfer unit
2311 */
2312VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2313 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2314{
2315 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2316
2317 /*
2318 * We do not support REPNE or decrementing the destination
2319 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2320 */
2321 if ( (uPrefix & DISPREFIX_REPNE)
2322 || pRegFrame->eflags.Bits.u1DF)
2323 return VINF_EM_RAW_EMULATE_INSTR;
2324
2325 /*
2326 * Get bytes/words/dwords count to transfer.
2327 */
2328 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2329 RTGCUINTREG cTransfers = 1;
2330 if (uPrefix & DISPREFIX_REP)
2331 {
2332#ifndef IN_RC
2333 if ( CPUMIsGuestIn64BitCode(pVCpu)
2334 && pRegFrame->rcx >= _4G)
2335 return VINF_EM_RAW_EMULATE_INSTR;
2336#endif
2337 cTransfers = pRegFrame->rcx & fAddrMask;
2338 if (!cTransfers)
2339 return VINF_SUCCESS;
2340 }
2341
2342 /* Convert destination address es:edi. */
2343 RTGCPTR GCPtrDst;
2344 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2345 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2346 &GCPtrDst);
2347 if (RT_FAILURE(rc2))
2348 {
2349 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2350 return VINF_EM_RAW_EMULATE_INSTR;
2351 }
2352
2353 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2354 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2355 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2356 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2357 if (rc2 != VINF_SUCCESS)
2358 {
2359 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2360 return VINF_EM_RAW_EMULATE_INSTR;
2361 }
2362
2363 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2364 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2365 if (cTransfers > 1)
2366 {
2367 /*
2368 * Work the string page by page, letting the device handle as much
2369 * as it likes via the string I/O interface.
2370 */
2371 for (;;)
2372 {
2373 PGMPAGEMAPLOCK Lock;
2374 void *pvDst;
2375 rc2 = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2376 if (RT_SUCCESS(rc2))
2377 {
2378 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK)) / cbTransfer;
2379 if (cMaxThisTime > cTransfers)
2380 cMaxThisTime = cTransfers;
2381 if (!cMaxThisTime)
2382 break;
2383 uint32_t cThisTime = cMaxThisTime;
2384
2385 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, pvDst, &cThisTime, cbTransfer);
2386 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2387 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2388
2389 uint32_t const cActual = cMaxThisTime - cThisTime;
2390 if (cActual)
2391 { /* Must dirty the page. */
2392 uint8_t b = *(uint8_t *)pvDst;
2393 iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &b, 1);
2394 }
2395
2396 PGMPhysReleasePageMappingLock(pVM, &Lock);
2397
2398 uint32_t const cbActual = cActual * cbTransfer;
2399 cTransfers -= cActual;
2400 pRegFrame->rdi = ((pRegFrame->rdi + cbActual) & fAddrMask)
2401 | (pRegFrame->rdi & ~fAddrMask);
2402 GCPtrDst += cbActual;
2403
2404 if ( cThisTime
2405 || !cTransfers
2406 || rcStrict != VINF_SUCCESS
2407 || (GCPtrDst & PAGE_OFFSET_MASK))
2408 break;
2409 }
2410 else
2411 {
2412 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtr %#RGv -> %Rrc\n", GCPtrDst, rc2));
2413 break;
2414 }
2415 }
2416 }
2417
2418 /*
2419 * Single transfer / unmapped memory fallback.
2420 */
2421#ifdef IN_RC
2422 MMGCRamRegisterTrapHandler(pVM);
2423#endif
2424 while (cTransfers && rcStrict == VINF_SUCCESS)
2425 {
2426 uint32_t u32Value;
2427 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2428 if (!IOM_SUCCESS(rcStrict))
2429 break;
2430 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2431 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2432 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2433 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2434 | (pRegFrame->rdi & ~fAddrMask);
2435 cTransfers--;
2436 }
2437#ifdef IN_RC
2438 MMGCRamDeregisterTrapHandler(pVM);
2439#endif
2440
2441 /* Update rcx on exit. */
2442 if (uPrefix & DISPREFIX_REP)
2443 pRegFrame->rcx = (cTransfers & fAddrMask)
2444 | (pRegFrame->rcx & ~fAddrMask);
2445
2446 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2447 return rcStrict;
2448}
2449
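/*
 * A minimal standalone sketch (hypothetical names, 4 KiB pages assumed) of
 * the chunking arithmetic in the REP INS loop above: each pass may only
 * touch the rest of the current guest page, because PGMPhysGCPtr2CCPtr maps
 * one page at a time, so the transfer count per pass is capped at the units
 * remaining in the page and at what the guest still wants.
 */
#include <stdint.h>

#define SKETCH_PAGE_SIZE   4096u
#define SKETCH_PAGE_OFFSET (SKETCH_PAGE_SIZE - 1u)

/* Units of cbTransfer bytes to do this pass; cbTransfer is 1, 2 or 4. */
static uint32_t unitsThisPassSketch(uint64_t GCPtr, uint32_t cbTransfer,
                                    uint32_t cTransfersLeft)
{
    uint32_t cMax = (SKETCH_PAGE_SIZE - (uint32_t)(GCPtr & SKETCH_PAGE_OFFSET))
                  / cbTransfer;
    return cMax < cTransfersLeft ? cMax : cTransfersLeft;
}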
2450
2451/**
2452 * [REP*] OUTSB/OUTSW/OUTSD
2453 * DS:ESI,DX[,ECX]
2454 *
2455 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2456 *
2457 * @returns Strict VBox status code. Informational status codes other than the one documented
2458 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2459 * @retval VINF_SUCCESS Success.
2460 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2461 * status code must be passed on to EM.
2462 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2463 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2464 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2465 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2466 *
2467 * @param pVM The virtual machine.
2468 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2469 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2470 * @param uPort IO Port
2471 * @param uPrefix IO instruction prefix
2472 * @param enmAddrMode The address mode.
2473 * @param cbTransfer Size of transfer unit
2474 *
2475 * @remarks This API will probably be replaced by IEM before long, so no use in
2476 * optimizing+fixing stuff too much here.
2477 */
2478VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2479 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2480{
2481 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2482
2483 /*
2484 * We do not support segment prefixes, REPNE or
2485 * decrementing the source pointer.
2486 */
2487 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2488 || pRegFrame->eflags.Bits.u1DF)
2489 return VINF_EM_RAW_EMULATE_INSTR;
2490
2491 /*
2492 * Get bytes/words/dwords count to transfer.
2493 */
2494 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2495 RTGCUINTREG cTransfers = 1;
2496 if (uPrefix & DISPREFIX_REP)
2497 {
2498#ifndef IN_RC
2499 if ( CPUMIsGuestIn64BitCode(pVCpu)
2500 && pRegFrame->rcx >= _4G)
2501 return VINF_EM_RAW_EMULATE_INSTR;
2502#endif
2503 cTransfers = pRegFrame->rcx & fAddrMask;
2504 if (!cTransfers)
2505 return VINF_SUCCESS;
2506 }
2507
2508 /* Convert source address ds:esi. */
2509 RTGCPTR GCPtrSrc;
2510 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2511 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2512 &GCPtrSrc);
2513 if (RT_FAILURE(rc2))
2514 {
2515 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2516 return VINF_EM_RAW_EMULATE_INSTR;
2517 }
2518
2519 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2520 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2521 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2522 (cpl == 3) ? X86_PTE_US : 0);
2523 if (rc2 != VINF_SUCCESS)
2524 {
2525 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2526 return VINF_EM_RAW_EMULATE_INSTR;
2527 }
2528
2529 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2530 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2531 if (cTransfers > 1)
2532 {
2533 /*
2534 * Work the string page by page, letting the device handle as much
2535 * as it likes via the string I/O interface.
2536 */
2537 for (;;)
2538 {
2539 PGMPAGEMAPLOCK Lock;
2540 void const *pvSrc;
2541 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2542 if (RT_SUCCESS(rc2))
2543 {
2544 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK)) / cbTransfer;
2545 if (cMaxThisTime > cTransfers)
2546 cMaxThisTime = cTransfers;
2547 if (!cMaxThisTime)
2548 break;
2549 uint32_t cThisTime = cMaxThisTime;
2550
2551 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc, &cThisTime, cbTransfer);
2552 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2553 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2554
2555 PGMPhysReleasePageMappingLock(pVM, &Lock);
2556
2557 uint32_t const cActual = cMaxThisTime - cThisTime;
2558 uint32_t const cbActual = cActual * cbTransfer;
2559 cTransfers -= cActual;
2560 pRegFrame->rsi = ((pRegFrame->rsi + cbActual) & fAddrMask)
2561 | (pRegFrame->rsi & ~fAddrMask);
2562 GCPtrSrc += cbActual;
2563
2564 if ( cThisTime
2565 || !cTransfers
2566 || rcStrict != VINF_SUCCESS
2567 || (GCPtrSrc & PAGE_OFFSET_MASK))
2568 break;
2569 }
2570 else
2571 {
2572 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtrReadOnly %#RGv -> %Rrc\n", GCPtrSrc, rc2));
2573 break;
2574 }
2575 }
2576 }
2577
2578 /*
2579 * Single transfer / unmapped memory fallback.
2580 */
2581#ifdef IN_RC
2582 MMGCRamRegisterTrapHandler(pVM);
2583#endif
2584
2585 while (cTransfers && rcStrict == VINF_SUCCESS)
2586 {
2587 uint32_t u32Value = 0;
2588 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2589 if (rcStrict != VINF_SUCCESS)
2590 break;
2591 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2592 if (!IOM_SUCCESS(rcStrict))
2593 break;
2594 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
2595 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2596 | (pRegFrame->rsi & ~fAddrMask);
2597 cTransfers--;
2598 }
2599
2600#ifdef IN_RC
2601 MMGCRamDeregisterTrapHandler(pVM);
2602#endif
2603
2604 /* Update rcx on exit. */
2605 if (uPrefix & DISPREFIX_REP)
2606 pRegFrame->rcx = (cTransfers & fAddrMask)
2607 | (pRegFrame->rcx & ~fAddrMask);
2608
2609 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2610 return rcStrict;
2611}
2612
2613#endif /* !IEM_USE_IEM_INSTEAD */
2614
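/*
 * A minimal standalone sketch (hypothetical names, not VBox code) of the
 * address-size masking both string loops above rely on. iomDisModeToMask
 * yields 0xffff, 0xffffffff or all-ones for 16/32/64-bit addressing, and
 * the ((reg + cb) & mask) | (reg & ~mask) update makes rdi/rsi/rcx wrap
 * within that width while the untouched upper register bits are preserved.
 */
#include <stdint.h>

enum AddrModeSketch { SKETCH_ADDR16, SKETCH_ADDR32, SKETCH_ADDR64 };

static uint64_t disModeToMaskSketch(enum AddrModeSketch enmMode)
{
    switch (enmMode)
    {
        case SKETCH_ADDR16: return UINT64_C(0xffff);
        case SKETCH_ADDR32: return UINT64_C(0xffffffff);
        default:            return UINT64_MAX;
    }
}

/* E.g. stepping 16-bit DI past 0xffff wraps to 0 without touching RDI[63:16]. */
static uint64_t advanceIndexRegSketch(uint64_t uReg, uint64_t cbStep, uint64_t fMask)
{
    return ((uReg + cbStep) & fMask) | (uReg & ~fMask);
}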
2615
2616#ifndef IN_RC
2617
2618/**
2619 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2620 *
2621 * (This is a special optimization used by the VGA device.)
2622 *
2623 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2624 * remapping is made.
2625 *
2626 * @param pVM The virtual machine.
2627 * @param GCPhys The address of the MMIO page to be changed.
2628 * @param GCPhysRemapped The address of the MMIO2 page.
2629 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2630 * for the time being.
2631 */
2632VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2633{
2634# ifndef IEM_VERIFICATION_MODE_FULL
2635 /* Currently only called from the VGA device during MMIO. */
2636 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2637 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2638 PVMCPU pVCpu = VMMGetCpu(pVM);
2639
2640 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2641 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2642 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2643 && !HMIsNestedPagingActive(pVM)))
2644 return VINF_SUCCESS; /* ignore */
2645
2646 int rc = IOM_LOCK_SHARED(pVM);
2647 if (RT_FAILURE(rc))
2648 return VINF_SUCCESS; /* better luck the next time around */
2649
2650 /*
2651 * Look up the context range node the page belongs to.
2652 */
2653 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2654 AssertMsgReturn(pRange,
2655 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2656
2657 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2658 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2659
2660 /*
2661 * Do the aliasing; page align the addresses since PGM is picky.
2662 */
2663 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2664 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2665
2666 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2667
2668 IOM_UNLOCK_SHARED(pVM);
2669 AssertRCReturn(rc, rc);
2670
2671 /*
2672 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2673 * can simply prefetch it.
2674 *
2675 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2676 */
2677# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2678# ifdef VBOX_STRICT
2679 uint64_t fFlags;
2680 RTHCPHYS HCPhys;
2681 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2682 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2683# endif
2684# endif
2685 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2686 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2687# endif /* !IEM_VERIFICATION_MODE_FULL */
2688 return VINF_SUCCESS;
2689}
2690
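/*
 * A toy standalone sketch (hypothetical, not how PGM stores it) of the
 * effect of the aliasing above: after PGMHandlerPhysicalPageAlias, the
 * guest MMIO page is backed by the MMIO2 page and accesses no longer trap,
 * until IOMMMIOResetRegion below restores the trapping state. Call
 * resetAliasSketch() once before use so every page starts out trapping;
 * offRange is assumed to stay below SKETCH_MAX_PAGES pages.
 */
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12            /* 4 KiB pages assumed */
#define SKETCH_MAX_PAGES  16u
#define SKETCH_NO_ALIAS   UINT64_MAX    /* page still traps to the handler */

static uint64_t g_aAliasSketch[SKETCH_MAX_PAGES];

static void aliasPageSketch(uint64_t offRange, uint64_t GCPhysRemapped)
{
    g_aAliasSketch[offRange >> SKETCH_PAGE_SHIFT] = GCPhysRemapped >> SKETCH_PAGE_SHIFT;
}

static void resetAliasSketch(void)
{
    for (unsigned i = 0; i < SKETCH_MAX_PAGES; i++)
        g_aAliasSketch[i] = SKETCH_NO_ALIAS;    /* everything traps again */
}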
2691
2692# ifndef IEM_VERIFICATION_MODE_FULL
2693/**
2694 * Mapping an HC page in place of an MMIO page for direct access.
2695 *
2696 * (This is a special optimization used by the APIC in the VT-x case.)
2697 *
2698 * @returns VBox status code.
2699 *
2700 * @param pVM Pointer to the VM.
2701 * @param pVCpu Pointer to the VMCPU.
2702 * @param GCPhys The address of the MMIO page to be changed.
2703 * @param HCPhys The address of the host physical page.
2704 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2705 * for the time being.
2706 */
2707VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2708{
2709 /* Currently only called from VT-x code during a page fault. */
2710 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2711
2712 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2713 Assert(HMIsEnabled(pVM));
2714
2715 /*
2716 * Look up the context range node the page belongs to.
2717 */
2718# ifdef VBOX_STRICT
2719 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2720 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2721 AssertMsgReturn(pRange,
2722 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2723 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2724 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2725# endif
2726
2727 /*
2728 * Do the aliasing; page align the addresses since PGM is picky.
2729 */
2730 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2731 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2732
2733 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2734 AssertRCReturn(rc, rc);
2735
2736 /*
2737 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2738 * can simply prefetch it.
2739 *
2740 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2741 */
2742 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2743 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2744 return VINF_SUCCESS;
2745}
2746# endif /* !IEM_VERIFICATION_MODE_FULL */
2747
2748
2749/**
2750 * Reset a previously modified MMIO region; restore the access flags.
2751 *
2752 * @returns VBox status code.
2753 *
2754 * @param pVM The virtual machine.
2755 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2756 */
2757VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2758{
2759 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2760
2761 PVMCPU pVCpu = VMMGetCpu(pVM);
2762
2763 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2764 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2765 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2766 && !HMIsNestedPagingActive(pVM)))
2767 return VINF_SUCCESS; /* ignore */
2768
2769 /*
2770 * Look up the context range node the page belongs to.
2771 */
2772# ifdef VBOX_STRICT
2773 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2774 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2775 AssertMsgReturn(pRange,
2776 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2777 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2778 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2779# endif
2780
2781 /*
2782 * Call PGM to do the work.
2783 *
2784 * After the call, all the pages should be non-present... unless there is
2785 * a page pool flush pending (unlikely).
2786 */
2787 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2788 AssertRC(rc);
2789
2790# ifdef VBOX_STRICT
2791 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2792 {
2793 uint32_t cb = pRange->cb;
2794 GCPhys = pRange->GCPhys;
2795 while (cb)
2796 {
2797 uint64_t fFlags;
2798 RTHCPHYS HCPhys;
2799 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2800 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2801 cb -= PAGE_SIZE;
2802 GCPhys += PAGE_SIZE;
2803 }
2804 }
2805# endif
2806 return rc;
2807}
2808
2809#endif /* !IN_RC */
2810