VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@56611

Last change on this file since 56611 was 56611, checked in by vboxsync, 9 years ago

IOM/INSEx: Must dirty the page.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 97.2 KB
Line 
1/* $Id: IOMAllMMIO.cpp 56611 2015-06-23 22:29:22Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#include <VBox/vmm/iem.h>
32#include "IOMInternal.h"
33#include <VBox/vmm/vm.h>
34#include <VBox/vmm/vmm.h>
35#include <VBox/vmm/hm.h>
36#include "IOMInline.h"
37
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48/*******************************************************************************
49* Defined Constants And Macros *
50*******************************************************************************/
51/** @def IEM_USE_IEM_INSTEAD
52 * Use IEM instead of IOM for interpreting MMIO accesses.
53 * Because of PATM/CSAM issues in raw-mode, we've split this up into the 2nd
54 * and 3rd IEM deployment steps. */
55#if ((defined(IN_RING3) || defined(IN_RING0)) && defined(VBOX_WITH_2ND_IEM_STEP)) \
56 || defined(VBOX_WITH_3RD_IEM_STEP)
57# define IEM_USE_IEM_INSTEAD
58#endif
59
60
61/*******************************************************************************
62* Global Variables *
63*******************************************************************************/
64
65/**
66 * Array for fast conversion of an operand size (1/2/4/8 bytes) to its bit shift value.
67 */
68static const unsigned g_aSize2Shift[] =
69{
70 ~0U, /* 0 - invalid */
71 0, /* *1 == 2^0 */
72 1, /* *2 == 2^1 */
73 ~0U, /* 3 - invalid */
74 2, /* *4 == 2^2 */
75 ~0U, /* 5 - invalid */
76 ~0U, /* 6 - invalid */
77 ~0U, /* 7 - invalid */
78 3 /* *8 == 2^3 */
79};
80
81/**
82 * Macro for fast conversion of an operand size (1/2/4/8 bytes) to its bit shift value.
83 */
84#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
85
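
/**
 * Usage sketch: converting a transfer count and operand size into a byte
 * count without a multiplication, the way the REP string handlers below do
 * it. Illustrative only; it assumes cb has already been validated as
 * 1, 2, 4 or 8 (an invalid size would index a ~0U shift value):
 * @code
 *      unsigned    cb         = 4;                               // dword operand
 *      RTGCUINTREG cTransfers = 16;                              // from rCX
 *      RTGCUINTREG cbTotal    = cTransfers << SIZE_2_SHIFT(cb);  // 16 * 4 = 64 bytes
 * @endcode
 */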
86
87/**
88 * Returns the contents of the register or the immediate data of an instruction's parameter.
89 *
90 * @returns true on success.
91 *
92 * @todo Get rid of this code. Use DISQueryParamVal instead
93 *
94 * @param pCpu Pointer to current disassembler context.
95 * @param pParam Pointer to parameter of instruction to process.
96 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
97 * @param pu64Data Where to store retrieved data.
98 * @param pcbSize Where to store the size of data (1, 2, 4, 8).
99 */
100bool iomGetRegImmData(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t *pu64Data, unsigned *pcbSize)
101{
102 NOREF(pCpu);
103 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32))
104 {
105 *pcbSize = 0;
106 *pu64Data = 0;
107 return false;
108 }
109
110 /* divide and conquer */
111 if (pParam->fUse & (DISUSE_REG_GEN64 | DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8))
112 {
113 if (pParam->fUse & DISUSE_REG_GEN32)
114 {
115 *pcbSize = 4;
116 DISFetchReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t *)pu64Data);
117 return true;
118 }
119
120 if (pParam->fUse & DISUSE_REG_GEN16)
121 {
122 *pcbSize = 2;
123 DISFetchReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t *)pu64Data);
124 return true;
125 }
126
127 if (pParam->fUse & DISUSE_REG_GEN8)
128 {
129 *pcbSize = 1;
130 DISFetchReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t *)pu64Data);
131 return true;
132 }
133
134 Assert(pParam->fUse & DISUSE_REG_GEN64);
135 *pcbSize = 8;
136 DISFetchReg64(pRegFrame, pParam->Base.idxGenReg, pu64Data);
137 return true;
138 }
139 else
140 {
141 if (pParam->fUse & (DISUSE_IMMEDIATE64 | DISUSE_IMMEDIATE64_SX8))
142 {
143 *pcbSize = 8;
144 *pu64Data = pParam->uValue;
145 return true;
146 }
147
148 if (pParam->fUse & (DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8))
149 {
150 *pcbSize = 4;
151 *pu64Data = (uint32_t)pParam->uValue;
152 return true;
153 }
154
155 if (pParam->fUse & (DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE16_SX8))
156 {
157 *pcbSize = 2;
158 *pu64Data = (uint16_t)pParam->uValue;
159 return true;
160 }
161
162 if (pParam->fUse & DISUSE_IMMEDIATE8)
163 {
164 *pcbSize = 1;
165 *pu64Data = (uint8_t)pParam->uValue;
166 return true;
167 }
168
169 if (pParam->fUse & DISUSE_REG_SEG)
170 {
171 *pcbSize = 2;
172 DISFetchRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL *)pu64Data);
173 return true;
174 } /* Else - error. */
175
176 AssertFailed();
177 *pcbSize = 0;
178 *pu64Data = 0;
179 return false;
180 }
181}
182
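
/**
 * Calling sketch for iomGetRegImmData (illustrative, mirroring how the
 * interpreters below use it), assuming pDis holds an instruction decoded by
 * EMInterpretDisasCurrent and pRegFrame is the guest register frame:
 * @code
 *      uint64_t u64Src = 0;
 *      unsigned cbSrc  = 0;
 *      if (!iomGetRegImmData(pDis, &pDis->Param2, pRegFrame, &u64Src, &cbSrc))
 *          return VERR_IOM_MMIO_HANDLER_DISASM_ERROR; // memory operand or unsupported
 *      LogFlow(("source operand %#RX64 (%u bytes)\n", u64Src, cbSrc));
 * @endcode
 */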
183
184/**
185 * Saves data to the 8/16/32/64-bit general purpose or segment register
186 * defined by the instruction's parameter.
187 *
188 * @returns true on success.
189 * @param pCpu Pointer to current disassembler context.
190 * @param pParam Pointer to parameter of instruction to process.
191 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
192 * @param u64Data 8/16/32/64 bit data to store.
193 */
194bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u64Data)
195{
196 NOREF(pCpu);
197 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32 | DISUSE_DISPLACEMENT64 | DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE16_SX8))
198 {
199 return false;
200 }
201
202 if (pParam->fUse & DISUSE_REG_GEN32)
203 {
204 DISWriteReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t)u64Data);
205 return true;
206 }
207
208 if (pParam->fUse & DISUSE_REG_GEN64)
209 {
210 DISWriteReg64(pRegFrame, pParam->Base.idxGenReg, u64Data);
211 return true;
212 }
213
214 if (pParam->fUse & DISUSE_REG_GEN16)
215 {
216 DISWriteReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t)u64Data);
217 return true;
218 }
219
220 if (pParam->fUse & DISUSE_REG_GEN8)
221 {
222 DISWriteReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t)u64Data);
223 return true;
224 }
225
226 if (pParam->fUse & DISUSE_REG_SEG)
227 {
228 DISWriteRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL)u64Data);
229 return true;
230 }
231
232 /* Else - error. */
233 return false;
234}
235
236
237/**
238 * Deals with complicated MMIO writes.
239 *
240 * Complicated means unaligned or non-dword/qword sized accesses depending on
241 * the MMIO region's access mode flags.
242 *
243 * @returns Strict VBox status code. Any EM scheduling status code,
244 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
245 * VINF_IOM_R3_MMIO_READ may be returned.
246 *
247 * @param pVM Pointer to the VM.
248 * @param pRange The range to write to.
249 * @param GCPhys The physical address to start writing.
250 * @param pvValue The value to write.
251 * @param cbValue The size of the value to write.
252 */
253static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
254{
255 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
256 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
257 VERR_IOM_MMIO_IPE_1);
258 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
259 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
260 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
261 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
262
263 /*
264 * Do debug stop if requested.
265 */
266 int rc = VINF_SUCCESS; NOREF(pVM);
267#ifdef VBOX_STRICT
268 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
269 {
270# ifdef IN_RING3
271 LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
272 R3STRING(pRange->pszDesc)));
273 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
274 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
275 if (rc == VERR_DBGF_NOT_ATTACHED)
276 rc = VINF_SUCCESS;
277# else
278 return VINF_IOM_R3_MMIO_WRITE;
279# endif
280 }
281#endif
282
283 /*
284 * Check if we should ignore the write.
285 */
286 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
287 {
288 Assert(cbValue != 4 || (GCPhys & 3));
289 return VINF_SUCCESS;
290 }
291 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
292 {
293 Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
294 return VINF_SUCCESS;
295 }
296
297 /*
298 * Split and conquer.
299 */
300 for (;;)
301 {
302 unsigned const offAccess = GCPhys & 3;
303 unsigned cbThisPart = 4 - offAccess;
304 if (cbThisPart > cbValue)
305 cbThisPart = cbValue;
306
307 /*
308 * Get the missing bits (if any).
309 */
310 uint32_t u32MissingValue = 0;
311 if (fReadMissing && cbThisPart != 4)
312 {
313 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
314 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
315 switch (rc2)
316 {
317 case VINF_SUCCESS:
318 break;
319 case VINF_IOM_MMIO_UNUSED_FF:
320 u32MissingValue = UINT32_C(0xffffffff);
321 break;
322 case VINF_IOM_MMIO_UNUSED_00:
323 u32MissingValue = 0;
324 break;
325 case VINF_IOM_R3_MMIO_READ:
326 case VINF_IOM_R3_MMIO_READ_WRITE:
327 case VINF_IOM_R3_MMIO_WRITE:
328 /** @todo What if we've split a transfer and already read
329 * something? Since writes generally have side effects we
330 * could be kind of screwed here...
331 *
332 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
333 * to REM for MMIO accesses (like it may currently do). */
334
335 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
336 return rc2;
337 default:
338 if (RT_FAILURE(rc2))
339 {
340 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
341 return rc2;
342 }
343 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
344 if (rc == VINF_SUCCESS || rc2 < rc)
345 rc = rc2;
346 break;
347 }
348 }
349
350 /*
351 * Merge missing and given bits.
352 */
353 uint32_t u32GivenMask;
354 uint32_t u32GivenValue;
355 switch (cbThisPart)
356 {
357 case 1:
358 u32GivenValue = *(uint8_t const *)pvValue;
359 u32GivenMask = UINT32_C(0x000000ff);
360 break;
361 case 2:
362 u32GivenValue = *(uint16_t const *)pvValue;
363 u32GivenMask = UINT32_C(0x0000ffff);
364 break;
365 case 3:
366 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
367 ((uint8_t const *)pvValue)[2], 0);
368 u32GivenMask = UINT32_C(0x00ffffff);
369 break;
370 case 4:
371 u32GivenValue = *(uint32_t const *)pvValue;
372 u32GivenMask = UINT32_C(0xffffffff);
373 break;
374 default:
375 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
376 }
377 if (offAccess)
378 {
379 u32GivenValue <<= offAccess * 8;
380 u32GivenMask <<= offAccess * 8;
381 }
382
383 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
384 | (u32GivenValue & u32GivenMask);
385
386 /*
387 * Do DWORD write to the device.
388 */
389 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
390 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
391 switch (rc2)
392 {
393 case VINF_SUCCESS:
394 break;
395 case VINF_IOM_R3_MMIO_READ:
396 case VINF_IOM_R3_MMIO_READ_WRITE:
397 case VINF_IOM_R3_MMIO_WRITE:
398 /** @todo What if we've split a transfer and already read
399 * something? Since reads can have side effects we could be
400 * kind of screwed here...
401 *
402 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
403 * to REM for MMIO accesses (like it may currently do). */
404 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
405 return rc2;
406 default:
407 if (RT_FAILURE(rc2))
408 {
409 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
410 return rc2;
411 }
412 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
413 if (rc == VINF_SUCCESS || rc2 < rc)
414 rc = rc2;
415 break;
416 }
417
418 /*
419 * Advance.
420 */
421 cbValue -= cbThisPart;
422 if (!cbValue)
423 break;
424 GCPhys += cbThisPart;
425 pvValue = (uint8_t const *)pvValue + cbThisPart;
426 }
427
428 return rc;
429}
430
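
/*
 * Worked example (illustrative): a 6 byte write at a GCPhys ending in ...2,
 * on a range using one of the READ_MISSING write modes, is carried out by
 * iomMMIODoComplicatedWrite above as two dword accesses:
 *
 *      pass 1: offAccess=2, cbThisPart=2 - read the dword at ...0, merge the
 *              given bytes under mask 0xffff0000, write the dword back;
 *      pass 2: offAccess=0, cbThisPart=4 - plain dword write at ...4, no
 *              read needed since all 32 bits are given.
 */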
431
432
433
434/**
435 * Wrapper which does the write and updates range statistics when such are enabled.
436 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
437 */
438static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
439 const void *pvData, unsigned cb)
440{
441#ifdef VBOX_WITH_STATISTICS
442 int rcSem = IOM_LOCK_SHARED(pVM);
443 if (rcSem == VERR_SEM_BUSY)
444 return VINF_IOM_R3_MMIO_WRITE;
445 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
446 if (!pStats)
447# ifdef IN_RING3
448 return VERR_NO_MEMORY;
449# else
450 return VINF_IOM_R3_MMIO_WRITE;
451# endif
452 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
453#endif
454
455 VBOXSTRICTRC rcStrict;
456 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
457 {
458 if ( (cb == 4 && !(GCPhysFault & 3))
459 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
460 || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
461 rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
462 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
463 else
464 rcStrict = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
465 }
466 else
467 rcStrict = VINF_SUCCESS;
468
469 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
470 STAM_COUNTER_INC(&pStats->Accesses);
471 return rcStrict;
472}
473
474
475/**
476 * Deals with complicated MMIO reads.
477 *
478 * Complicated means unaligned or non-dword/qword sized accesses depending on
479 * the MMIO region's access mode flags.
480 *
481 * @returns Strict VBox status code. Any EM scheduling status code,
482 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
483 * VINF_IOM_R3_MMIO_WRITE may be returned.
484 *
485 * @param pVM Pointer to the VM.
486 * @param pRange The range to read from.
487 * @param GCPhys The physical address to start reading.
488 * @param pvValue Where to store the value.
489 * @param cbValue The size of the value to read.
490 */
491static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
492{
493 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
494 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
495 VERR_IOM_MMIO_IPE_1);
496 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
497 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
498
499 /*
500 * Do debug stop if requested.
501 */
502 int rc = VINF_SUCCESS; NOREF(pVM);
503#ifdef VBOX_STRICT
504 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
505 {
506# ifdef IN_RING3
507 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
508 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
509 if (rc == VERR_DBGF_NOT_ATTACHED)
510 rc = VINF_SUCCESS;
511# else
512 return VINF_IOM_R3_MMIO_READ;
513# endif
514 }
515#endif
516
517 /*
518 * Split and conquer.
519 */
520 for (;;)
521 {
522 /*
523 * Do DWORD read from the device.
524 */
525 uint32_t u32Value;
526 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
527 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
528 switch (rc2)
529 {
530 case VINF_SUCCESS:
531 break;
532 case VINF_IOM_MMIO_UNUSED_FF:
533 u32Value = UINT32_C(0xffffffff);
534 break;
535 case VINF_IOM_MMIO_UNUSED_00:
536 u32Value = 0;
537 break;
538 case VINF_IOM_R3_MMIO_READ:
539 case VINF_IOM_R3_MMIO_READ_WRITE:
540 case VINF_IOM_R3_MMIO_WRITE:
541 /** @todo What if we've split a transfer and already read
542 * something? Since reads can have sideeffects we could be
543 * kind of screwed here... */
544 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
545 return rc2;
546 default:
547 if (RT_FAILURE(rc2))
548 {
549 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
550 return rc2;
551 }
552 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
553 if (rc == VINF_SUCCESS || rc2 < rc)
554 rc = rc2;
555 break;
556 }
557 u32Value >>= (GCPhys & 3) * 8;
558
559 /*
560 * Write what we've read.
561 */
562 unsigned cbThisPart = 4 - (GCPhys & 3);
563 if (cbThisPart > cbValue)
564 cbThisPart = cbValue;
565
566 switch (cbThisPart)
567 {
568 case 1:
569 *(uint8_t *)pvValue = (uint8_t)u32Value;
570 break;
571 case 2:
572 *(uint16_t *)pvValue = (uint16_t)u32Value;
573 break;
574 case 3:
575 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
576 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
577 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
578 break;
579 case 4:
580 *(uint32_t *)pvValue = u32Value;
581 break;
582 }
583
584 /*
585 * Advance.
586 */
587 cbValue -= cbThisPart;
588 if (!cbValue)
589 break;
590 GCPhys += cbThisPart;
591 pvValue = (uint8_t *)pvValue + cbThisPart;
592 }
593
594 return rc;
595}
596
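
/*
 * Worked example (illustrative): a 2 byte read at a GCPhys ending in ...3 is
 * carried out by iomMMIODoComplicatedRead above as two dword reads:
 *
 *      pass 1: read the dword at ...0, shift it right 24 bits, store 1 byte;
 *      pass 2: read the dword at ...4, store 1 byte (no shift needed).
 */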
597
598/**
599 * Implements VINF_IOM_MMIO_UNUSED_FF.
600 *
601 * @returns VINF_SUCCESS.
602 * @param pvValue Where to store the 0xff bytes.
603 * @param cbValue How many bytes to read.
604 */
605static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
606{
607 switch (cbValue)
608 {
609 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
610 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
611 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
612 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
613 default:
614 {
615 uint8_t *pb = (uint8_t *)pvValue;
616 while (cbValue--)
617 *pb++ = UINT8_C(0xff);
618 break;
619 }
620 }
621 return VINF_SUCCESS;
622}
623
624
625/**
626 * Implements VINF_IOM_MMIO_UNUSED_00.
627 *
628 * @returns VINF_SUCCESS.
629 * @param pvValue Where to store the zeros.
630 * @param cbValue How many bytes to read.
631 */
632static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
633{
634 switch (cbValue)
635 {
636 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
637 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
638 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
639 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
640 default:
641 {
642 uint8_t *pb = (uint8_t *)pvValue;
643 while (cbValue--)
644 *pb++ = UINT8_C(0x00);
645 break;
646 }
647 }
648 return VINF_SUCCESS;
649}
650
651
652/**
653 * Wrapper which does the read and updates range statistics when such are enabled.
654 */
655DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
656 void *pvValue, unsigned cbValue)
657{
658#ifdef VBOX_WITH_STATISTICS
659 int rcSem = IOM_LOCK_SHARED(pVM);
660 if (rcSem == VERR_SEM_BUSY)
661 return VINF_IOM_R3_MMIO_READ;
662 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
663 if (!pStats)
664# ifdef IN_RING3
665 return VERR_NO_MEMORY;
666# else
667 return VINF_IOM_R3_MMIO_READ;
668# endif
669 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
670#endif
671
672 VBOXSTRICTRC rcStrict;
673 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
674 {
675 if ( ( cbValue == 4
676 && !(GCPhys & 3))
677 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
678 || ( cbValue == 8
679 && !(GCPhys & 7)
680 && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
681 rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
682 pvValue, cbValue);
683 else
684 rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
685 }
686 else
687 rcStrict = VINF_IOM_MMIO_UNUSED_FF;
688 if (rcStrict != VINF_SUCCESS)
689 {
690 switch (VBOXSTRICTRC_VAL(rcStrict))
691 {
692 case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
693 case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
694 }
695 }
696
697 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
698 STAM_COUNTER_INC(&pStats->Accesses);
699 return rcStrict;
700}
701
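
/**
 * Sketch of a device read callback relying on the UNUSED status conversion in
 * iomMMIODoRead above. Illustrative only: the device state, base address and
 * register array are made-up names, not part of any real device:
 * @code
 * static DECLCALLBACK(int) myDevMmioRead(PPDMDEVINS pDevIns, void *pvUser,
 *                                        RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
 * {
 *     PMYDEVSTATE pThis  = PDMINS_2_DATA(pDevIns, PMYDEVSTATE);
 *     uint32_t    offReg = GCPhysAddr - pThis->GCPhysMmioBase;
 *     if (offReg >= sizeof(pThis->au32Regs))
 *         return VINF_IOM_MMIO_UNUSED_FF; // iomMMIODoRead fills the buffer with 0xff
 *     Assert(cb == 4 && !(offReg & 3));   // assuming a dword-only range
 *     *(uint32_t *)pv = pThis->au32Regs[offReg >> 2];
 *     return VINF_SUCCESS;
 * }
 * @endcode
 */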
702
703/**
704 * Internal - statistics only.
705 */
706DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
707{
708#ifdef VBOX_WITH_STATISTICS
709 switch (cb)
710 {
711 case 1:
712 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
713 break;
714 case 2:
715 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
716 break;
717 case 4:
718 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
719 break;
720 case 8:
721 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
722 break;
723 default:
724 /* No way. */
725 AssertMsgFailed(("Invalid data length %d\n", cb));
726 break;
727 }
728#else
729 NOREF(pVM); NOREF(cb);
730#endif
731}
732
733
734#ifndef IEM_USE_IEM_INSTEAD
735
736/**
737 * MOV reg, mem (read)
738 * MOVZX reg, mem (read)
739 * MOVSX reg, mem (read)
740 *
741 * @returns VBox status code.
742 *
743 * @param pVM The virtual machine.
744 * @param pVCpu Pointer to the virtual CPU structure of the caller.
745 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
746 * @param pCpu Disassembler CPU state.
747 * @param pRange Pointer to the MMIO range.
748 * @param GCPhysFault The GC physical address corresponding to pvFault.
749 */
750static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
751 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
752{
753 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
754
755 /*
756 * Get the data size from parameter 2,
757 * and call the handler function to get the data.
758 */
759 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
760 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
761
762 uint64_t u64Data = 0;
763 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
764 if (rc == VINF_SUCCESS)
765 {
766 /*
767 * Do sign extension for MOVSX.
768 */
769 /** @todo Check up on the MOVSX implementation! */
770 if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
771 {
772 if (cb == 1)
773 {
774 /* DWORD <- BYTE */
775 int64_t iData = (int8_t)u64Data;
776 u64Data = (uint64_t)iData;
777 }
778 else
779 {
780 /* DWORD <- WORD */
781 int64_t iData = (int16_t)u64Data;
782 u64Data = (uint64_t)iData;
783 }
784 }
785
786 /*
787 * Store the result to register (parameter 1).
788 */
789 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
790 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
791 }
792
793 if (rc == VINF_SUCCESS)
794 iomMMIOStatLength(pVM, cb);
795 return rc;
796}
797
798
799/**
800 * MOV mem, reg|imm (write)
801 *
802 * @returns VBox status code.
803 *
804 * @param pVM The virtual machine.
805 * @param pVCpu Pointer to the virtual CPU structure of the caller.
806 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
807 * @param pCpu Disassembler CPU state.
808 * @param pRange Pointer to the MMIO range.
809 * @param GCPhysFault The GC physical address corresponding to pvFault.
810 */
811static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
812 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
813{
814 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
815
816 /*
817 * Get data to write from second parameter,
818 * and call the callback to write it.
819 */
820 unsigned cb = 0;
821 uint64_t u64Data = 0;
822 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
823 AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);
824
825 int rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
826 if (rc == VINF_SUCCESS)
827 iomMMIOStatLength(pVM, cb);
828 return rc;
829}
830
831
832/** Wrapper for reading virtual memory. */
833DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
834{
835 /* Note: This will fail in R0 or RC if it hits an access handler. That
836 isn't a problem though since the operation can be restarted in REM. */
837#ifdef IN_RC
838 NOREF(pVCpu);
839 int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
840 /* Page may be protected and not directly accessible. */
841 if (rc == VERR_ACCESS_DENIED)
842 rc = VINF_IOM_R3_IOPORT_WRITE;
843 return rc;
844#else
845 return VBOXSTRICTRC_VAL(PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM));
846#endif
847}
848
849
850/** Wrapper for writing virtual memory. */
851DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
852{
853 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
854 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
855 * as well, since we're not behind the pgm lock and the handler may change between calls.
856 *
857 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
858 * the state of some shadowed structures. */
859#if defined(IN_RING0) || defined(IN_RC)
860 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
861#else
862 NOREF(pCtxCore);
863 return VBOXSTRICTRC_VAL(PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM));
864#endif
865}
866
867
868#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* Locking prevents this from working; it also has buggy ECX handling. */
869/**
870 * [REP] MOVSB
871 * [REP] MOVSW
872 * [REP] MOVSD
873 *
874 * Restricted implementation.
875 *
876 *
877 * @returns VBox status code.
878 *
879 * @param pVM The virtual machine.
880 * @param fWriteAccess Whether the access is a write (true) or a read (false).
881 * @param pRegFrame Trap register frame.
882 * @param GCPhysFault The GC physical address corresponding to pvFault.
883 * @param pCpu Disassembler CPU state.
884 * @param pRange Pointer to the MMIO range.
885 * @param ppStat Which sub-sample to attribute this call to.
886 */
887static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
888 PSTAMPROFILE *ppStat)
889{
890 /*
891 * We do not support segment prefixes or REPNE.
892 */
893 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
894 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
895
896 PVMCPU pVCpu = VMMGetCpu(pVM);
897
898 /*
899 * Get the number of bytes/words/dwords/qwords to copy.
900 */
901 uint32_t cTransfers = 1;
902 if (pCpu->fPrefix & DISPREFIX_REP)
903 {
904#ifndef IN_RC
905 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
906 && pRegFrame->rcx >= _4G)
907 return VINF_EM_RAW_EMULATE_INSTR;
908#endif
909
910 cTransfers = pRegFrame->ecx;
911 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
912 cTransfers &= 0xffff;
913
914 if (!cTransfers)
915 return VINF_SUCCESS;
916 }
917
918 /* Get the current privilege level. */
919 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
920
921 /*
922 * Get data size.
923 */
924 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
925 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
926 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
927
928#ifdef VBOX_WITH_STATISTICS
929 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
930 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
931#endif
932
933/** @todo re-evaluate on page boundaries. */
934
935 RTGCPHYS Phys = GCPhysFault;
936 int rc;
937 if (fWriteAccess)
938 {
939 /*
940 * Write operation: [Mem] -> [MMIO]
941 * ds:esi (Virt Src) -> es:edi (Phys Dst)
942 */
943 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
944
945 /* Check callback. */
946 if (!pRange->CTX_SUFF(pfnWriteCallback))
947 return VINF_IOM_R3_MMIO_WRITE;
948
949 /* Convert source address ds:esi. */
950 RTGCUINTPTR pu8Virt;
951 rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
952 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
953 (PRTGCPTR)&pu8Virt);
954 if (RT_SUCCESS(rc))
955 {
956
957 /* Access verification first; we currently can't recover properly from traps inside this instruction */
958 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
959 if (rc != VINF_SUCCESS)
960 {
961 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
962 return VINF_EM_RAW_EMULATE_INSTR;
963 }
964
965#ifdef IN_RC
966 MMGCRamRegisterTrapHandler(pVM);
967#endif
968
969 /* copy loop. */
970 while (cTransfers)
971 {
972 uint32_t u32Data = 0;
973 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
974 if (rc != VINF_SUCCESS)
975 break;
976 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb));
977 if (rc != VINF_SUCCESS)
978 break;
979
980 pu8Virt += offIncrement;
981 Phys += offIncrement;
982 pRegFrame->rsi += offIncrement;
983 pRegFrame->rdi += offIncrement;
984 cTransfers--;
985 }
986#ifdef IN_RC
987 MMGCRamDeregisterTrapHandler(pVM);
988#endif
989 /* Update ecx. */
990 if (pCpu->fPrefix & DISPREFIX_REP)
991 pRegFrame->ecx = cTransfers;
992 }
993 else
994 rc = VINF_IOM_R3_MMIO_READ_WRITE;
995 }
996 else
997 {
998 /*
999 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
1000 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
1001 */
1002 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
1003
1004 /* Check callback. */
1005 if (!pRange->CTX_SUFF(pfnReadCallback))
1006 return VINF_IOM_R3_MMIO_READ;
1007
1008 /* Convert destination address. */
1009 RTGCUINTPTR pu8Virt;
1010 rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1011 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1012 (RTGCPTR *)&pu8Virt);
1013 if (RT_FAILURE(rc))
1014 return VINF_IOM_R3_MMIO_READ;
1015
1016 /* Check if destination address is MMIO. */
1017 PIOMMMIORANGE pMMIODst;
1018 RTGCPHYS PhysDst;
1019 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
1020 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
1021 if ( RT_SUCCESS(rc)
1022 && (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
1023 {
1024 /** @todo implement per-device locks for MMIO access. */
1025 Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1026
1027 /*
1028 * Extra: [MMIO] -> [MMIO]
1029 */
1030 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
1031 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
1032 {
1033 iomMmioReleaseRange(pVM, pRange);
1034 return VINF_IOM_R3_MMIO_READ_WRITE;
1035 }
1036
1037 /* copy loop. */
1038 while (cTransfers)
1039 {
1040 uint32_t u32Data;
1041 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1042 if (rc != VINF_SUCCESS)
1043 break;
1044 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb));
1045 if (rc != VINF_SUCCESS)
1046 break;
1047
1048 Phys += offIncrement;
1049 PhysDst += offIncrement;
1050 pRegFrame->rsi += offIncrement;
1051 pRegFrame->rdi += offIncrement;
1052 cTransfers--;
1053 }
1054 iomMmioReleaseRange(pVM, pRange);
1055 }
1056 else
1057 {
1058 /*
1059 * Normal: [MMIO] -> [Mem]
1060 */
1061 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1062 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1063 if (rc != VINF_SUCCESS)
1064 {
1065 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
1066 return VINF_EM_RAW_EMULATE_INSTR;
1067 }
1068
1069 /* copy loop. */
1070#ifdef IN_RC
1071 MMGCRamRegisterTrapHandler(pVM);
1072#endif
1073 while (cTransfers)
1074 {
1075 uint32_t u32Data;
1076 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1077 if (rc != VINF_SUCCESS)
1078 break;
1079 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
1080 if (rc != VINF_SUCCESS)
1081 {
1082 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
1083 break;
1084 }
1085
1086 pu8Virt += offIncrement;
1087 Phys += offIncrement;
1088 pRegFrame->rsi += offIncrement;
1089 pRegFrame->rdi += offIncrement;
1090 cTransfers--;
1091 }
1092#ifdef IN_RC
1093 MMGCRamDeregisterTrapHandler(pVM);
1094#endif
1095 }
1096
1097 /* Update ecx on exit. */
1098 if (pCpu->fPrefix & DISPREFIX_REP)
1099 pRegFrame->ecx = cTransfers;
1100 }
1101
1102 /* work statistics. */
1103 if (rc == VINF_SUCCESS)
1104 iomMMIOStatLength(pVM, cb);
1105 NOREF(ppStat);
1106 return rc;
1107}
1108#endif /* IOM_WITH_MOVS_SUPPORT */
1109
1110
1111/**
1112 * Gets the address / opcode mask corresponding to the given CPU mode.
1113 *
1114 * @returns Mask.
1115 * @param enmCpuMode CPU mode.
1116 */
1117static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
1118{
1119 switch (enmCpuMode)
1120 {
1121 case DISCPUMODE_16BIT: return UINT16_MAX;
1122 case DISCPUMODE_32BIT: return UINT32_MAX;
1123 case DISCPUMODE_64BIT: return UINT64_MAX;
1124 default:
1125 AssertFailedReturn(UINT32_MAX);
1126 }
1127}
1128
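
/**
 * Example (illustrative): in 16-bit code only SI/DI/CX must change, so the
 * string instruction handlers below advance the registers under this mask
 * while preserving the upper bits:
 * @code
 *      uint64_t const fAddrMask = iomDisModeToMask(DISCPUMODE_16BIT); // 0xffff
 *      pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
 *                     | (pRegFrame->rsi & ~fAddrMask);
 * @endcode
 */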
1129
1130/**
1131 * [REP] STOSB
1132 * [REP] STOSW
1133 * [REP] STOSD
1134 *
1135 * Restricted implementation.
1136 *
1137 *
1138 * @returns VBox status code.
1139 *
1140 * @param pVM The virtual machine.
1141 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1142 * @param pRegFrame Trap register frame.
1143 * @param GCPhysFault The GC physical address corresponding to pvFault.
1144 * @param pCpu Disassembler CPU state.
1145 * @param pRange Pointer to the MMIO range.
1146 */
1147static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
1148 PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1149{
1150 /*
1151 * We do not support segment prefixes or REPNE.
1152 */
1153 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
1154 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1155
1156 /*
1157 * Get the number of bytes/words/dwords/qwords to copy.
1158 */
1159 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1160 RTGCUINTREG cTransfers = 1;
1161 if (pCpu->fPrefix & DISPREFIX_REP)
1162 {
1163#ifndef IN_RC
1164 if ( CPUMIsGuestIn64BitCode(pVCpu)
1165 && pRegFrame->rcx >= _4G)
1166 return VINF_EM_RAW_EMULATE_INSTR;
1167#endif
1168
1169 cTransfers = pRegFrame->rcx & fAddrMask;
1170 if (!cTransfers)
1171 return VINF_SUCCESS;
1172 }
1173
1174/** @todo r=bird: bounds checks! */
1175
1176 /*
1177 * Get data size.
1178 */
1179 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
1180 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1181 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1182
1183#ifdef VBOX_WITH_STATISTICS
1184 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
1185 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
1186#endif
1187
1188
1189 RTGCPHYS Phys = GCPhysFault;
1190 int rc;
1191 if ( pRange->CTX_SUFF(pfnFillCallback)
1192 && cb <= 4 /* can only fill 32-bit values */)
1193 {
1194 /*
1195 * Use the fill callback.
1196 */
1197 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
1198 if (offIncrement > 0)
1199 {
1200 /* addr++ variant. */
1201 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
1202 pRegFrame->eax, cb, cTransfers);
1203 if (rc == VINF_SUCCESS)
1204 {
1205 /* Update registers. */
1206 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1207 | (pRegFrame->rdi & ~fAddrMask);
1208 if (pCpu->fPrefix & DISPREFIX_REP)
1209 pRegFrame->rcx &= ~fAddrMask;
1210 }
1211 }
1212 else
1213 {
1214 /* addr-- variant. */
1215 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1216 Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
1217 pRegFrame->eax, cb, cTransfers);
1218 if (rc == VINF_SUCCESS)
1219 {
1220 /* Update registers. */
1221 pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1222 | (pRegFrame->rdi & ~fAddrMask);
1223 if (pCpu->fPrefix & DISPREFIX_REP)
1224 pRegFrame->rcx &= ~fAddrMask;
1225 }
1226 }
1227 }
1228 else
1229 {
1230 /*
1231 * Use the write callback.
1232 */
1233 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
1234 uint64_t u64Data = pRegFrame->rax;
1235
1236 /* fill loop. */
1237 do
1238 {
1239 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
1240 if (rc != VINF_SUCCESS)
1241 break;
1242
1243 Phys += offIncrement;
1244 pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
1245 | (pRegFrame->rdi & ~fAddrMask);
1246 cTransfers--;
1247 } while (cTransfers);
1248
1249 /* Update rcx on exit. */
1250 if (pCpu->fPrefix & DISPREFIX_REP)
1251 pRegFrame->rcx = (cTransfers & fAddrMask)
1252 | (pRegFrame->rcx & ~fAddrMask);
1253 }
1254
1255 /*
1256 * Work statistics and return.
1257 */
1258 if (rc == VINF_SUCCESS)
1259 iomMMIOStatLength(pVM, cb);
1260 return rc;
1261}
1262
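
/**
 * Sketch of a device fill callback matching the invocations in
 * iomInterpretSTOS above (illustrative; the device state and helper are
 * made-up names). Per the @todo above, it must currently complete the whole
 * fill in order to return VINF_SUCCESS:
 * @code
 * static DECLCALLBACK(int) myDevMmioFill(PPDMDEVINS pDevIns, void *pvUser,
 *                                        RTGCPHYS GCPhysAddr, uint32_t u32Item,
 *                                        unsigned cbItem, unsigned cItems)
 * {
 *     PMYDEVSTATE pThis  = PDMINS_2_DATA(pDevIns, PMYDEVSTATE);
 *     uint32_t    offReg = GCPhysAddr - pThis->GCPhysMmioBase;
 *     while (cItems-- > 0)
 *     {
 *         myDevWriteReg(pThis, offReg, u32Item, cbItem); // hypothetical helper
 *         offReg += cbItem;
 *     }
 *     return VINF_SUCCESS;
 * }
 * @endcode
 */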
1263
1264/**
1265 * [REP] LODSB
1266 * [REP] LODSW
1267 * [REP] LODSD
1268 *
1269 * Restricted implementation.
1270 *
1271 *
1272 * @returns VBox status code.
1273 *
1274 * @param pVM The virtual machine.
1275 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1276 * @param pRegFrame Trap register frame.
1277 * @param GCPhysFault The GC physical address corresponding to pvFault.
1278 * @param pCpu Disassembler CPU state.
1279 * @param pRange Pointer to the MMIO range.
1280 */
1281static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1282 PIOMMMIORANGE pRange)
1283{
1284 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1285
1286 /*
1287 * We do not support segment prefixes or REP*.
1288 */
1289 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
1290 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1291
1292 /*
1293 * Get data size.
1294 */
1295 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
1296 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1297 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1298
1299 /*
1300 * Perform read.
1301 */
1302 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb));
1303 if (rc == VINF_SUCCESS)
1304 {
1305 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1306 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
1307 | (pRegFrame->rsi & ~fAddrMask);
1308 }
1309
1310 /*
1311 * Work statistics and return.
1312 */
1313 if (rc == VINF_SUCCESS)
1314 iomMMIOStatLength(pVM, cb);
1315 return rc;
1316}
1317
1318
1319/**
1320 * CMP [MMIO], reg|imm
1321 * CMP reg|imm, [MMIO]
1322 *
1323 * Restricted implementation.
1324 *
1325 *
1326 * @returns VBox status code.
1327 *
1328 * @param pVM The virtual machine.
1329 * @param pRegFrame Trap register frame.
1330 * @param GCPhysFault The GC physical address corresponding to pvFault.
1331 * @param pCpu Disassembler CPU state.
1332 * @param pRange Pointer to the MMIO range.
1333 */
1334static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1335 PIOMMMIORANGE pRange)
1336{
1337 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1338
1339 /*
1340 * Get the operands.
1341 */
1342 unsigned cb = 0;
1343 uint64_t uData1 = 0;
1344 uint64_t uData2 = 0;
1345 int rc;
1346 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1347 /* cmp reg, [MMIO]. */
1348 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1349 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1350 /* cmp [MMIO], reg|imm. */
1351 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1352 else
1353 {
1354 AssertMsgFailed(("Disassembler CMP problem!\n"));
1355 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1356 }
1357
1358 if (rc == VINF_SUCCESS)
1359 {
1360#if HC_ARCH_BITS == 32
1361 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1362 if (cb > 4)
1363 return VINF_IOM_R3_MMIO_READ_WRITE;
1364#endif
1365 /* Emulate CMP and update guest flags. */
1366 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
1367 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1368 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1369 iomMMIOStatLength(pVM, cb);
1370 }
1371
1372 return rc;
1373}
1374
1375
1376/**
1377 * AND [MMIO], reg|imm
1378 * AND reg, [MMIO]
1379 * OR [MMIO], reg|imm
1380 * OR reg, [MMIO]
1381 *
1382 * Restricted implementation.
1383 *
1384 *
1385 * @returns VBox status code.
1386 *
1387 * @param pVM The virtual machine.
1388 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1389 * @param pRegFrame Trap register frame.
1390 * @param GCPhysFault The GC physical address corresponding to pvFault.
1391 * @param pCpu Disassembler CPU state.
1392 * @param pRange Pointer to the MMIO range.
1393 * @param pfnEmulate Instruction emulation function.
1394 */
1395static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1396 PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
1397{
1398 unsigned cb = 0;
1399 uint64_t uData1 = 0;
1400 uint64_t uData2 = 0;
1401 bool fAndWrite;
1402 int rc;
1403
1404#ifdef LOG_ENABLED
1405 const char *pszInstr;
1406
1407 if (pCpu->pCurInstr->uOpcode == OP_XOR)
1408 pszInstr = "Xor";
1409 else if (pCpu->pCurInstr->uOpcode == OP_OR)
1410 pszInstr = "Or";
1411 else if (pCpu->pCurInstr->uOpcode == OP_AND)
1412 pszInstr = "And";
1413 else
1414 pszInstr = "OrXorAnd??";
1415#endif
1416
1417 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1418 {
1419#if HC_ARCH_BITS == 32
1420 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1421 if (cb > 4)
1422 return VINF_IOM_R3_MMIO_READ_WRITE;
1423#endif
1424 /* and/or/xor reg, [MMIO]. */
1425 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1426 fAndWrite = false;
1427 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1428 }
1429 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1430 {
1431#if HC_ARCH_BITS == 32
1432 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1433 if (cb > 4)
1434 return VINF_IOM_R3_MMIO_READ_WRITE;
1435#endif
1436 /* and/or/xor [MMIO], reg|imm. */
1437 fAndWrite = true;
1438 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
1439 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
1440 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1441 else
1442 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1443 }
1444 else
1445 {
1446 AssertMsgFailed(("Disassembler AND problem!\n"));
1447 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1448 }
1449
1450 if (rc == VINF_SUCCESS)
1451 {
1452 /* Emulate the AND/OR/XOR and update the guest flags. */
1453 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
1454
1455 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
1456
1457 if (fAndWrite)
1458 /* Store result to MMIO. */
1459 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1460 else
1461 {
1462 /* Store result to register. */
1463 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
1464 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1465 }
1466 if (rc == VINF_SUCCESS)
1467 {
1468 /* Update guest's eflags and finish. */
1469 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1470 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1471 iomMMIOStatLength(pVM, cb);
1472 }
1473 }
1474
1475 return rc;
1476}
1477
1478
1479/**
1480 * TEST [MMIO], reg|imm
1481 * TEST reg, [MMIO]
1482 *
1483 * Restricted implementation.
1484 *
1485 *
1486 * @returns VBox status code.
1487 *
1488 * @param pVM The virtual machine.
1489 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1490 * @param pRegFrame Trap register frame.
1491 * @param GCPhysFault The GC physical address corresponding to pvFault.
1492 * @param pCpu Disassembler CPU state.
1493 * @param pRange Pointer to the MMIO range.
1494 */
1495static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1496 PIOMMMIORANGE pRange)
1497{
1498 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1499
1500 unsigned cb = 0;
1501 uint64_t uData1 = 0;
1502 uint64_t uData2 = 0;
1503 int rc;
1504
1505 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1506 {
1507 /* test reg, [MMIO]. */
1508 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1509 }
1510 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1511 {
1512 /* test [MMIO], reg|imm. */
1513 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1514 }
1515 else
1516 {
1517 AssertMsgFailed(("Disassembler TEST problem!\n"));
1518 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1519 }
1520
1521 if (rc == VINF_SUCCESS)
1522 {
1523#if HC_ARCH_BITS == 32
1524 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1525 if (cb > 4)
1526 return VINF_IOM_R3_MMIO_READ_WRITE;
1527#endif
1528
1529 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
1530 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
1531 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1532 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1533 iomMMIOStatLength(pVM, cb);
1534 }
1535
1536 return rc;
1537}
1538
1539
1540/**
1541 * BT [MMIO], reg|imm
1542 *
1543 * Restricted implementation.
1544 *
1545 *
1546 * @returns VBox status code.
1547 *
1548 * @param pVM The virtual machine.
1549 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1550 * @param pRegFrame Trap register frame.
1551 * @param GCPhysFault The GC physical address corresponding to pvFault.
1552 * @param pCpu Disassembler CPU state.
1553 * @param pRange Pointer to the MMIO range.
1554 */
1555static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1556 PIOMMMIORANGE pRange)
1557{
1558 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1559
1560 uint64_t uBit = 0;
1561 uint64_t uData = 0;
1562 unsigned cbIgnored;
1563
1564 if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
1565 {
1566 AssertMsgFailed(("Disassembler BT problem!\n"));
1567 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1568 }
1569 /* Only the size of the memory operand matters here. */
1570 unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
1571
1572 /* bt [MMIO], reg|imm. */
1573 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData));
1574 if (rc == VINF_SUCCESS)
1575 {
1576 /* Extract the bit from the value read at the faulting address. (Assigning the shifted value to the 1-bit u1CF field keeps only the selected bit.) */
1577 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
1578 iomMMIOStatLength(pVM, cbData);
1579 }
1580
1581 return rc;
1582}
1583
1584/**
1585 * XCHG [MMIO], reg
1586 * XCHG reg, [MMIO]
1587 *
1588 * Restricted implementation.
1589 *
1590 *
1591 * @returns VBox status code.
1592 *
1593 * @param pVM The virtual machine.
1594 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1595 * @param pRegFrame Trap register frame.
1596 * @param GCPhysFault The GC physical address corresponding to pvFault.
1597 * @param pCpu Disassembler CPU state.
1598 * @param pRange Pointer to the MMIO range.
1599 */
1600static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1601 PIOMMMIORANGE pRange)
1602{
1603 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1604 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1605 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1606 return VINF_IOM_R3_MMIO_READ_WRITE;
1607
1608 int rc;
1609 unsigned cb = 0;
1610 uint64_t uData1 = 0;
1611 uint64_t uData2 = 0;
1612 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1613 {
1614 /* xchg reg, [MMIO]. */
1615 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1616 if (rc == VINF_SUCCESS)
1617 {
1618 /* Store result to MMIO. */
1619 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1620
1621 if (rc == VINF_SUCCESS)
1622 {
1623 /* Store result to register. */
1624 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
1625 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1626 }
1627 else
1628 Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1629 }
1630 else
1631 Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1632 }
1633 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1634 {
1635 /* xchg [MMIO], reg. */
1636 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1637 if (rc == VINF_SUCCESS)
1638 {
1639 /* Store result to MMIO. */
1640 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1641 if (rc == VINF_SUCCESS)
1642 {
1643 /* Store result to register. */
1644 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
1645 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1646 }
1647 else
1648 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1649 }
1650 else
1651 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1652 }
1653 else
1654 {
1655 AssertMsgFailed(("Disassembler XCHG problem!\n"));
1656 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1657 }
1658 return rc;
1659}
1660
1661#endif /* !IEM_USE_IEM_INSTEAD */
1662
1663/**
1664 * \#PF Handler callback for MMIO ranges.
1665 *
1666 * @returns VBox status code (appropriate for GC return).
1667 * @param pVM Pointer to the VM.
1668 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1669 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1670 * any error code (the EPT misconfig hack).
1671 * @param pCtxCore Trap register frame.
1672 * @param GCPhysFault The GC physical address corresponding to pvFault.
1673 * @param pvUser Pointer to the MMIO ring-3 range entry.
1674 */
1675static VBOXSTRICTRC iomMMIOHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
1676{
1677 int rc = IOM_LOCK_SHARED(pVM);
1678#ifndef IN_RING3
1679 if (rc == VERR_SEM_BUSY)
1680 return VINF_IOM_R3_MMIO_READ_WRITE;
1681#endif
1682 AssertRC(rc);
1683
1684 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1685 Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1686
1687 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1688 Assert(pRange);
1689 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1690 iomMmioRetainRange(pRange);
1691#ifndef VBOX_WITH_STATISTICS
1692 IOM_UNLOCK_SHARED(pVM);
1693
1694#else
1695 /*
1696 * Locate the statistics.
1697 */
1698 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
1699 if (!pStats)
1700 {
1701 iomMmioReleaseRange(pVM, pRange);
1702# ifdef IN_RING3
1703 return VERR_NO_MEMORY;
1704# else
1705 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1706 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1707 return VINF_IOM_R3_MMIO_READ_WRITE;
1708# endif
1709 }
1710#endif
1711
1712#ifndef IN_RING3
1713 /*
1714 * Should we defer the request right away? This isn't usually the case, so
1715 * do the simple test first and then try to deal with uErrorCode being N/A.
1716 */
1717 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1718 || !pRange->CTX_SUFF(pfnReadCallback))
1719 && ( uErrorCode == UINT32_MAX
1720 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1721 : uErrorCode & X86_TRAP_PF_RW
1722 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1723 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1724 )
1725 )
1726 )
1727 {
1728 if (uErrorCode & X86_TRAP_PF_RW)
1729 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1730 else
1731 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1732
1733 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1734 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1735 iomMmioReleaseRange(pVM, pRange);
1736 return VINF_IOM_R3_MMIO_READ_WRITE;
1737 }
1738#endif /* !IN_RING3 */
1739
1740 /*
1741 * Take the device lock; the range was already retained above.
1742 */
1743 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1744 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1745 if (rc != VINF_SUCCESS)
1746 {
1747 iomMmioReleaseRange(pVM, pRange);
1748 return rc;
1749 }
1750
1751#ifdef IEM_USE_IEM_INSTEAD
1752
1753 /*
1754 * Let IEM call us back via iomMmioHandler.
1755 */
1756 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
1757
1758 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1759 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1760 iomMmioReleaseRange(pVM, pRange);
1761 return rcStrict;
1762
1763#else
1764
1765 /*
1766 * Disassemble the instruction and interpret it.
1767 */
1768 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1769 unsigned cbOp;
1770 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1771 if (RT_FAILURE(rc))
1772 {
1773 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1774 iomMmioReleaseRange(pVM, pRange);
1775 return rc;
1776 }
1777 switch (pDis->pCurInstr->uOpcode)
1778 {
1779 case OP_MOV:
1780 case OP_MOVZX:
1781 case OP_MOVSX:
1782 {
1783 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1784 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1785 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1786 ? uErrorCode & X86_TRAP_PF_RW
1787 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1788 rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1789 else
1790 rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1791 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1792 break;
1793 }
1794
1795
1796# ifdef IOM_WITH_MOVS_SUPPORT
1797 case OP_MOVSB:
1798 case OP_MOVSWD:
1799 {
1800 if (uErrorCode == UINT32_MAX)
1801 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1802 else
1803 {
1804 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1805 PSTAMPROFILE pStat = NULL;
1806 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1807 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1808 }
1809 break;
1810 }
1811# endif
1812
1813 case OP_STOSB:
1814 case OP_STOSWD:
1815 Assert(uErrorCode & X86_TRAP_PF_RW);
1816 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1817 rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1818 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1819 break;
1820
1821 case OP_LODSB:
1822 case OP_LODSWD:
1823 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1824 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1825 rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1826 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1827 break;
1828
1829 case OP_CMP:
1830 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1831 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1832 rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1833 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1834 break;
1835
1836 case OP_AND:
1837 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1838 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1839 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1840 break;
1841
1842 case OP_OR:
1843 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1844 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1845 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1846 break;
1847
1848 case OP_XOR:
1849 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1850 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1851 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1852 break;
1853
1854 case OP_TEST:
1855 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1856 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1857 rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1858 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1859 break;
1860
1861 case OP_BT:
1862 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1863 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1864 rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1865 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1866 break;
1867
1868 case OP_XCHG:
1869 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1870 rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1871 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1872 break;
1873
1874
1875 /*
1876         * The instruction isn't supported; hand it over to ring-3.
1877 */
1878 default:
1879 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1880 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1881 break;
1882 }
1883
1884 /*
1885     * On success, advance the guest RIP past the interpreted instruction.
1886 */
1887 if (rc == VINF_SUCCESS)
1888 pCtxCore->rip += cbOp;
1889 else
1890 {
1891 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1892# if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1893 switch (rc)
1894 {
1895 case VINF_IOM_R3_MMIO_READ:
1896 case VINF_IOM_R3_MMIO_READ_WRITE:
1897 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1898 break;
1899 case VINF_IOM_R3_MMIO_WRITE:
1900 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1901 break;
1902 }
1903# endif
1904 }
1905
1906 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1907 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1908 iomMmioReleaseRange(pVM, pRange);
1909 return rc;
1910#endif /* !IEM_USE_IEM_INSTEAD */
1911}
1912
1913
1914/**
1915 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1916 * \#PF access handler callback for MMIO pages.}
1917 *
1918 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
1919 */
1920DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
1921 RTGCPHYS GCPhysFault, void *pvUser)
1922{
1923 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1924 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1925 return iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1926}
1927
1928
1929/**
1930 * Physical access handler for MMIO ranges.
1931 *
1932 * @returns VBox status code (appropriate for GC return).
1933 * @param pVM Pointer to the VM.
1934 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1935 * @param uErrorCode CPU Error code.
1936 * @param pCtxCore Trap register frame.
1937 * @param GCPhysFault The GC physical address.
1938 */
1939VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1940{
1941 /*
1942 * We don't have a range here, so look it up before calling the common function.
1943 */
1944 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1945#ifndef IN_RING3
1946 if (rc2 == VERR_SEM_BUSY)
1947 return VINF_IOM_R3_MMIO_READ_WRITE;
1948#endif
1949 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1950 if (RT_UNLIKELY(!pRange))
1951 {
1952 IOM_UNLOCK_SHARED(pVM);
1953 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1954 }
1955 iomMmioRetainRange(pRange);
1956 IOM_UNLOCK_SHARED(pVM);
1957
1958 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
1959
1960 iomMmioReleaseRange(pVM, pRange);
1961 return VBOXSTRICTRC_VAL(rcStrict);
1962}
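/*
 * The lookup above follows the usual IOM range protocol: hold the shared IOM
 * lock only long enough to find and retain the range, then drop the lock and
 * work on the retained range.  A minimal caller sketch under that pattern
 * (illustrative only, excluded from the build):
 */
#if 0
int rc = IOM_LOCK_SHARED(pVM);
if (RT_SUCCESS(rc))
{
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    if (pRange)
        iomMmioRetainRange(pRange);         /* reference outlives the lock */
    IOM_UNLOCK_SHARED(pVM);
    if (pRange)
    {
        /* ... access the range without holding the IOM lock ... */
        iomMmioReleaseRange(pVM, pRange);
    }
}
#endif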
1963
1964
1965/**
1966 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
1967 *
1968 * @remarks The @a pvUser argument points to the MMIO range entry.
1969 */
1970PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
1971 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
1972{
1973 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1974 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1975
1976 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
1977 AssertPtr(pRange);
1978 NOREF(pvPhys); NOREF(enmOrigin);
1979
1980 /*
1981 * Validate the range.
1982 */
1983 int rc = IOM_LOCK_SHARED(pVM);
1984 AssertRC(rc);
1985 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1986
1987 /*
1988 * Perform locking.
1989 */
1990 iomMmioRetainRange(pRange);
1991 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1992 IOM_UNLOCK_SHARED(pVM);
1993 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1994 if (rcStrict == VINF_SUCCESS)
1995 {
1996 /*
1997 * Perform the access.
1998 */
1999 if (enmAccessType == PGMACCESSTYPE_READ)
2000 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2001 else
2002 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2003
2004 /* Check the return code. */
2005#ifdef IN_RING3
2006 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
2007#else
2008 AssertMsg( rcStrict == VINF_SUCCESS
2009 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
2010 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
2011 || rcStrict == VINF_EM_DBG_STOP
2012 || rcStrict == VINF_EM_DBG_BREAKPOINT
2013 || rcStrict == VINF_EM_OFF
2014 || rcStrict == VINF_EM_SUSPEND
2015 || rcStrict == VINF_EM_RESET
2016 //|| rcStrict == VINF_EM_HALT /* ?? */
2017 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
2018 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
2019#endif
2020
2021 iomMmioReleaseRange(pVM, pRange);
2022 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2023 }
2024 else
2025 iomMmioReleaseRange(pVM, pRange);
2026 return rcStrict;
2027}
2028
2029
2030#ifdef IN_RING3 /* Only used by REM. */
2031
2032/**
2033 * Reads a MMIO register.
2034 *
2035 * @returns VBox status code.
2036 *
2037 * @param pVM Pointer to the VM.
2038 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2039 * @param GCPhys The physical address to read.
2040 * @param pu32Value Where to store the value read.
2041 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
2042 */
2043VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
2044{
2045 /* Take the IOM lock before performing any MMIO. */
2046 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2047#ifndef IN_RING3
2048 if (rc == VERR_SEM_BUSY)
2049        return VINF_IOM_R3_MMIO_READ;
2050#endif
2051 AssertRC(VBOXSTRICTRC_VAL(rc));
2052#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2053 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
2054#endif
2055
2056 /*
2057 * Lookup the current context range node and statistics.
2058 */
2059 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2060 if (!pRange)
2061 {
2062 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2063 IOM_UNLOCK_SHARED(pVM);
2064 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2065 }
2066 iomMmioRetainRange(pRange);
2067#ifndef VBOX_WITH_STATISTICS
2068 IOM_UNLOCK_SHARED(pVM);
2069
2070#else /* VBOX_WITH_STATISTICS */
2071 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2072 if (!pStats)
2073 {
2074 iomMmioReleaseRange(pVM, pRange);
2075# ifdef IN_RING3
2076 return VERR_NO_MEMORY;
2077# else
2078 return VINF_IOM_R3_MMIO_READ;
2079# endif
2080 }
2081 STAM_COUNTER_INC(&pStats->Accesses);
2082#endif /* VBOX_WITH_STATISTICS */
2083
2084 if (pRange->CTX_SUFF(pfnReadCallback))
2085 {
2086 /*
2087 * Perform locking.
2088 */
2089 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2090        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
2091 if (rc != VINF_SUCCESS)
2092 {
2093 iomMmioReleaseRange(pVM, pRange);
2094 return rc;
2095 }
2096
2097 /*
2098 * Perform the read and deal with the result.
2099 */
2100 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
2101 if ( (cbValue == 4 && !(GCPhys & 3))
2102 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
2103 || (cbValue == 8 && !(GCPhys & 7)) )
2104 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
2105 pu32Value, (unsigned)cbValue);
2106 else
2107 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
2108 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2109 switch (VBOXSTRICTRC_VAL(rc))
2110 {
2111 case VINF_SUCCESS:
2112 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2113 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2114 iomMmioReleaseRange(pVM, pRange);
2115 return rc;
2116#ifndef IN_RING3
2117 case VINF_IOM_R3_MMIO_READ:
2118 case VINF_IOM_R3_MMIO_READ_WRITE:
2119 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2120#endif
2121 default:
2122 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2123 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2124 iomMmioReleaseRange(pVM, pRange);
2125 return rc;
2126
2127 case VINF_IOM_MMIO_UNUSED_00:
2128 iomMMIODoRead00s(pu32Value, cbValue);
2129 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2130 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2131 iomMmioReleaseRange(pVM, pRange);
2132 return VINF_SUCCESS;
2133
2134 case VINF_IOM_MMIO_UNUSED_FF:
2135 iomMMIODoReadFFs(pu32Value, cbValue);
2136 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2137 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2138 iomMmioReleaseRange(pVM, pRange);
2139 return VINF_SUCCESS;
2140 }
2141 /* not reached */
2142 }
2143#ifndef IN_RING3
2144 if (pRange->pfnReadCallbackR3)
2145 {
2146 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2147 iomMmioReleaseRange(pVM, pRange);
2148 return VINF_IOM_R3_MMIO_READ;
2149 }
2150#endif
2151
2152 /*
2153     * Unassigned memory - this is actually not supposed to happen...
2154 */
2155 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
2156 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2157 iomMMIODoReadFFs(pu32Value, cbValue);
2158 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2159 iomMmioReleaseRange(pVM, pRange);
2160 return VINF_SUCCESS;
2161}
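/*
 * The VINF_IOM_MMIO_UNUSED_00/FF cases above let a device callback signal
 * "no register here" and have IOM synthesize an all-zeros or all-ones value.
 * A minimal device read callback sketch (hypothetical device; assumes the
 * FNIOMMMIOREAD shape used by this IOM version):
 */
#if 0
static DECLCALLBACK(int) devDemoMmioRead(PPDMDEVINS pDevIns, void *pvUser,
                                         RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
{
    NOREF(pDevIns); NOREF(pvUser);
    if ((GCPhysAddr & 0xfff) == 0 && cb == 4)   /* the only register in this sketch */
    {
        *(uint32_t *)pv = UINT32_C(0x12345678);
        return VINF_SUCCESS;
    }
    return VINF_IOM_MMIO_UNUSED_FF;             /* IOM fills *pv with 0xFF bytes */
}
#endif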
2162
2163
2164/**
2165 * Writes to a MMIO register.
2166 *
2167 * @returns VBox status code.
2168 *
2169 * @param pVM Pointer to the VM.
2170 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2171 * @param GCPhys The physical address to write to.
2172 * @param u32Value The value to write.
2173 * @param   cbValue     The size of the register to write in bytes. 1, 2 or 4 bytes.
2174 */
2175VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
2176{
2177 /* Take the IOM lock before performing any MMIO. */
2178 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2179#ifndef IN_RING3
2180 if (rc == VERR_SEM_BUSY)
2181 return VINF_IOM_R3_MMIO_WRITE;
2182#endif
2183 AssertRC(VBOXSTRICTRC_VAL(rc));
2184#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2185 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2186#endif
2187
2188 /*
2189 * Lookup the current context range node.
2190 */
2191 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2192 if (!pRange)
2193 {
2194 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2195 IOM_UNLOCK_SHARED(pVM);
2196 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2197 }
2198 iomMmioRetainRange(pRange);
2199#ifndef VBOX_WITH_STATISTICS
2200 IOM_UNLOCK_SHARED(pVM);
2201
2202#else /* VBOX_WITH_STATISTICS */
2203 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2204 if (!pStats)
2205 {
2206 iomMmioReleaseRange(pVM, pRange);
2207# ifdef IN_RING3
2208 return VERR_NO_MEMORY;
2209# else
2210 return VINF_IOM_R3_MMIO_WRITE;
2211# endif
2212 }
2213 STAM_COUNTER_INC(&pStats->Accesses);
2214#endif /* VBOX_WITH_STATISTICS */
2215
2216 if (pRange->CTX_SUFF(pfnWriteCallback))
2217 {
2218 /*
2219 * Perform locking.
2220 */
2221 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2222        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2223 if (rc != VINF_SUCCESS)
2224 {
2225 iomMmioReleaseRange(pVM, pRange);
2226 return rc;
2227 }
2228
2229 /*
2230 * Perform the write.
2231 */
2232 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2233 if ( (cbValue == 4 && !(GCPhys & 3))
2234 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2235 || (cbValue == 8 && !(GCPhys & 7)) )
2236 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2237 GCPhys, &u32Value, (unsigned)cbValue);
2238 else
2239 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2240 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2241#ifndef IN_RING3
2242 if ( rc == VINF_IOM_R3_MMIO_WRITE
2243 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2244 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2245#endif
2246 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2247 iomMmioReleaseRange(pVM, pRange);
2248 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2249 return rc;
2250 }
2251#ifndef IN_RING3
2252 if (pRange->pfnWriteCallbackR3)
2253 {
2254 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2255 iomMmioReleaseRange(pVM, pRange);
2256 return VINF_IOM_R3_MMIO_WRITE;
2257 }
2258#endif
2259
2260 /*
2261 * No write handler, nothing to do.
2262 */
2263 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2264 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2265 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2266 iomMmioReleaseRange(pVM, pRange);
2267 return VINF_SUCCESS;
2268}
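/*
 * Both IOMMMIORead and IOMMMIOWrite take the direct callback path only for
 * naturally aligned 4 or 8 byte accesses, or when the range is flagged as
 * PASSTHRU; anything else (e.g. a 2-byte write at offset 1) is split up by
 * iomMMIODoComplicatedRead/Write.  The predicate, restated as a sketch
 * (hypothetical helper name, excluded from the build):
 */
#if 0
static bool iomMmioIsDirectWrite(PIOMMMIORANGE pRange, RTGCPHYS GCPhys, size_t cbValue)
{
    return (cbValue == 4 && !(GCPhys & 3))
        || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
        || (cbValue == 8 && !(GCPhys & 7));
}
#endif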
2269
2270#endif /* IN_RING3 - only used by REM. */
2271#ifndef IEM_USE_IEM_INSTEAD
2272
2273/**
2274 * [REP*] INSB/INSW/INSD
2275 * ES:EDI,DX[,ECX]
2276 *
2277 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2278 *
2279 * @returns Strict VBox status code. Informational status codes other than the one documented
2280 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2281 * @retval VINF_SUCCESS Success.
2282 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2283 * status code must be passed on to EM.
2284 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2285 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2286 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2287 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2288 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2289 *
2290 * @param pVM The virtual machine.
2291 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2292 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2293 * @param uPort IO Port
2294 * @param uPrefix IO instruction prefix
2295 * @param enmAddrMode The address mode.
2296 * @param cbTransfer Size of transfer unit
2297 */
2298VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2299 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2300{
2301 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2302
2303 /*
2304     * We do not support REPNE or a decrementing destination
2305     * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2306 */
2307 if ( (uPrefix & DISPREFIX_REPNE)
2308 || pRegFrame->eflags.Bits.u1DF)
2309 return VINF_EM_RAW_EMULATE_INSTR;
2310
2311 /*
2312 * Get bytes/words/dwords count to transfer.
2313 */
2314 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2315 RTGCUINTREG cTransfers = 1;
2316 if (uPrefix & DISPREFIX_REP)
2317 {
2318#ifndef IN_RC
2319 if ( CPUMIsGuestIn64BitCode(pVCpu)
2320 && pRegFrame->rcx >= _4G)
2321 return VINF_EM_RAW_EMULATE_INSTR;
2322#endif
2323 cTransfers = pRegFrame->rcx & fAddrMask;
2324 if (!cTransfers)
2325 return VINF_SUCCESS;
2326 }
2327
2328 /* Convert destination address es:edi. */
2329 RTGCPTR GCPtrDst;
2330 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2331 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2332 &GCPtrDst);
2333 if (RT_FAILURE(rc2))
2334 {
2335 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2336 return VINF_EM_RAW_EMULATE_INSTR;
2337 }
2338
2339 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2340 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2341 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2342 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2343 if (rc2 != VINF_SUCCESS)
2344 {
2345 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2346 return VINF_EM_RAW_EMULATE_INSTR;
2347 }
2348
2349 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2350 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2351 if (cTransfers > 1)
2352 {
2353 /*
2354 * Work the string page by page, letting the device handle as much
2355 * as it likes via the string I/O interface.
2356 */
2357 for (;;)
2358 {
2359 PGMPAGEMAPLOCK Lock;
2360 void *pvDst;
2361 rc2 = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2362 if (RT_SUCCESS(rc2))
2363 {
2364 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK)) / cbTransfer;
2365 if (cMaxThisTime > cTransfers)
2366 cMaxThisTime = cTransfers;
2367 if (!cMaxThisTime)
2368 break;
2369 uint32_t cThisTime = cMaxThisTime;
2370
2371 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, pvDst, &cThisTime, cbTransfer);
2372 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2373 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2374
2375 uint32_t const cActual = cMaxThisTime - cThisTime;
2376 if (cActual)
2377                { /* Must dirty the page: rewrite the first byte through iomRamWrite so the page is marked dirty/modified. */
2378 uint8_t b = *(uint8_t *)pvDst;
2379 iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &b, 1);
2380 }
2381
2382 PGMPhysReleasePageMappingLock(pVM, &Lock);
2383
2384 uint32_t const cbActual = cActual * cbTransfer;
2385 cTransfers -= cActual;
2386 pRegFrame->rdi = ((pRegFrame->rdi + cbActual) & fAddrMask)
2387 | (pRegFrame->rdi & ~fAddrMask);
2388 GCPtrDst += cbActual;
2389
2390 if ( cThisTime
2391 || !cTransfers
2392 || rcStrict != VINF_SUCCESS
2393 || (GCPtrDst & PAGE_OFFSET_MASK))
2394 break;
2395 }
2396 else
2397 {
2398                Log(("IOMInterpretINSEx: PGMPhysGCPtr2CCPtr %#RGv -> %Rrc\n", GCPtrDst, rc2));
2399 break;
2400 }
2401 }
2402 }
2403
2404 /*
2405 * Single transfer / unmapped memory fallback.
2406 */
2407#ifdef IN_RC
2408 MMGCRamRegisterTrapHandler(pVM);
2409#endif
2410 while (cTransfers && rcStrict == VINF_SUCCESS)
2411 {
2412 uint32_t u32Value;
2413 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2414 if (!IOM_SUCCESS(rcStrict))
2415 break;
2416 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2417 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2418 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2419 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2420 | (pRegFrame->rdi & ~fAddrMask);
2421 cTransfers--;
2422 }
2423#ifdef IN_RC
2424 MMGCRamDeregisterTrapHandler(pVM);
2425#endif
2426
2427 /* Update rcx on exit. */
2428 if (uPrefix & DISPREFIX_REP)
2429 pRegFrame->rcx = (cTransfers & fAddrMask)
2430 | (pRegFrame->rcx & ~fAddrMask);
2431
2432 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2433 return rcStrict;
2434}
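/*
 * The rdi/rcx updates above use fAddrMask to honour the instruction's
 * address-size attribute: only the masked bits advance, the rest of the
 * register is preserved.  Worked example for a 16-bit address mode, where
 * fAddrMask is 0xffff (sketch, excluded from the build):
 */
#if 0
uint64_t rdi       = UINT64_C(0x0000000012340001);
uint64_t fAddrMask = UINT64_C(0xffff);      /* iomDisModeToMask(DISCPUMODE_16BIT) */
uint32_t cbActual  = 0xffff;                /* advance DI across the 64K boundary */
rdi = ((rdi + cbActual) & fAddrMask) | (rdi & ~fAddrMask);
/* rdi == 0x0000000012340000: DI wrapped within 64K, the upper bits are untouched. */
#endif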
2435
2436
2437/**
2438 * [REP*] OUTSB/OUTSW/OUTSD
2439 * DS:ESI,DX[,ECX]
2440 *
2441 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2442 *
2443 * @returns Strict VBox status code. Informational status codes other than the one documented
2444 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2445 * @retval VINF_SUCCESS Success.
2446 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2447 * status code must be passed on to EM.
2448 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2449 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2450 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2451 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2452 *
2453 * @param pVM The virtual machine.
2454 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2455 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2456 * @param uPort IO Port
2457 * @param uPrefix IO instruction prefix
2458 * @param enmAddrMode The address mode.
2459 * @param cbTransfer Size of transfer unit
2460 *
2461 * @remarks This API will probably be replaced by IEM before long, so no use in
2462 * optimizing+fixing stuff too much here.
2463 */
2464VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2465 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2466{
2467 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2468
2469 /*
2470     * We do not support segment prefixes, REPNE or a
2471     * decrementing source pointer.
2472 */
2473 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2474 || pRegFrame->eflags.Bits.u1DF)
2475 return VINF_EM_RAW_EMULATE_INSTR;
2476
2477 /*
2478 * Get bytes/words/dwords count to transfer.
2479 */
2480 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2481 RTGCUINTREG cTransfers = 1;
2482 if (uPrefix & DISPREFIX_REP)
2483 {
2484#ifndef IN_RC
2485 if ( CPUMIsGuestIn64BitCode(pVCpu)
2486 && pRegFrame->rcx >= _4G)
2487 return VINF_EM_RAW_EMULATE_INSTR;
2488#endif
2489 cTransfers = pRegFrame->rcx & fAddrMask;
2490 if (!cTransfers)
2491 return VINF_SUCCESS;
2492 }
2493
2494 /* Convert source address ds:esi. */
2495 RTGCPTR GCPtrSrc;
2496 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2497 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2498 &GCPtrSrc);
2499 if (RT_FAILURE(rc2))
2500 {
2501 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2502 return VINF_EM_RAW_EMULATE_INSTR;
2503 }
2504
2505 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2506 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2507 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2508 (cpl == 3) ? X86_PTE_US : 0);
2509 if (rc2 != VINF_SUCCESS)
2510 {
2511 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2512 return VINF_EM_RAW_EMULATE_INSTR;
2513 }
2514
2515 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2516 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2517 if (cTransfers > 1)
2518 {
2519 /*
2520 * Work the string page by page, letting the device handle as much
2521 * as it likes via the string I/O interface.
2522 */
2523 for (;;)
2524 {
2525 PGMPAGEMAPLOCK Lock;
2526 void const *pvSrc;
2527 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2528 if (RT_SUCCESS(rc2))
2529 {
2530 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK)) / cbTransfer;
2531 if (cMaxThisTime > cTransfers)
2532 cMaxThisTime = cTransfers;
2533 if (!cMaxThisTime)
2534 break;
2535 uint32_t cThisTime = cMaxThisTime;
2536
2537 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc, &cThisTime, cbTransfer);
2538 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2539 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2540
2541 PGMPhysReleasePageMappingLock(pVM, &Lock);
2542
2543 uint32_t const cActual = cMaxThisTime - cThisTime;
2544 uint32_t const cbActual = cActual * cbTransfer;
2545 cTransfers -= cActual;
2546 pRegFrame->rsi = ((pRegFrame->rsi + cbActual) & fAddrMask)
2547 | (pRegFrame->rsi & ~fAddrMask);
2548 GCPtrSrc += cbActual;
2549
2550 if ( cThisTime
2551 || !cTransfers
2552 || rcStrict != VINF_SUCCESS
2553 || (GCPtrSrc & PAGE_OFFSET_MASK))
2554 break;
2555 }
2556 else
2557 {
2558 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtrReadOnly %#RGv -> %Rrc\n", GCPtrSrc, rc2));
2559 break;
2560 }
2561 }
2562 }
2563
2564 /*
2565 * Single transfer / unmapped memory fallback.
2566 */
2567#ifdef IN_RC
2568 MMGCRamRegisterTrapHandler(pVM);
2569#endif
2570
2571 while (cTransfers && rcStrict == VINF_SUCCESS)
2572 {
2573 uint32_t u32Value = 0;
2574 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2575 if (rcStrict != VINF_SUCCESS)
2576 break;
2577 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2578 if (!IOM_SUCCESS(rcStrict))
2579 break;
2580 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
2581 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2582 | (pRegFrame->rsi & ~fAddrMask);
2583 cTransfers--;
2584 }
2585
2586#ifdef IN_RC
2587 MMGCRamDeregisterTrapHandler(pVM);
2588#endif
2589
2590 /* Update rcx on exit. */
2591 if (uPrefix & DISPREFIX_REP)
2592 pRegFrame->rcx = (cTransfers & fAddrMask)
2593 | (pRegFrame->rcx & ~fAddrMask);
2594
2595 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2596 return rcStrict;
2597}
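/*
 * Both string loops above rely on the in/out count convention of
 * IOMIOPortReadString/IOMIOPortWriteString: the count is passed by pointer
 * and on return holds the number of units NOT yet transferred (0 when the
 * device handled everything).  Usage sketch under that assumption:
 */
#if 0
uint32_t cTransfersLeft = 8;                /* request 8 units */
VBOXSTRICTRC rcStrict2 = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc,
                                              &cTransfersLeft, cbTransfer);
uint32_t const cDone = 8 - cTransfersLeft;  /* units the device actually consumed */
#endif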
2598
2599#endif /* !IEM_USE_IEM_INSTEAD */
2600
2601
2602#ifndef IN_RC
2603
2604/**
2605 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2606 *
2607 * (This is a special optimization used by the VGA device.)
2608 *
2609 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2610 *          remapping is made.
2611 *
2612 * @param pVM The virtual machine.
2613 * @param GCPhys The address of the MMIO page to be changed.
2614 * @param GCPhysRemapped The address of the MMIO2 page.
2615 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2616 * for the time being.
2617 */
2618VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2619{
2620# ifndef IEM_VERIFICATION_MODE_FULL
2621 /* Currently only called from the VGA device during MMIO. */
2622 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2623 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2624 PVMCPU pVCpu = VMMGetCpu(pVM);
2625
2626 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2627 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2628 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2629 && !HMIsNestedPagingActive(pVM)))
2630 return VINF_SUCCESS; /* ignore */
2631
2632 int rc = IOM_LOCK_SHARED(pVM);
2633 if (RT_FAILURE(rc))
2634 return VINF_SUCCESS; /* better luck the next time around */
2635
2636 /*
2637 * Lookup the context range node the page belongs to.
2638 */
2639 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2640 AssertMsgReturn(pRange,
2641 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2642
2643 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2644 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2645
2646 /*
2647 * Do the aliasing; page align the addresses since PGM is picky.
2648 */
2649 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2650 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2651
2652 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2653
2654 IOM_UNLOCK_SHARED(pVM);
2655 AssertRCReturn(rc, rc);
2656
2657 /*
2658 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2659 * can simply prefetch it.
2660 *
2661 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2662 */
2663# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2664# ifdef VBOX_STRICT
2665 uint64_t fFlags;
2666 RTHCPHYS HCPhys;
2667 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2668 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2669# endif
2670# endif
2671 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2672 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2673# endif /* !IEM_VERIFICATION_MODE_FULL */
2674 return VINF_SUCCESS;
2675}
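/*
 * Typical use of the remapping pair (sketch; the physical addresses are
 * placeholders): a device maps its MMIO2 backing page over the handler page
 * for direct guest access and restores the handlers on reset.
 */
#if 0
IOMMMIOMapMMIO2Page(pVM, GCPhysMmioPage, GCPhysMmio2Page, X86_PTE_RW | X86_PTE_P);
/* ... guest now accesses the page directly, no #PF/VM-exit per access ... */
IOMMMIOResetRegion(pVM, GCPhysMmioPage);    /* restore the access handlers */
#endif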
2676
2677
2678# ifndef IEM_VERIFICATION_MODE_FULL
2679/**
2680 * Mapping a HC page in place of an MMIO page for direct access.
2681 *
2682 * (This is a special optimization used by the APIC in the VT-x case.)
2683 *
2684 * @returns VBox status code.
2685 *
2686 * @param pVM Pointer to the VM.
2687 * @param pVCpu Pointer to the VMCPU.
2688 * @param GCPhys The address of the MMIO page to be changed.
2689 * @param HCPhys The address of the host physical page.
2690 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2691 * for the time being.
2692 */
2693VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2694{
2695 /* Currently only called from VT-x code during a page fault. */
2696 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2697
2698 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2699 Assert(HMIsEnabled(pVM));
2700
2701 /*
2702 * Lookup the context range node the page belongs to.
2703 */
2704# ifdef VBOX_STRICT
2705 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2706 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2707 AssertMsgReturn(pRange,
2708 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2709 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2710 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2711# endif
2712
2713 /*
2714 * Do the aliasing; page align the addresses since PGM is picky.
2715 */
2716 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2717 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2718
2719 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2720 AssertRCReturn(rc, rc);
2721
2722 /*
2723 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2724 * can simply prefetch it.
2725 *
2726 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2727 */
2728 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2729 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2730 return VINF_SUCCESS;
2731}
2732# endif /* !IEM_VERIFICATION_MODE_FULL */
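/*
 * Usage sketch for the VT-x/APIC variant above (addresses are placeholders):
 * map the host physical page over the guest's APIC MMIO page so that
 * accesses no longer fault.
 */
#if 0
IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, HCPhysApicPage, X86_PTE_RW | X86_PTE_P);
#endif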
2733
2734
2735/**
2736 * Reset a previously modified MMIO region; restore the access flags.
2737 *
2738 * @returns VBox status code.
2739 *
2740 * @param pVM The virtual machine.
2741 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2742 */
2743VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2744{
2745 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2746
2747 PVMCPU pVCpu = VMMGetCpu(pVM);
2748
2749 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2750 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2751 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2752 && !HMIsNestedPagingActive(pVM)))
2753 return VINF_SUCCESS; /* ignore */
2754
2755 /*
2756 * Lookup the context range node the page belongs to.
2757 */
2758# ifdef VBOX_STRICT
2759 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2760 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2761 AssertMsgReturn(pRange,
2762 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2763 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2764 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2765# endif
2766
2767 /*
2768     * Call PGM to do the work.
2769 *
2770 * After the call, all the pages should be non-present... unless there is
2771 * a page pool flush pending (unlikely).
2772 */
2773 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2774 AssertRC(rc);
2775
2776# ifdef VBOX_STRICT
2777 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2778 {
2779 uint32_t cb = pRange->cb;
2780 GCPhys = pRange->GCPhys;
2781 while (cb)
2782 {
2783 uint64_t fFlags;
2784 RTHCPHYS HCPhys;
2785 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2786 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2787 cb -= PAGE_SIZE;
2788 GCPhys += PAGE_SIZE;
2789 }
2790 }
2791# endif
2792 return rc;
2793}
2794
2795#endif /* !IN_RC */
2796