VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@57952

Last change on this file since 57952 was 57860, checked in by vboxsync, 9 years ago

VMM: warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 98.2 KB
Line 
1/* $Id: IOMAllMMIO.cpp 57860 2015-09-22 14:57:16Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#include <VBox/vmm/iem.h>
32#include "IOMInternal.h"
33#include <VBox/vmm/vm.h>
34#include <VBox/vmm/vmm.h>
35#include <VBox/vmm/hm.h>
36#include "IOMInline.h"
37
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52/** @def IEM_USE_IEM_INSTEAD
53 * Use IEM instead of IOM for interpreting MMIO accesses.
54 * Because of PATM/CSAM issues in raw-mode, we've split this up into the 2nd
55 * and 3rd IEM deployment steps. */
56#if ((defined(IN_RING3) || defined(IN_RING0)) && defined(VBOX_WITH_2ND_IEM_STEP)) \
57 || defined(VBOX_WITH_3RD_IEM_STEP)
58# define IEM_USE_IEM_INSTEAD
59#endif
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
65
66/**
67 * Array for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
68 */
69static const unsigned g_aSize2Shift[] =
70{
71 ~0U, /* 0 - invalid */
72 0, /* *1 == 2^0 */
73 1, /* *2 == 2^1 */
74 ~0U, /* 3 - invalid */
75 2, /* *4 == 2^2 */
76 ~0U, /* 5 - invalid */
77 ~0U, /* 6 - invalid */
78 ~0U, /* 7 - invalid */
79 3 /* *8 == 2^3 */
80};
81
82/**
83 * Macro for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
84 */
85#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
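/* Illustrative usage sketch (assuming cb has already been validated): the
 * shift lets callers compute byte counts without a multiply, e.g. for a
 * dword-sized access:
 *
 *     unsigned cb = 4;
 *     RTGCUINTREG cbBytes = cTransfers << SIZE_2_SHIFT(cb); // == cTransfers * 4
 *
 * Invalid sizes (0, 3, 5, 6, 7) map to ~0U, which would make the shift
 * undefined, so callers must only pass 1, 2, 4 or 8. */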
86
87
88/**
89 * Returns the contents of a register or the immediate data of an instruction parameter.
90 *
91 * @returns true on success.
92 *
93 * @todo Get rid of this code. Use DISQueryParamVal instead
94 *
95 * @param pCpu Pointer to current disassembler context.
96 * @param pParam Pointer to parameter of instruction to process.
97 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
98 * @param pu64Data Where to store retrieved data.
99 * @param pcbSize Where to store the size of data (1, 2, 4, 8).
100 */
101bool iomGetRegImmData(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t *pu64Data, unsigned *pcbSize)
102{
103 NOREF(pCpu);
104 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32))
105 {
106 *pcbSize = 0;
107 *pu64Data = 0;
108 return false;
109 }
110
111 /* divide and conquer */
112 if (pParam->fUse & (DISUSE_REG_GEN64 | DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8))
113 {
114 if (pParam->fUse & DISUSE_REG_GEN32)
115 {
116 *pcbSize = 4;
117 DISFetchReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t *)pu64Data);
118 return true;
119 }
120
121 if (pParam->fUse & DISUSE_REG_GEN16)
122 {
123 *pcbSize = 2;
124 DISFetchReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t *)pu64Data);
125 return true;
126 }
127
128 if (pParam->fUse & DISUSE_REG_GEN8)
129 {
130 *pcbSize = 1;
131 DISFetchReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t *)pu64Data);
132 return true;
133 }
134
135 Assert(pParam->fUse & DISUSE_REG_GEN64);
136 *pcbSize = 8;
137 DISFetchReg64(pRegFrame, pParam->Base.idxGenReg, pu64Data);
138 return true;
139 }
140 else
141 {
142 if (pParam->fUse & (DISUSE_IMMEDIATE64 | DISUSE_IMMEDIATE64_SX8))
143 {
144 *pcbSize = 8;
145 *pu64Data = pParam->uValue;
146 return true;
147 }
148
149 if (pParam->fUse & (DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8))
150 {
151 *pcbSize = 4;
152 *pu64Data = (uint32_t)pParam->uValue;
153 return true;
154 }
155
156 if (pParam->fUse & (DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE16_SX8))
157 {
158 *pcbSize = 2;
159 *pu64Data = (uint16_t)pParam->uValue;
160 return true;
161 }
162
163 if (pParam->fUse & DISUSE_IMMEDIATE8)
164 {
165 *pcbSize = 1;
166 *pu64Data = (uint8_t)pParam->uValue;
167 return true;
168 }
169
170 if (pParam->fUse & DISUSE_REG_SEG)
171 {
172 *pcbSize = 2;
173 DISFetchRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL *)pu64Data);
174 return true;
175 } /* Else - error. */
176
177 AssertFailed();
178 *pcbSize = 0;
179 *pu64Data = 0;
180 return false;
181 }
182}
183
184
185/**
186 * Saves data to the 8/16/32/64-bit general purpose or segment register defined
187 * by the instruction's parameter.
188 *
189 * @returns true on success.
190 * @param pCpu Pointer to current disassembler context.
191 * @param pParam Pointer to parameter of instruction to process.
192 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
193 * @param u64Data 8/16/32/64 bit data to store.
194 */
195bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u64Data)
196{
197 NOREF(pCpu);
198 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32 | DISUSE_DISPLACEMENT64 | DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE16_SX8))
199 {
200 return false;
201 }
202
203 if (pParam->fUse & DISUSE_REG_GEN32)
204 {
205 DISWriteReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t)u64Data);
206 return true;
207 }
208
209 if (pParam->fUse & DISUSE_REG_GEN64)
210 {
211 DISWriteReg64(pRegFrame, pParam->Base.idxGenReg, u64Data);
212 return true;
213 }
214
215 if (pParam->fUse & DISUSE_REG_GEN16)
216 {
217 DISWriteReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t)u64Data);
218 return true;
219 }
220
221 if (pParam->fUse & DISUSE_REG_GEN8)
222 {
223 DISWriteReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t)u64Data);
224 return true;
225 }
226
227 if (pParam->fUse & DISUSE_REG_SEG)
228 {
229 DISWriteRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL)u64Data);
230 return true;
231 }
232
233 /* Else - error. */
234 return false;
235}
236
237
238/**
239 * Deals with complicated MMIO writes.
240 *
241 * Complicated means unaligned or non-dword/qword sized accesses depending on
242 * the MMIO region's access mode flags.
243 *
244 * @returns Strict VBox status code. Any EM scheduling status code,
245 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
246 * VINF_IOM_R3_MMIO_READ may be returned.
247 *
248 * @param pVM Pointer to the VM.
249 * @param pRange The range to write to.
250 * @param GCPhys The physical address to start writing.
251 * @param pvValue The value to write.
252 * @param cbValue The size of the value to write.
253 */
254static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
255{
256 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
257 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
258 VERR_IOM_MMIO_IPE_1);
259 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
260 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
261 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
262 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
263
264 /*
265 * Do debug stop if requested.
266 */
267 int rc = VINF_SUCCESS; NOREF(pVM);
268#ifdef VBOX_STRICT
269 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
270 {
271# ifdef IN_RING3
272 LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
273 R3STRING(pRange->pszDesc)));
274 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
275 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
276 if (rc == VERR_DBGF_NOT_ATTACHED)
277 rc = VINF_SUCCESS;
278# else
279 return VINF_IOM_R3_MMIO_WRITE;
280# endif
281 }
282#endif
283
284 /*
285 * Check if we should ignore the write.
286 */
287 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
288 {
289 Assert(cbValue != 4 || (GCPhys & 3));
290 return VINF_SUCCESS;
291 }
292 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
293 {
294 Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
295 return VINF_SUCCESS;
296 }
297
298 /*
299 * Split and conquer.
300 */
301 for (;;)
302 {
303 unsigned const offAccess = GCPhys & 3;
304 unsigned cbThisPart = 4 - offAccess;
305 if (cbThisPart > cbValue)
306 cbThisPart = cbValue;
307
308 /*
309 * Get the missing bits (if any).
310 */
311 uint32_t u32MissingValue = 0;
312 if (fReadMissing && cbThisPart != 4)
313 {
314 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
315 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
316 switch (rc2)
317 {
318 case VINF_SUCCESS:
319 break;
320 case VINF_IOM_MMIO_UNUSED_FF:
321 u32MissingValue = UINT32_C(0xffffffff);
322 break;
323 case VINF_IOM_MMIO_UNUSED_00:
324 u32MissingValue = 0;
325 break;
326 case VINF_IOM_R3_MMIO_READ:
327 case VINF_IOM_R3_MMIO_READ_WRITE:
328 case VINF_IOM_R3_MMIO_WRITE:
329 /** @todo What if we've split a transfer and already written
330 * something? Since writes generally have side effects, we
331 * could be kind of screwed here...
332 *
333 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
334 * to REM for MMIO accesses (like it may currently do). */
335
336 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
337 return rc2;
338 default:
339 if (RT_FAILURE(rc2))
340 {
341 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
342 return rc2;
343 }
344 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
345 if (rc == VINF_SUCCESS || rc2 < rc)
346 rc = rc2;
347 break;
348 }
349 }
350
351 /*
352 * Merge missing and given bits.
353 */
354 uint32_t u32GivenMask;
355 uint32_t u32GivenValue;
356 switch (cbThisPart)
357 {
358 case 1:
359 u32GivenValue = *(uint8_t const *)pvValue;
360 u32GivenMask = UINT32_C(0x000000ff);
361 break;
362 case 2:
363 u32GivenValue = *(uint16_t const *)pvValue;
364 u32GivenMask = UINT32_C(0x0000ffff);
365 break;
366 case 3:
367 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
368 ((uint8_t const *)pvValue)[2], 0);
369 u32GivenMask = UINT32_C(0x00ffffff);
370 break;
371 case 4:
372 u32GivenValue = *(uint32_t const *)pvValue;
373 u32GivenMask = UINT32_C(0xffffffff);
374 break;
375 default:
376 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
377 }
378 if (offAccess)
379 {
380 u32GivenValue <<= offAccess * 8;
381 u32GivenMask <<= offAccess * 8;
382 }
383
384 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
385 | (u32GivenValue & u32GivenMask);
386
387 /*
388 * Do DWORD write to the device.
389 */
390 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
391 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
392 switch (rc2)
393 {
394 case VINF_SUCCESS:
395 break;
396 case VINF_IOM_R3_MMIO_READ:
397 case VINF_IOM_R3_MMIO_READ_WRITE:
398 case VINF_IOM_R3_MMIO_WRITE:
399 /** @todo What if we've split a transfer and already written
400 * something? Since writes can have side effects, we could be
401 * kind of screwed here...
402 *
403 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
404 * to REM for MMIO accesses (like it may currently do). */
405 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
406 return rc2;
407 default:
408 if (RT_FAILURE(rc2))
409 {
410 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
411 return rc2;
412 }
413 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
414 if (rc == VINF_SUCCESS || rc2 < rc)
415 rc = rc2;
416 break;
417 }
418
419 /*
420 * Advance.
421 */
422 cbValue -= cbThisPart;
423 if (!cbValue)
424 break;
425 GCPhys += cbThisPart;
426 pvValue = (uint8_t const *)pvValue + cbThisPart;
427 }
428
429 return rc;
430}
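/* Illustrative worked example of the merge above: a one-byte write of 0xAB at
 * offAccess == 1, where the containing dword reads back as 0x11223344, yields
 *
 *     u32GivenValue = 0xAB << 8;  // 0x0000AB00
 *     u32GivenMask  = 0xff << 8;  // 0x0000FF00
 *     u32Value      = (0x11223344 & ~0x0000FF00) | 0x0000AB00; // 0x1122AB44
 *
 * which is then written back to the device as a single aligned dword. */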
431
432
433
434
435/**
436 * Wrapper which does the write and updates range statistics when such are enabled.
437 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
438 */
439static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
440 const void *pvData, unsigned cb)
441{
442#ifdef VBOX_WITH_STATISTICS
443 int rcSem = IOM_LOCK_SHARED(pVM);
444 if (rcSem == VERR_SEM_BUSY)
445 return VINF_IOM_R3_MMIO_WRITE;
446 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
447 if (!pStats)
448# ifdef IN_RING3
449 return VERR_NO_MEMORY;
450# else
451 return VINF_IOM_R3_MMIO_WRITE;
452# endif
453 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
454#else
455 NOREF(pVCpu);
456#endif
457
458 VBOXSTRICTRC rcStrict;
459 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
460 {
461 if ( (cb == 4 && !(GCPhysFault & 3))
462 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
463 || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
464 rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
465 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
466 else
467 rcStrict = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
468 }
469 else
470 rcStrict = VINF_SUCCESS;
471
472 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
473 STAM_COUNTER_INC(&pStats->Accesses);
474 return rcStrict;
475}
476
477
478/**
479 * Deals with complicated MMIO reads.
480 *
481 * Complicated means unaligned or non-dword/qword sized accesses depending on
482 * the MMIO region's access mode flags.
483 *
484 * @returns Strict VBox status code. Any EM scheduling status code,
485 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
486 * VINF_IOM_R3_MMIO_WRITE may be returned.
487 *
488 * @param pVM Pointer to the VM.
489 * @param pRange The range to read from.
490 * @param GCPhys The physical address to start reading.
491 * @param pvValue Where to store the value.
492 * @param cbValue The size of the value to read.
493 */
494static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
495{
496 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
497 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
498 VERR_IOM_MMIO_IPE_1);
499 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
500 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
501
502 /*
503 * Do debug stop if requested.
504 */
505 int rc = VINF_SUCCESS; NOREF(pVM);
506#ifdef VBOX_STRICT
507 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
508 {
509# ifdef IN_RING3
510 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
511 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
512 if (rc == VERR_DBGF_NOT_ATTACHED)
513 rc = VINF_SUCCESS;
514# else
515 return VINF_IOM_R3_MMIO_READ;
516# endif
517 }
518#endif
519
520 /*
521 * Split and conquer.
522 */
523 for (;;)
524 {
525 /*
526 * Do DWORD read from the device.
527 */
528 uint32_t u32Value;
529 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
530 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
531 switch (rc2)
532 {
533 case VINF_SUCCESS:
534 break;
535 case VINF_IOM_MMIO_UNUSED_FF:
536 u32Value = UINT32_C(0xffffffff);
537 break;
538 case VINF_IOM_MMIO_UNUSED_00:
539 u32Value = 0;
540 break;
541 case VINF_IOM_R3_MMIO_READ:
542 case VINF_IOM_R3_MMIO_READ_WRITE:
543 case VINF_IOM_R3_MMIO_WRITE:
544 /** @todo What if we've split a transfer and already read
545 * something? Since reads can have sideeffects we could be
546 * kind of screwed here... */
547 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
548 return rc2;
549 default:
550 if (RT_FAILURE(rc2))
551 {
552 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
553 return rc2;
554 }
555 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
556 if (rc == VINF_SUCCESS || rc2 < rc)
557 rc = rc2;
558 break;
559 }
560 u32Value >>= (GCPhys & 3) * 8;
561
562 /*
563 * Write what we've read.
564 */
565 unsigned cbThisPart = 4 - (GCPhys & 3);
566 if (cbThisPart > cbValue)
567 cbThisPart = cbValue;
568
569 switch (cbThisPart)
570 {
571 case 1:
572 *(uint8_t *)pvValue = (uint8_t)u32Value;
573 break;
574 case 2:
575 *(uint16_t *)pvValue = (uint16_t)u32Value;
576 break;
577 case 3:
578 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
579 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
580 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
581 break;
582 case 4:
583 *(uint32_t *)pvValue = u32Value;
584 break;
585 }
586
587 /*
588 * Advance.
589 */
590 cbValue -= cbThisPart;
591 if (!cbValue)
592 break;
593 GCPhys += cbThisPart;
594 pvValue = (uint8_t *)pvValue + cbThisPart;
595 }
596
597 return rc;
598}
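/* Illustrative example of the split above: a 2-byte read at (GCPhys & 3) == 3
 * crosses a dword boundary and becomes two aligned dword reads:
 *
 *     pass 1: read the dword at GCPhys & ~3, u32Value >>= 24, store 1 byte;
 *     pass 2: read the following dword,      u32Value >>= 0,  store 1 byte.
 */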
599
600
601/**
602 * Implements VINF_IOM_MMIO_UNUSED_FF.
603 *
604 * @returns VINF_SUCCESS.
605 * @param pvValue Where to store the 0xff bytes.
606 * @param cbValue How many bytes to fill.
607 */
608static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
609{
610 switch (cbValue)
611 {
612 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
613 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
614 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
615 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
616 default:
617 {
618 uint8_t *pb = (uint8_t *)pvValue;
619 while (cbValue--)
620 *pb++ = UINT8_C(0xff);
621 break;
622 }
623 }
624 return VINF_SUCCESS;
625}
626
627
628/**
629 * Implements VINF_IOM_MMIO_UNUSED_00.
630 *
631 * @returns VINF_SUCCESS.
632 * @param pvValue Where to store the zeros.
633 * @param cbValue How many bytes to read.
634 */
635static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
636{
637 switch (cbValue)
638 {
639 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
640 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
641 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
642 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
643 default:
644 {
645 uint8_t *pb = (uint8_t *)pvValue;
646 while (cbValue--)
647 *pb++ = UINT8_C(0x00);
648 break;
649 }
650 }
651 return VINF_SUCCESS;
652}
653
654
655/**
656 * Wrapper which does the read and updates range statistics when such are enabled.
657 */
658DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
659 void *pvValue, unsigned cbValue)
660{
661#ifdef VBOX_WITH_STATISTICS
662 int rcSem = IOM_LOCK_SHARED(pVM);
663 if (rcSem == VERR_SEM_BUSY)
664 return VINF_IOM_R3_MMIO_READ;
665 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
666 if (!pStats)
667# ifdef IN_RING3
668 return VERR_NO_MEMORY;
669# else
670 return VINF_IOM_R3_MMIO_READ;
671# endif
672 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
673#else
674 NOREF(pVCpu);
675#endif
676
677 VBOXSTRICTRC rcStrict;
678 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
679 {
680 if ( ( cbValue == 4
681 && !(GCPhys & 3))
682 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
683 || ( cbValue == 8
684 && !(GCPhys & 7)
685 && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
686 rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
687 pvValue, cbValue);
688 else
689 rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
690 }
691 else
692 rcStrict = VINF_IOM_MMIO_UNUSED_FF;
693 if (rcStrict != VINF_SUCCESS)
694 {
695 switch (VBOXSTRICTRC_VAL(rcStrict))
696 {
697 case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
698 case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
699 }
700 }
701
702 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
703 STAM_COUNTER_INC(&pStats->Accesses);
704 return rcStrict;
705}
706
707
708/**
709 * Internal - statistics only.
710 */
711DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
712{
713#ifdef VBOX_WITH_STATISTICS
714 switch (cb)
715 {
716 case 1:
717 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
718 break;
719 case 2:
720 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
721 break;
722 case 4:
723 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
724 break;
725 case 8:
726 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
727 break;
728 default:
729 /* No way. */
730 AssertMsgFailed(("Invalid data length %d\n", cb));
731 break;
732 }
733#else
734 NOREF(pVM); NOREF(cb);
735#endif
736}
737
738
739#ifndef IEM_USE_IEM_INSTEAD
740
741/**
742 * MOV reg, mem (read)
743 * MOVZX reg, mem (read)
744 * MOVSX reg, mem (read)
745 *
746 * @returns VBox status code.
747 *
748 * @param pVM The virtual machine.
749 * @param pVCpu Pointer to the virtual CPU structure of the caller.
750 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
751 * @param pCpu Disassembler CPU state.
752 * @param pRange Pointer to the MMIO range.
753 * @param GCPhysFault The GC physical address corresponding to pvFault.
754 */
755static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
756 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
757{
758 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
759
760 /*
761 * Get the data size from parameter 2,
762 * and call the handler function to get the data.
763 */
764 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
765 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
766
767 uint64_t u64Data = 0;
768 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
769 if (rc == VINF_SUCCESS)
770 {
771 /*
772 * Do sign extension for MOVSX.
773 */
774 /** @todo check up on the MOVSX implementation! */
775 if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
776 {
777 if (cb == 1)
778 {
779 /* DWORD <- BYTE */
780 int64_t iData = (int8_t)u64Data;
781 u64Data = (uint64_t)iData;
782 }
783 else
784 {
785 /* DWORD <- WORD */
786 int64_t iData = (int16_t)u64Data;
787 u64Data = (uint64_t)iData;
788 }
789 }
790
791 /*
792 * Store the result to register (parameter 1).
793 */
794 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
795 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
796 }
797
798 if (rc == VINF_SUCCESS)
799 iomMMIOStatLength(pVM, cb);
800 return rc;
801}
802
803
804/**
805 * MOV mem, reg|imm (write)
806 *
807 * @returns VBox status code.
808 *
809 * @param pVM The virtual machine.
810 * @param pVCpu Pointer to the virtual CPU structure of the caller.
811 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
812 * @param pCpu Disassembler CPU state.
813 * @param pRange Pointer to the MMIO range.
814 * @param GCPhysFault The GC physical address corresponding to pvFault.
815 */
816static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
817 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
818{
819 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
820
821 /*
822 * Get data to write from second parameter,
823 * and call the callback to write it.
824 */
825 unsigned cb = 0;
826 uint64_t u64Data = 0;
827 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
828 AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);
829
830 int rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
831 if (rc == VINF_SUCCESS)
832 iomMMIOStatLength(pVM, cb);
833 return rc;
834}
835
836
837/** Wrapper for reading virtual memory. */
838DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
839{
840 /* Note: This will fail in R0 or RC if it hits an access handler. That
841 isn't a problem though since the operation can be restarted in REM. */
842#ifdef IN_RC
843 NOREF(pVCpu);
844 int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
845 /* Page may be protected and not directly accessible. */
846 if (rc == VERR_ACCESS_DENIED)
847 rc = VINF_IOM_R3_IOPORT_WRITE;
848 return rc;
849#else
850 return VBOXSTRICTRC_VAL(PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM));
851#endif
852}
853
854
855/** Wrapper for writing virtual memory. */
856DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
857{
858 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
859 * raw mode code. Some thought needs to be spent on theoretical concurrency issues as
860 * well, since we're not behind the PGM lock and the handler may change between calls.
861 *
862 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
863 * the state of some shadowed structures. */
864#if defined(IN_RING0) || defined(IN_RC)
865 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
866#else
867 NOREF(pCtxCore);
868 return VBOXSTRICTRC_VAL(PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM));
869#endif
870}
871
872
873#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
874/**
875 * [REP] MOVSB
876 * [REP] MOVSW
877 * [REP] MOVSD
878 *
879 * Restricted implementation.
880 *
881 *
882 * @returns VBox status code.
883 *
884 * @param pVM The virtual machine.
885 * @param fWriteAccess Whether the access is a write (true) or a read (false).
886 * @param pRegFrame Trap register frame.
887 * @param GCPhysFault The GC physical address corresponding to pvFault.
888 * @param pCpu Disassembler CPU state.
889 * @param pRange Pointer to the MMIO range.
890 * @param ppStat Which sub-sample to attribute this call to.
891 */
892static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
893 PSTAMPROFILE *ppStat)
894{
895 /*
896 * We do not support segment prefixes or REPNE.
897 */
898 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
899 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
900
901 PVMCPU pVCpu = VMMGetCpu(pVM);
902
903 /*
904 * Get the number of bytes/words/dwords/qwords to copy.
905 */
906 uint32_t cTransfers = 1;
907 if (pCpu->fPrefix & DISPREFIX_REP)
908 {
909#ifndef IN_RC
910 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
911 && pRegFrame->rcx >= _4G)
912 return VINF_EM_RAW_EMULATE_INSTR;
913#endif
914
915 cTransfers = pRegFrame->ecx;
916 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
917 cTransfers &= 0xffff;
918
919 if (!cTransfers)
920 return VINF_SUCCESS;
921 }
922
923 /* Get the current privilege level. */
924 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
925
926 /*
927 * Get data size.
928 */
929 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
930 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
931 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
932
933#ifdef VBOX_WITH_STATISTICS
934 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
935 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
936#endif
937
938/** @todo re-evaluate on page boundaries. */
939
940 RTGCPHYS Phys = GCPhysFault;
941 int rc;
942 if (fWriteAccess)
943 {
944 /*
945 * Write operation: [Mem] -> [MMIO]
946 * ds:esi (Virt Src) -> es:edi (Phys Dst)
947 */
948 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
949
950 /* Check callback. */
951 if (!pRange->CTX_SUFF(pfnWriteCallback))
952 return VINF_IOM_R3_MMIO_WRITE;
953
954 /* Convert source address ds:esi. */
955 RTGCUINTPTR pu8Virt;
956 rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
957 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
958 (PRTGCPTR)&pu8Virt);
959 if (RT_SUCCESS(rc))
960 {
961
962 /* Access verification first; we currently can't recover properly from traps inside this instruction */
963 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
964 if (rc != VINF_SUCCESS)
965 {
966 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
967 return VINF_EM_RAW_EMULATE_INSTR;
968 }
969
970#ifdef IN_RC
971 MMGCRamRegisterTrapHandler(pVM);
972#endif
973
974 /* copy loop. */
975 while (cTransfers)
976 {
977 uint32_t u32Data = 0;
978 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
979 if (rc != VINF_SUCCESS)
980 break;
981 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb));
982 if (rc != VINF_SUCCESS)
983 break;
984
985 pu8Virt += offIncrement;
986 Phys += offIncrement;
987 pRegFrame->rsi += offIncrement;
988 pRegFrame->rdi += offIncrement;
989 cTransfers--;
990 }
991#ifdef IN_RC
992 MMGCRamDeregisterTrapHandler(pVM);
993#endif
994 /* Update ecx. */
995 if (pCpu->fPrefix & DISPREFIX_REP)
996 pRegFrame->ecx = cTransfers;
997 }
998 else
999 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1000 }
1001 else
1002 {
1003 /*
1004 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
1005 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
1006 */
1007 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
1008
1009 /* Check callback. */
1010 if (!pRange->CTX_SUFF(pfnReadCallback))
1011 return VINF_IOM_R3_MMIO_READ;
1012
1013 /* Convert destination address. */
1014 RTGCUINTPTR pu8Virt;
1015 rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1016 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1017 (RTGCPTR *)&pu8Virt);
1018 if (RT_FAILURE(rc))
1019 return VINF_IOM_R3_MMIO_READ;
1020
1021 /* Check if destination address is MMIO. */
1022 PIOMMMIORANGE pMMIODst;
1023 RTGCPHYS PhysDst;
1024 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
1025 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
1026 if ( RT_SUCCESS(rc)
1027 && (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
1028 {
1029 /** @todo implement per-device locks for MMIO access. */
1030 Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1031
1032 /*
1033 * Extra: [MMIO] -> [MMIO]
1034 */
1035 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
1036 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
1037 {
1038 iomMmioReleaseRange(pVM, pRange);
1039 return VINF_IOM_R3_MMIO_READ_WRITE;
1040 }
1041
1042 /* copy loop. */
1043 while (cTransfers)
1044 {
1045 uint32_t u32Data;
1046 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1047 if (rc != VINF_SUCCESS)
1048 break;
1049 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb));
1050 if (rc != VINF_SUCCESS)
1051 break;
1052
1053 Phys += offIncrement;
1054 PhysDst += offIncrement;
1055 pRegFrame->rsi += offIncrement;
1056 pRegFrame->rdi += offIncrement;
1057 cTransfers--;
1058 }
1059 iomMmioReleaseRange(pVM, pRange);
1060 }
1061 else
1062 {
1063 /*
1064 * Normal: [MMIO] -> [Mem]
1065 */
1066 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1067 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1068 if (rc != VINF_SUCCESS)
1069 {
1070 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
1071 return VINF_EM_RAW_EMULATE_INSTR;
1072 }
1073
1074 /* copy loop. */
1075#ifdef IN_RC
1076 MMGCRamRegisterTrapHandler(pVM);
1077#endif
1078 while (cTransfers)
1079 {
1080 uint32_t u32Data;
1081 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1082 if (rc != VINF_SUCCESS)
1083 break;
1084 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
1085 if (rc != VINF_SUCCESS)
1086 {
1087 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
1088 break;
1089 }
1090
1091 pu8Virt += offIncrement;
1092 Phys += offIncrement;
1093 pRegFrame->rsi += offIncrement;
1094 pRegFrame->rdi += offIncrement;
1095 cTransfers--;
1096 }
1097#ifdef IN_RC
1098 MMGCRamDeregisterTrapHandler(pVM);
1099#endif
1100 }
1101
1102 /* Update ecx on exit. */
1103 if (pCpu->fPrefix & DISPREFIX_REP)
1104 pRegFrame->ecx = cTransfers;
1105 }
1106
1107 /* work statistics. */
1108 if (rc == VINF_SUCCESS)
1109 iomMMIOStatLength(pVM, cb);
1110 NOREF(ppStat);
1111 return rc;
1112}
1113#endif /* IOM_WITH_MOVS_SUPPORT */
1114
1115
1116/**
1117 * Gets the address / opcode mask corresponding to the given CPU mode.
1118 *
1119 * @returns Mask.
1120 * @param enmCpuMode CPU mode.
1121 */
1122static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
1123{
1124 switch (enmCpuMode)
1125 {
1126 case DISCPUMODE_16BIT: return UINT16_MAX;
1127 case DISCPUMODE_32BIT: return UINT32_MAX;
1128 case DISCPUMODE_64BIT: return UINT64_MAX;
1129 default:
1130 AssertFailedReturn(UINT32_MAX);
1131 }
1132}
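/* Illustrative use of the returned mask: index registers are wrapped according
 * to the address size, e.g. a 16-bit STOSW with DF=0 advances DI as
 *
 *     uint64_t fAddrMask = iomDisModeToMask(DISCPUMODE_16BIT); // 0xffff
 *     pRegFrame->rdi = ((pRegFrame->rdi + 2) & fAddrMask)      // wrap DI
 *                    | (pRegFrame->rdi & ~fAddrMask);          // keep upper bits
 */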
1133
1134
1135/**
1136 * [REP] STOSB
1137 * [REP] STOSW
1138 * [REP] STOSD
1139 *
1140 * Restricted implementation.
1141 *
1142 *
1143 * @returns VBox status code.
1144 *
1145 * @param pVM The virtual machine.
1146 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1147 * @param pRegFrame Trap register frame.
1148 * @param GCPhysFault The GC physical address corresponding to pvFault.
1149 * @param pCpu Disassembler CPU state.
1150 * @param pRange Pointer to the MMIO range.
1151 */
1152static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
1153 PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1154{
1155 /*
1156 * We do not support segment prefixes or REPNE.
1157 */
1158 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
1159 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1160
1161 /*
1162 * Get the number of bytes/words/dwords/qwords to copy.
1163 */
1164 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1165 RTGCUINTREG cTransfers = 1;
1166 if (pCpu->fPrefix & DISPREFIX_REP)
1167 {
1168#ifndef IN_RC
1169 if ( CPUMIsGuestIn64BitCode(pVCpu)
1170 && pRegFrame->rcx >= _4G)
1171 return VINF_EM_RAW_EMULATE_INSTR;
1172#endif
1173
1174 cTransfers = pRegFrame->rcx & fAddrMask;
1175 if (!cTransfers)
1176 return VINF_SUCCESS;
1177 }
1178
1179/** @todo r=bird: bounds checks! */
1180
1181 /*
1182 * Get data size.
1183 */
1184 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
1185 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1186 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1187
1188#ifdef VBOX_WITH_STATISTICS
1189 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
1190 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
1191#endif
1192
1193
1194 RTGCPHYS Phys = GCPhysFault;
1195 int rc;
1196 if ( pRange->CTX_SUFF(pfnFillCallback)
1197 && cb <= 4 /* can only fill 32-bit values */)
1198 {
1199 /*
1200 * Use the fill callback.
1201 */
1202 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
1203 if (offIncrement > 0)
1204 {
1205 /* addr++ variant. */
1206 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
1207 pRegFrame->eax, cb, cTransfers);
1208 if (rc == VINF_SUCCESS)
1209 {
1210 /* Update registers. */
1211 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1212 | (pRegFrame->rdi & ~fAddrMask);
1213 if (pCpu->fPrefix & DISPREFIX_REP)
1214 pRegFrame->rcx &= ~fAddrMask;
1215 }
1216 }
1217 else
1218 {
1219 /* addr-- variant. */
1220 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1221 Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
1222 pRegFrame->eax, cb, cTransfers);
1223 if (rc == VINF_SUCCESS)
1224 {
1225 /* Update registers. */
1226 pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1227 | (pRegFrame->rdi & ~fAddrMask);
1228 if (pCpu->fPrefix & DISPREFIX_REP)
1229 pRegFrame->rcx &= ~fAddrMask;
1230 }
1231 }
1232 }
1233 else
1234 {
1235 /*
1236 * Use the write callback.
1237 */
1238 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
1239 uint64_t u64Data = pRegFrame->rax;
1240
1241 /* fill loop. */
1242 do
1243 {
1244 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
1245 if (rc != VINF_SUCCESS)
1246 break;
1247
1248 Phys += offIncrement;
1249 pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
1250 | (pRegFrame->rdi & ~fAddrMask);
1251 cTransfers--;
1252 } while (cTransfers);
1253
1254 /* Update rcx on exit. */
1255 if (pCpu->fPrefix & DISPREFIX_REP)
1256 pRegFrame->rcx = (cTransfers & fAddrMask)
1257 | (pRegFrame->rcx & ~fAddrMask);
1258 }
1259
1260 /*
1261 * Work statistics and return.
1262 */
1263 if (rc == VINF_SUCCESS)
1264 iomMMIOStatLength(pVM, cb);
1265 return rc;
1266}
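/* Illustrative note on the addr-- variant above: with DF=1 the guest stores
 * downwards, but pfnFillCallback takes the lowest address of the block, hence
 * Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)). E.g. four word-sized stores
 * ending at Phys cover the byte range [Phys - 6, Phys + 1]. */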
1267
1268
1269/**
1270 * [REP] LODSB
1271 * [REP] LODSW
1272 * [REP] LODSD
1273 *
1274 * Restricted implementation.
1275 *
1276 *
1277 * @returns VBox status code.
1278 *
1279 * @param pVM The virtual machine.
1280 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1281 * @param pRegFrame Trap register frame.
1282 * @param GCPhysFault The GC physical address corresponding to pvFault.
1283 * @param pCpu Disassembler CPU state.
1284 * @param pRange Pointer to the MMIO range.
1285 */
1286static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1287 PIOMMMIORANGE pRange)
1288{
1289 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1290
1291 /*
1292 * We do not support segment prefixes or REP*.
1293 */
1294 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
1295 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1296
1297 /*
1298 * Get data size.
1299 */
1300 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
1301 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1302 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1303
1304 /*
1305 * Perform read.
1306 */
1307 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb));
1308 if (rc == VINF_SUCCESS)
1309 {
1310 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1311 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
1312 | (pRegFrame->rsi & ~fAddrMask);
1313 }
1314
1315 /*
1316 * Work statistics and return.
1317 */
1318 if (rc == VINF_SUCCESS)
1319 iomMMIOStatLength(pVM, cb);
1320 return rc;
1321}
1322
1323
1324/**
1325 * CMP [MMIO], reg|imm
1326 * CMP reg|imm, [MMIO]
1327 *
1328 * Restricted implementation.
1329 *
1330 *
1331 * @returns VBox status code.
1332 *
1333 * @param pVM The virtual machine.
1334 * @param pRegFrame Trap register frame.
1335 * @param GCPhysFault The GC physical address corresponding to pvFault.
1336 * @param pCpu Disassembler CPU state.
1337 * @param pRange Pointer to the MMIO range.
1338 */
1339static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1340 PIOMMMIORANGE pRange)
1341{
1342 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1343
1344 /*
1345 * Get the operands.
1346 */
1347 unsigned cb = 0;
1348 uint64_t uData1 = 0;
1349 uint64_t uData2 = 0;
1350 int rc;
1351 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1352 /* cmp reg, [MMIO]. */
1353 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1354 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1355 /* cmp [MMIO], reg|imm. */
1356 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1357 else
1358 {
1359 AssertMsgFailed(("Disassembler CMP problem...\n"));
1360 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1361 }
1362
1363 if (rc == VINF_SUCCESS)
1364 {
1365#if HC_ARCH_BITS == 32
1366 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1367 if (cb > 4)
1368 return VINF_IOM_R3_MMIO_READ_WRITE;
1369#endif
1370 /* Emulate CMP and update guest flags. */
1371 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
1372 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1373 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1374 iomMMIOStatLength(pVM, cb);
1375 }
1376
1377 return rc;
1378}
1379
1380
1381/**
1382 * AND [MMIO], reg|imm
1383 * AND reg, [MMIO]
1384 * OR [MMIO], reg|imm
1385 * OR reg, [MMIO]
1386 *
1387 * Restricted implementation.
1388 *
1389 *
1390 * @returns VBox status code.
1391 *
1392 * @param pVM The virtual machine.
1393 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1394 * @param pRegFrame Trap register frame.
1395 * @param GCPhysFault The GC physical address corresponding to pvFault.
1396 * @param pCpu Disassembler CPU state.
1397 * @param pRange Pointer to the MMIO range.
1398 * @param pfnEmulate Instruction emulation function.
1399 */
1400static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1401 PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
1402{
1403 unsigned cb = 0;
1404 uint64_t uData1 = 0;
1405 uint64_t uData2 = 0;
1406 bool fAndWrite;
1407 int rc;
1408
1409#ifdef LOG_ENABLED
1410 const char *pszInstr;
1411
1412 if (pCpu->pCurInstr->uOpcode == OP_XOR)
1413 pszInstr = "Xor";
1414 else if (pCpu->pCurInstr->uOpcode == OP_OR)
1415 pszInstr = "Or";
1416 else if (pCpu->pCurInstr->uOpcode == OP_AND)
1417 pszInstr = "And";
1418 else
1419 pszInstr = "OrXorAnd??";
1420#endif
1421
1422 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1423 {
1424#if HC_ARCH_BITS == 32
1425 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1426 if (cb > 4)
1427 return VINF_IOM_R3_MMIO_READ_WRITE;
1428#endif
1429 /* and reg, [MMIO]. */
1430 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1431 fAndWrite = false;
1432 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1433 }
1434 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1435 {
1436#if HC_ARCH_BITS == 32
1437 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1438 if (cb > 4)
1439 return VINF_IOM_R3_MMIO_READ_WRITE;
1440#endif
1441 /* and [MMIO], reg|imm. */
1442 fAndWrite = true;
1443 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
1444 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
1445 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1446 else
1447 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1448 }
1449 else
1450 {
1451 AssertMsgFailed(("Disassembler OR/XOR/AND problem...\n"));
1452 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1453 }
1454
1455 if (rc == VINF_SUCCESS)
1456 {
1457 /* Emulate AND and update guest flags. */
1458 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
1459
1460 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
1461
1462 if (fAndWrite)
1463 /* Store result to MMIO. */
1464 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1465 else
1466 {
1467 /* Store result to register. */
1468 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
1469 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1470 }
1471 if (rc == VINF_SUCCESS)
1472 {
1473 /* Update guest's eflags and finish. */
1474 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1475 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1476 iomMMIOStatLength(pVM, cb);
1477 }
1478 }
1479
1480 return rc;
1481}
1482
1483
1484/**
1485 * TEST [MMIO], reg|imm
1486 * TEST reg, [MMIO]
1487 *
1488 * Restricted implementation.
1489 *
1490 *
1491 * @returns VBox status code.
1492 *
1493 * @param pVM The virtual machine.
1494 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1495 * @param pRegFrame Trap register frame.
1496 * @param GCPhysFault The GC physical address corresponding to pvFault.
1497 * @param pCpu Disassembler CPU state.
1498 * @param pRange Pointer to the MMIO range.
1499 */
1500static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1501 PIOMMMIORANGE pRange)
1502{
1503 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1504
1505 unsigned cb = 0;
1506 uint64_t uData1 = 0;
1507 uint64_t uData2 = 0;
1508 int rc;
1509
1510 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1511 {
1512 /* test reg, [MMIO]. */
1513 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1514 }
1515 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1516 {
1517 /* test [MMIO], reg|imm. */
1518 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1519 }
1520 else
1521 {
1522 AssertMsgFailed(("Disassembler TEST problem...\n"));
1523 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1524 }
1525
1526 if (rc == VINF_SUCCESS)
1527 {
1528#if HC_ARCH_BITS == 32
1529 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1530 if (cb > 4)
1531 return VINF_IOM_R3_MMIO_READ_WRITE;
1532#endif
1533
1534 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
1535 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
1536 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1537 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1538 iomMMIOStatLength(pVM, cb);
1539 }
1540
1541 return rc;
1542}
1543
1544
1545/**
1546 * BT [MMIO], reg|imm
1547 *
1548 * Restricted implementation.
1549 *
1550 *
1551 * @returns VBox status code.
1552 *
1553 * @param pVM The virtual machine.
1554 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1555 * @param pRegFrame Trap register frame.
1556 * @param GCPhysFault The GC physical address corresponding to pvFault.
1557 * @param pCpu Disassembler CPU state.
1558 * @param pRange Pointer to the MMIO range.
1559 */
1560static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1561 PIOMMMIORANGE pRange)
1562{
1563 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1564
1565 uint64_t uBit = 0;
1566 uint64_t uData = 0;
1567 unsigned cbIgnored;
1568
1569 if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
1570 {
1571 AssertMsgFailed(("Disassembler BT problem...\n"));
1572 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1573 }
1574 /* Only the size of the memory operand matters here. */
1575 unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
1576
1577 /* bt [MMIO], reg|imm. */
1578 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData));
1579 if (rc == VINF_SUCCESS)
1580 {
1581 /* Extract the bit being tested and store it in the carry flag. */
1582 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
1583 iomMMIOStatLength(pVM, cbData);
1584 }
1585
1586 return rc;
1587}
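/* Illustrative note: u1CF is a one-bit bitfield, so the assignment above
 * implicitly masks the shifted value, i.e. CF = (uData >> uBit) & 1. */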
1588
1589/**
1590 * XCHG [MMIO], reg
1591 * XCHG reg, [MMIO]
1592 *
1593 * Restricted implementation.
1594 *
1595 *
1596 * @returns VBox status code.
1597 *
1598 * @param pVM The virtual machine.
1599 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1600 * @param pRegFrame Trap register frame.
1601 * @param GCPhysFault The GC physical address corresponding to pvFault.
1602 * @param pCpu Disassembler CPU state.
1603 * @param pRange Pointer to the MMIO range.
1604 */
1605static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1606 PIOMMMIORANGE pRange)
1607{
1608 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1609 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1610 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1611 return VINF_IOM_R3_MMIO_READ_WRITE;
1612
1613 int rc;
1614 unsigned cb = 0;
1615 uint64_t uData1 = 0;
1616 uint64_t uData2 = 0;
1617 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1618 {
1619 /* xchg reg, [MMIO]. */
1620 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1621 if (rc == VINF_SUCCESS)
1622 {
1623 /* Store result to MMIO. */
1624 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1625
1626 if (rc == VINF_SUCCESS)
1627 {
1628 /* Store result to register. */
1629 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
1630 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1631 }
1632 else
1633 Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1634 }
1635 else
1636 Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1637 }
1638 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1639 {
1640 /* xchg [MMIO], reg. */
1641 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1642 if (rc == VINF_SUCCESS)
1643 {
1644 /* Store result to MMIO. */
1645 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1646 if (rc == VINF_SUCCESS)
1647 {
1648 /* Store result to register. */
1649 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
1650 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1651 }
1652 else
1653 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1654 }
1655 else
1656 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1657 }
1658 else
1659 {
1660 AssertMsgFailed(("Disassembler XCHG problem...\n"));
1661 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1662 }
1663 return rc;
1664}
1665
1666#endif /* !IEM_USE_IEM_INSTEAD */
1667
1668/**
1669 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
1670 *
1671 * @returns VBox status code (appropriate for GC return).
1672 * @param pVM Pointer to the VM.
1673 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1674 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1675 * any error code (the EPT misconfig hack).
1676 * @param pCtxCore Trap register frame.
1677 * @param GCPhysFault The GC physical address corresponding to pvFault.
1678 * @param pvUser Pointer to the MMIO ring-3 range entry.
1679 */
1680static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
1681 RTGCPHYS GCPhysFault, void *pvUser)
1682{
1683 int rc = IOM_LOCK_SHARED(pVM);
1684#ifndef IN_RING3
1685 if (rc == VERR_SEM_BUSY)
1686 return VINF_IOM_R3_MMIO_READ_WRITE;
1687#endif
1688 AssertRC(rc);
1689
1690 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1691 Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1692
1693 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1694 Assert(pRange);
1695 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1696 iomMmioRetainRange(pRange);
1697#ifndef VBOX_WITH_STATISTICS
1698 IOM_UNLOCK_SHARED(pVM);
1699
1700#else
1701 /*
1702 * Locate the statistics.
1703 */
1704 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
1705 if (!pStats)
1706 {
1707 iomMmioReleaseRange(pVM, pRange);
1708# ifdef IN_RING3
1709 return VERR_NO_MEMORY;
1710# else
1711 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1712 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1713 return VINF_IOM_R3_MMIO_READ_WRITE;
1714# endif
1715 }
1716#endif
1717
1718#ifndef IN_RING3
1719 /*
1720 * Should we defer the request right away? This isn't usually the case, so
1721 * do the simple test first and then try to deal with uErrorCode being N/A.
1722 */
1723 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1724 || !pRange->CTX_SUFF(pfnReadCallback))
1725 && ( uErrorCode == UINT32_MAX
1726 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1727 : uErrorCode & X86_TRAP_PF_RW
1728 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1729 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1730 )
1731 )
1732 )
1733 {
1734 if (uErrorCode & X86_TRAP_PF_RW)
1735 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1736 else
1737 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1738
1739 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1740 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1741 iomMmioReleaseRange(pVM, pRange);
1742 return VINF_IOM_R3_MMIO_READ_WRITE;
1743 }
1744#endif /* !IN_RING3 */
1745
1746 /*
1747 * The range is already retained; enter the device's critical section.
1748 */
1749 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1750 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1751 if (rc != VINF_SUCCESS)
1752 {
1753 iomMmioReleaseRange(pVM, pRange);
1754 return rc;
1755 }
1756
1757#ifdef IEM_USE_IEM_INSTEAD
1758
1759 /*
1760 * Let IEM call us back via iomMmioHandler.
1761 */
1762 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
1763
1764 NOREF(pCtxCore); NOREF(GCPhysFault);
1765 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1766 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1767 iomMmioReleaseRange(pVM, pRange);
1768 if (RT_SUCCESS(rcStrict))
1769 return rcStrict;
1770 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1771 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1772 {
1773 Log(("IOM: Hit unsupported IEM feature!\n"));
1774 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
1775 }
1776 return rcStrict;
1777
1778#else
1779
1780 /*
1781 * Disassemble the instruction and interpret it.
1782 */
1783 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1784 unsigned cbOp;
1785 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1786 if (RT_FAILURE(rc))
1787 {
1788 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1789 iomMmioReleaseRange(pVM, pRange);
1790 return rc;
1791 }
1792 switch (pDis->pCurInstr->uOpcode)
1793 {
1794 case OP_MOV:
1795 case OP_MOVZX:
1796 case OP_MOVSX:
1797 {
1798 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1799 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1800 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1801 ? uErrorCode & X86_TRAP_PF_RW
1802 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1803 rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1804 else
1805 rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1806 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1807 break;
1808 }
1809
1810
1811# ifdef IOM_WITH_MOVS_SUPPORT
1812 case OP_MOVSB:
1813 case OP_MOVSWD:
1814 {
1815 if (uErrorCode == UINT32_MAX)
1816 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1817 else
1818 {
1819 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1820 PSTAMPROFILE pStat = NULL;
1821 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1822 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1823 }
1824 break;
1825 }
1826# endif
1827
1828 case OP_STOSB:
1829 case OP_STOSWD:
1830 Assert(uErrorCode & X86_TRAP_PF_RW);
1831 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1832 rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1833 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1834 break;
1835
1836 case OP_LODSB:
1837 case OP_LODSWD:
1838 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1839 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1840 rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1841 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1842 break;
1843
1844 case OP_CMP:
1845 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1846 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1847 rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1848 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1849 break;
1850
1851 case OP_AND:
1852 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1853 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1854 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1855 break;
1856
1857 case OP_OR:
1858 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1859 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1860 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1861 break;
1862
1863 case OP_XOR:
1864 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1865 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1866 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1867 break;
1868
1869 case OP_TEST:
1870 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1871 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1872 rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1873 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1874 break;
1875
1876 case OP_BT:
1877 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1878 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1879 rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1880 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1881 break;
1882
1883 case OP_XCHG:
1884 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1885 rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1886 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1887 break;
1888
1889
1890 /*
1891 * The instruction isn't supported. Hand it on to ring-3.
1892 */
1893 default:
1894 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1895 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1896 break;
1897 }
1898
1899 /*
1900 * On success advance RIP.
1901 */
1902 if (rc == VINF_SUCCESS)
1903 pCtxCore->rip += cbOp;
1904 else
1905 {
1906 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1907# if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1908 switch (rc)
1909 {
1910 case VINF_IOM_R3_MMIO_READ:
1911 case VINF_IOM_R3_MMIO_READ_WRITE:
1912 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1913 break;
1914 case VINF_IOM_R3_MMIO_WRITE:
1915 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1916 break;
1917 }
1918# endif
1919 }
1920
1921 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1922 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1923 iomMmioReleaseRange(pVM, pRange);
1924 return rc;
1925#endif /* !IEM_USE_IEM_INSTEAD */
1926}
1927
1928
1929/**
1930 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1931 * \#PF access handler callback for MMIO pages.}
1932 *
1933 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
1934 */
1935DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
1936 RTGCPHYS GCPhysFault, void *pvUser)
1937{
1938 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1939 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); NOREF(pvFault);
1940 return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1941}
1942
1943
1944/**
1945 * Physical access handler for MMIO ranges.
1946 *
1947 * @returns VBox status code (appropriate for GC return).
1948 * @param pVM Pointer to the VM.
1949 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1950 * @param uErrorCode CPU Error code.
1951 * @param pCtxCore Trap register frame.
1952 * @param GCPhysFault The GC physical address.
1953 */
1954VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1955{
1956 /*
1957 * We don't have a range here, so look it up before calling the common function.
1958 */
1959 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1960#ifndef IN_RING3
1961 if (rc2 == VERR_SEM_BUSY)
1962 return VINF_IOM_R3_MMIO_READ_WRITE;
1963#endif
1964 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1965 if (RT_UNLIKELY(!pRange))
1966 {
1967 IOM_UNLOCK_SHARED(pVM);
1968 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1969 }
1970 iomMmioRetainRange(pRange);
1971 IOM_UNLOCK_SHARED(pVM);
1972
1973 VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
1974
1975 iomMmioReleaseRange(pVM, pRange);
1976 return VBOXSTRICTRC_VAL(rcStrict);
1977}
1978
1979
1980/**
1981 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
1982 *
1983 * @remarks The @a pvUser argument points to the MMIO range entry.
1984 */
1985PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
1986 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
1987{
1988 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1989 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1990
1991 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
1992 AssertPtr(pRange);
1993 NOREF(pvPhys); NOREF(enmOrigin);
1994
1995 /*
1996 * Validate the range.
1997 */
1998 int rc = IOM_LOCK_SHARED(pVM);
1999#ifndef IN_RING3
2000 if (rc == VERR_SEM_BUSY)
2001 return VINF_IOM_R3_MMIO_READ_WRITE;
2002#endif
2003 AssertRC(rc);
2004 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
2005
2006 /*
2007 * Perform locking.
2008 */
2009 iomMmioRetainRange(pRange);
2010 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2011 IOM_UNLOCK_SHARED(pVM);
2012 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
2013 if (rcStrict == VINF_SUCCESS)
2014 {
2015 /*
2016 * Perform the access.
2017 */
2018 if (enmAccessType == PGMACCESSTYPE_READ)
2019 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2020 else
2021 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2022
2023 /* Check the return code. */
2024#ifdef IN_RING3
2025 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
2026#else
2027 AssertMsg( rcStrict == VINF_SUCCESS
2028 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
2029 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
2030 || rcStrict == VINF_EM_DBG_STOP
2031 || rcStrict == VINF_EM_DBG_BREAKPOINT
2032 || rcStrict == VINF_EM_OFF
2033 || rcStrict == VINF_EM_SUSPEND
2034 || rcStrict == VINF_EM_RESET
2035 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
2036 //|| rcStrict == VINF_EM_HALT /* ?? */
2037 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
2038 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
2039#endif
2040
2041 iomMmioReleaseRange(pVM, pRange);
2042 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2043 }
2044 else
2045 iomMmioReleaseRange(pVM, pRange);
2046 return rcStrict;
2047}
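
/*
 * Illustrative sketch only: a minimal device read callback of the kind
 * iomMMIODoRead dispatches to above. The device state structure, its fields
 * and the register layout are hypothetical; only the FNIOMMMIOREAD-style
 * signature and the status codes are taken from the surrounding code.
 */
#if 0 /* example, not built */
static DECLCALLBACK(int) demoMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
{
    PDEMOSTATE pThis = PDMINS_2_DATA(pDevIns, PDEMOSTATE); /* hypothetical state */
    NOREF(pvUser); NOREF(cb);
    switch (GCPhysAddr & 0xff) /* hypothetical register layout */
    {
        case 0x00: /* status register */
            *(uint32_t *)pv = pThis->u32Status;
            return VINF_SUCCESS;
        default: /* unimplemented registers read as all ones, see the VINF_IOM_MMIO_UNUSED_FF handling in IOMMMIORead */
            return VINF_IOM_MMIO_UNUSED_FF;
    }
}
#endif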
2048
2049
2050#ifdef IN_RING3 /* Only used by REM. */
2051
2052/**
2053 * Reads a MMIO register.
2054 *
2055 * @returns VBox status code.
2056 *
2057 * @param pVM Pointer to the VM.
2058 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2059 * @param GCPhys The physical address to read.
2060 * @param pu32Value Where to store the value read.
2061 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
2062 */
2063VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
2064{
2065 /* Take the IOM lock before performing any MMIO. */
2066 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2067#ifndef IN_RING3
2068 if (rc == VERR_SEM_BUSY)
2069 return VINF_IOM_R3_MMIO_READ;
2070#endif
2071 AssertRC(VBOXSTRICTRC_VAL(rc));
2072#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2073 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
2074#endif
2075
2076 /*
2077 * Lookup the current context range node and statistics.
2078 */
2079 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2080 if (!pRange)
2081 {
2082 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2083 IOM_UNLOCK_SHARED(pVM);
2084 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2085 }
2086 iomMmioRetainRange(pRange);
2087#ifndef VBOX_WITH_STATISTICS
2088 IOM_UNLOCK_SHARED(pVM);
2089
2090#else /* VBOX_WITH_STATISTICS */
2091 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2092 if (!pStats)
2093 {
2094 iomMmioReleaseRange(pVM, pRange);
2095# ifdef IN_RING3
2096 return VERR_NO_MEMORY;
2097# else
2098 return VINF_IOM_R3_MMIO_READ;
2099# endif
2100 }
2101 STAM_COUNTER_INC(&pStats->Accesses);
2102#endif /* VBOX_WITH_STATISTICS */
2103
2104 if (pRange->CTX_SUFF(pfnReadCallback))
2105 {
2106 /*
2107 * Perform locking.
2108 */
2109 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2110 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
2111 if (rc != VINF_SUCCESS)
2112 {
2113 iomMmioReleaseRange(pVM, pRange);
2114 return rc;
2115 }
2116
2117 /*
2118 * Perform the read and deal with the result.
2119 */
2120 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
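        /* Fast path for naturally aligned 4/8 byte accesses and for ranges in
           pass-through read mode; anything else gets split up and/or widened
           by the complicated-read helper. */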
2121 if ( (cbValue == 4 && !(GCPhys & 3))
2122 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
2123 || (cbValue == 8 && !(GCPhys & 7)) )
2124 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
2125 pu32Value, (unsigned)cbValue);
2126 else
2127 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
2128 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2129 switch (VBOXSTRICTRC_VAL(rc))
2130 {
2131 case VINF_SUCCESS:
2132 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2133 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2134 iomMmioReleaseRange(pVM, pRange);
2135 return rc;
2136#ifndef IN_RING3
2137 case VINF_IOM_R3_MMIO_READ:
2138 case VINF_IOM_R3_MMIO_READ_WRITE:
2139 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2140#endif
2141 default:
2142 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2143 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2144 iomMmioReleaseRange(pVM, pRange);
2145 return rc;
2146
2147 case VINF_IOM_MMIO_UNUSED_00:
2148 iomMMIODoRead00s(pu32Value, cbValue);
2149 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2150 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2151 iomMmioReleaseRange(pVM, pRange);
2152 return VINF_SUCCESS;
2153
2154 case VINF_IOM_MMIO_UNUSED_FF:
2155 iomMMIODoReadFFs(pu32Value, cbValue);
2156 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2157 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2158 iomMmioReleaseRange(pVM, pRange);
2159 return VINF_SUCCESS;
2160 }
2161 /* not reached */
2162 }
2163#ifndef IN_RING3
2164 if (pRange->pfnReadCallbackR3)
2165 {
2166 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2167 iomMmioReleaseRange(pVM, pRange);
2168 return VINF_IOM_R3_MMIO_READ;
2169 }
2170#endif
2171
2172 /*
2173 * Unassigned memory - this is actually not supposed to happen...
2174 */
2175 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
2176 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2177 iomMMIODoReadFFs(pu32Value, cbValue);
2178 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2179 iomMmioReleaseRange(pVM, pRange);
2180 return VINF_SUCCESS;
2181}
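
/*
 * Illustrative sketch only: how a ring-3 caller like REM might use
 * IOMMMIORead. The physical address is a placeholder.
 */
#if 0 /* example, not built */
    uint32_t u32Value = 0;
    VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, GCPhysReg /* placeholder */, &u32Value, sizeof(u32Value));
    if (rcStrict == VINF_SUCCESS)
        Log(("MMIO register value: %#RX32\n", u32Value));
#endif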
2182
2183
2184/**
2185 * Writes to a MMIO register.
2186 *
2187 * @returns VBox status code.
2188 *
2189 * @param pVM Pointer to the VM.
2190 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2191 * @param GCPhys The physical address to write to.
2192 * @param u32Value The value to write.
2193 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
2194 */
2195VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
2196{
2197 /* Take the IOM lock before performing any MMIO. */
2198 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2199#ifndef IN_RING3
2200 if (rc == VERR_SEM_BUSY)
2201 return VINF_IOM_R3_MMIO_WRITE;
2202#endif
2203 AssertRC(VBOXSTRICTRC_VAL(rc));
2204#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2205 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2206#endif
2207
2208 /*
2209 * Lookup the current context range node.
2210 */
2211 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2212 if (!pRange)
2213 {
2214 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2215 IOM_UNLOCK_SHARED(pVM);
2216 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2217 }
2218 iomMmioRetainRange(pRange);
2219#ifndef VBOX_WITH_STATISTICS
2220 IOM_UNLOCK_SHARED(pVM);
2221
2222#else /* VBOX_WITH_STATISTICS */
2223 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2224 if (!pStats)
2225 {
2226 iomMmioReleaseRange(pVM, pRange);
2227# ifdef IN_RING3
2228 return VERR_NO_MEMORY;
2229# else
2230 return VINF_IOM_R3_MMIO_WRITE;
2231# endif
2232 }
2233 STAM_COUNTER_INC(&pStats->Accesses);
2234#endif /* VBOX_WITH_STATISTICS */
2235
2236 if (pRange->CTX_SUFF(pfnWriteCallback))
2237 {
2238 /*
2239 * Perform locking.
2240 */
2241 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2242 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2243 if (rc != VINF_SUCCESS)
2244 {
2245 iomMmioReleaseRange(pVM, pRange);
2246 return rc;
2247 }
2248
2249 /*
2250 * Perform the write.
2251 */
2252 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2253 if ( (cbValue == 4 && !(GCPhys & 3))
2254 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2255 || (cbValue == 8 && !(GCPhys & 7)) )
2256 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2257 GCPhys, &u32Value, (unsigned)cbValue);
2258 else
2259 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2260 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2261#ifndef IN_RING3
2262 if ( rc == VINF_IOM_R3_MMIO_WRITE
2263 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2264 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2265#endif
2266 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2267 iomMmioReleaseRange(pVM, pRange);
2268 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2269 return rc;
2270 }
2271#ifndef IN_RING3
2272 if (pRange->pfnWriteCallbackR3)
2273 {
2274 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2275 iomMmioReleaseRange(pVM, pRange);
2276 return VINF_IOM_R3_MMIO_WRITE;
2277 }
2278#endif
2279
2280 /*
2281 * No write handler, nothing to do.
2282 */
2283 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2284 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2285 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2286 iomMmioReleaseRange(pVM, pRange);
2287 return VINF_SUCCESS;
2288}
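
/*
 * Illustrative sketch only: the corresponding write, again with a placeholder
 * address.
 */
#if 0 /* example, not built */
    VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, pVCpu, GCPhysReg /* placeholder */, UINT32_C(0x1), sizeof(uint32_t));
    AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
#endif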
2289
2290#endif /* IN_RING3 - only used by REM. */
2291#ifndef IEM_USE_IEM_INSTEAD
2292
2293/**
2294 * [REP*] INSB/INSW/INSD
2295 * ES:EDI,DX[,ECX]
2296 *
2297 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2298 *
2299 * @returns Strict VBox status code. Informational status codes other than the one documented
2300 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2301 * @retval VINF_SUCCESS Success.
2302 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2303 * status code must be passed on to EM.
2304 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2305 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2306 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2307 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2308 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2309 *
2310 * @param pVM The virtual machine.
2311 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2312 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2313 * @param uPort IO Port
2314 * @param uPrefix IO instruction prefix
2315 * @param enmAddrMode The address mode.
2316 * @param cbTransfer Size of transfer unit
2317 */
2318VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2319 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2320{
2321 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2322
2323 /*
2324 * We do not support REPNE or decrementing destination
2325 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2326 */
2327 if ( (uPrefix & DISPREFIX_REPNE)
2328 || pRegFrame->eflags.Bits.u1DF)
2329 return VINF_EM_RAW_EMULATE_INSTR;
2330
2331 /*
2332 * Get bytes/words/dwords count to transfer.
2333 */
2334 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2335 RTGCUINTREG cTransfers = 1;
2336 if (uPrefix & DISPREFIX_REP)
2337 {
2338#ifndef IN_RC
2339 if ( CPUMIsGuestIn64BitCode(pVCpu)
2340 && pRegFrame->rcx >= _4G)
2341 return VINF_EM_RAW_EMULATE_INSTR;
2342#endif
2343 cTransfers = pRegFrame->rcx & fAddrMask;
2344 if (!cTransfers)
2345 return VINF_SUCCESS;
2346 }
2347
2348 /* Convert destination address es:edi. */
2349 RTGCPTR GCPtrDst;
2350 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2351 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2352 &GCPtrDst);
2353 if (RT_FAILURE(rc2))
2354 {
2355 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2356 return VINF_EM_RAW_EMULATE_INSTR;
2357 }
2358
2359 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2360 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2361 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2362 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2363 if (rc2 != VINF_SUCCESS)
2364 {
2365 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2366 return VINF_EM_RAW_EMULATE_INSTR;
2367 }
2368
2369 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2370 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2371 if (cTransfers > 1)
2372 {
2373 /*
2374 * Work the string page by page, letting the device handle as much
2375 * as it likes via the string I/O interface.
2376 */
2377 for (;;)
2378 {
2379 PGMPAGEMAPLOCK Lock;
2380 void *pvDst;
2381 rc2 = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2382 if (RT_SUCCESS(rc2))
2383 {
2384 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK)) / cbTransfer;
2385 if (cMaxThisTime > cTransfers)
2386 cMaxThisTime = cTransfers;
2387 if (!cMaxThisTime)
2388 break;
2389 uint32_t cThisTime = cMaxThisTime;
2390
2391 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, pvDst, &cThisTime, cbTransfer);
2392 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2393 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2394
2395 uint32_t const cActual = cMaxThisTime - cThisTime;
2396 if (cActual)
2397 { /* Must dirty the page. */
2398 uint8_t b = *(uint8_t *)pvDst;
2399 iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &b, 1);
2400 }
2401
2402 PGMPhysReleasePageMappingLock(pVM, &Lock);
2403
2404 uint32_t const cbActual = cActual * cbTransfer;
2405 cTransfers -= cActual;
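                /* Advance edi only within the current address-size mask; e.g.
                   with a 16-bit address size (fAddrMask=0xffff) di wraps at
                   64KB while the upper bits of rdi are left untouched. */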
2406 pRegFrame->rdi = ((pRegFrame->rdi + cbActual) & fAddrMask)
2407 | (pRegFrame->rdi & ~fAddrMask);
2408 GCPtrDst += cbActual;
2409
2410 if ( cThisTime
2411 || !cTransfers
2412 || rcStrict != VINF_SUCCESS
2413 || (GCPtrDst & PAGE_OFFSET_MASK))
2414 break;
2415 }
2416 else
2417 {
2418 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtr %#RGv -> %Rrc\n", GCPtrDst, rc2));
2419 break;
2420 }
2421 }
2422 }
2423
2424 /*
2425 * Single transfer / unmapped memory fallback.
2426 */
2427#ifdef IN_RC
2428 MMGCRamRegisterTrapHandler(pVM);
2429#endif
2430 while (cTransfers && rcStrict == VINF_SUCCESS)
2431 {
2432 uint32_t u32Value;
2433 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2434 if (!IOM_SUCCESS(rcStrict))
2435 break;
2436 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2437 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2438 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2439 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2440 | (pRegFrame->rdi & ~fAddrMask);
2441 cTransfers--;
2442 }
2443#ifdef IN_RC
2444 MMGCRamDeregisterTrapHandler(pVM);
2445#endif
2446
2447 /* Update rcx on exit. */
2448 if (uPrefix & DISPREFIX_REP)
2449 pRegFrame->rcx = (cTransfers & fAddrMask)
2450 | (pRegFrame->rcx & ~fAddrMask);
2451
2452 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2453 return rcStrict;
2454}
2455
2456
2457/**
2458 * [REP*] OUTSB/OUTSW/OUTSD
2459 * DS:ESI,DX[,ECX]
2460 *
2461 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2462 *
2463 * @returns Strict VBox status code. Informational status codes other than the one documented
2464 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2465 * @retval VINF_SUCCESS Success.
2466 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2467 * status code must be passed on to EM.
2468 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2469 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2470 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2471 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2472 *
2473 * @param pVM The virtual machine.
2474 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2475 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2476 * @param uPort IO Port
2477 * @param uPrefix IO instruction prefix
2478 * @param enmAddrMode The address mode.
2479 * @param cbTransfer Size of transfer unit
2480 *
2481 * @remarks This API will probably be replaced by IEM before long, so no use in
2482 * optimizing+fixing stuff too much here.
2483 */
2484VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2485 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2486{
2487 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2488
2489 /*
2490 * We do not support segment prefixes, REPNE or
2491 * decrementing source pointer.
2492 */
2493 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2494 || pRegFrame->eflags.Bits.u1DF)
2495 return VINF_EM_RAW_EMULATE_INSTR;
2496
2497 /*
2498 * Get bytes/words/dwords count to transfer.
2499 */
2500 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2501 RTGCUINTREG cTransfers = 1;
2502 if (uPrefix & DISPREFIX_REP)
2503 {
2504#ifndef IN_RC
2505 if ( CPUMIsGuestIn64BitCode(pVCpu)
2506 && pRegFrame->rcx >= _4G)
2507 return VINF_EM_RAW_EMULATE_INSTR;
2508#endif
2509 cTransfers = pRegFrame->rcx & fAddrMask;
2510 if (!cTransfers)
2511 return VINF_SUCCESS;
2512 }
2513
2514 /* Convert source address ds:esi. */
2515 RTGCPTR GCPtrSrc;
2516 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2517 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2518 &GCPtrSrc);
2519 if (RT_FAILURE(rc2))
2520 {
2521 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2522 return VINF_EM_RAW_EMULATE_INSTR;
2523 }
2524
2525 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2526 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2527 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2528 (cpl == 3) ? X86_PTE_US : 0);
2529 if (rc2 != VINF_SUCCESS)
2530 {
2531 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2532 return VINF_EM_RAW_EMULATE_INSTR;
2533 }
2534
2535 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2536 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2537 if (cTransfers > 1)
2538 {
2539 /*
2540 * Work the string page by page, letting the device handle as much
2541 * as it likes via the string I/O interface.
2542 */
2543 for (;;)
2544 {
2545 PGMPAGEMAPLOCK Lock;
2546 void const *pvSrc;
2547 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2548 if (RT_SUCCESS(rc2))
2549 {
2550 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK)) / cbTransfer;
2551 if (cMaxThisTime > cTransfers)
2552 cMaxThisTime = cTransfers;
2553 if (!cMaxThisTime)
2554 break;
2555 uint32_t cThisTime = cMaxThisTime;
2556
2557 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc, &cThisTime, cbTransfer);
2558 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2559 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2560
2561 PGMPhysReleasePageMappingLock(pVM, &Lock);
2562
2563 uint32_t const cActual = cMaxThisTime - cThisTime;
2564 uint32_t const cbActual = cActual * cbTransfer;
2565 cTransfers -= cActual;
2566 pRegFrame->rsi = ((pRegFrame->rsi + cbActual) & fAddrMask)
2567 | (pRegFrame->rsi & ~fAddrMask);
2568 GCPtrSrc += cbActual;
2569
2570 if ( cThisTime
2571 || !cTransfers
2572 || rcStrict != VINF_SUCCESS
2573 || (GCPtrSrc & PAGE_OFFSET_MASK))
2574 break;
2575 }
2576 else
2577 {
2578 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtrReadOnly %#RGv -> %Rrc\n", GCPtrSrc, rc2));
2579 break;
2580 }
2581 }
2582 }
2583
2584 /*
2585 * Single transfer / unmapped memory fallback.
2586 */
2587#ifdef IN_RC
2588 MMGCRamRegisterTrapHandler(pVM);
2589#endif
2590
2591 while (cTransfers && rcStrict == VINF_SUCCESS)
2592 {
2593 uint32_t u32Value = 0;
2594 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2595 if (rcStrict != VINF_SUCCESS)
2596 break;
2597 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2598 if (!IOM_SUCCESS(rcStrict))
2599 break;
2600 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
2601 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2602 | (pRegFrame->rsi & ~fAddrMask);
2603 cTransfers--;
2604 }
2605
2606#ifdef IN_RC
2607 MMGCRamDeregisterTrapHandler(pVM);
2608#endif
2609
2610 /* Update rcx on exit. */
2611 if (uPrefix & DISPREFIX_REP)
2612 pRegFrame->rcx = (cTransfers & fAddrMask)
2613 | (pRegFrame->rcx & ~fAddrMask);
2614
2615 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2616 return rcStrict;
2617}
2618
2619#endif /* !IEM_USE_IEM_INSTEAD */
2620
2621
2622#ifndef IN_RC
2623
2624/**
2625 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2626 *
2627 * (This is a special optimization used by the VGA device.)
2628 *
2629 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2630 * remapping is made.
2631 *
2632 * @param pVM The virtual machine.
2633 * @param GCPhys The address of the MMIO page to be changed.
2634 * @param GCPhysRemapped The address of the MMIO2 page.
2635 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2636 * for the time being.
2637 */
2638VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2639{
2640# ifndef IEM_VERIFICATION_MODE_FULL
2641 /* Currently only called from the VGA device during MMIO. */
2642 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2643 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2644 PVMCPU pVCpu = VMMGetCpu(pVM);
2645
2646 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2647 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2648 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2649 && !HMIsNestedPagingActive(pVM)))
2650 return VINF_SUCCESS; /* ignore */
2651
2652 int rc = IOM_LOCK_SHARED(pVM);
2653 if (RT_FAILURE(rc))
2654 return VINF_SUCCESS; /* better luck the next time around */
2655
2656 /*
2657 * Lookup the context range node the page belongs to.
2658 */
2659 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2660 AssertMsgReturn(pRange,
2661 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2662
2663 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2664 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2665
2666 /*
2667 * Do the aliasing; page align the addresses since PGM is picky.
2668 */
2669 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2670 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2671
2672 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2673
2674 IOM_UNLOCK_SHARED(pVM);
2675 AssertRCReturn(rc, rc);
2676
2677 /*
2678 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2679 * can simply prefetch it.
2680 *
2681 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2682 */
2683# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2684# ifdef VBOX_STRICT
2685 uint64_t fFlags;
2686 RTHCPHYS HCPhys;
2687 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2688 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2689# endif
2690# endif
2691 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2692 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2693# endif /* !IEM_VERIFICATION_MODE_FULL */
2694 return VINF_SUCCESS;
2695}
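
/*
 * Illustrative sketch only: the VGA-style use of the remapping optimization,
 * with placeholder addresses.
 */
#if 0 /* example, not built */
    rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMmio /* placeholder */, GCPhysMmio2 /* placeholder */, X86_PTE_RW | X86_PTE_P);
    AssertRC(rc);
#endif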
2696
2697
2698# ifndef IEM_VERIFICATION_MODE_FULL
2699/**
2700 * Mapping a HC page in place of an MMIO page for direct access.
2701 *
2702 * (This is a special optimization used by the APIC in the VT-x case.)
2703 *
2704 * @returns VBox status code.
2705 *
2706 * @param pVM Pointer to the VM.
2707 * @param pVCpu Pointer to the VMCPU.
2708 * @param GCPhys The address of the MMIO page to be changed.
2709 * @param HCPhys The address of the host physical page.
2710 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2711 * for the time being.
2712 */
2713VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2714{
2715 /* Currently only called from VT-x code during a page fault. */
2716 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2717
2718 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2719 Assert(HMIsEnabled(pVM));
2720
2721 /*
2722 * Lookup the context range node the page belongs to.
2723 */
2724# ifdef VBOX_STRICT
2725 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2726 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2727 AssertMsgReturn(pRange,
2728 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2729 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2730 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2731# endif
2732
2733 /*
2734 * Do the aliasing; page align the addresses since PGM is picky.
2735 */
2736 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2737 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2738
2739 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2740 AssertRCReturn(rc, rc);
2741
2742 /*
2743 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2744 * can simply prefetch it.
2745 *
2746 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2747 */
2748 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2749 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2750 return VINF_SUCCESS;
2751}
2752# endif /* !IEM_VERIFICATION_MODE_FULL */
2753
2754
2755/**
2756 * Reset a previously modified MMIO region; restore the access flags.
2757 *
2758 * @returns VBox status code.
2759 *
2760 * @param pVM The virtual machine.
2761 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2762 */
2763VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2764{
2765 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2766
2767 PVMCPU pVCpu = VMMGetCpu(pVM);
2768
2769 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2770 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2771 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2772 && !HMIsNestedPagingActive(pVM)))
2773 return VINF_SUCCESS; /* ignore */
2774
2775 /*
2776 * Lookup the context range node the page belongs to.
2777 */
2778# ifdef VBOX_STRICT
2779 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2780 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2781 AssertMsgReturn(pRange,
2782 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2783 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2784 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2785# endif
2786
2787 /*
2788 * Call PGM to do the work.
2789 *
2790 * After the call, all the pages should be non-present... unless there is
2791 * a page pool flush pending (unlikely).
2792 */
2793 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2794 AssertRC(rc);
2795
2796# ifdef VBOX_STRICT
2797 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2798 {
2799 uint32_t cb = pRange->cb;
2800 GCPhys = pRange->GCPhys;
2801 while (cb)
2802 {
2803 uint64_t fFlags;
2804 RTHCPHYS HCPhys;
2805 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2806 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2807 cb -= PAGE_SIZE;
2808 GCPhys += PAGE_SIZE;
2809 }
2810 }
2811# endif
2812 return rc;
2813}
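
/*
 * Illustrative sketch only: once direct access must stop (e.g. on a mode
 * switch), the aliasing set up by IOMMMIOMapMMIO2Page/IOMMMIOMapMMIOHCPage is
 * undone by resetting the region. The address is a placeholder.
 */
#if 0 /* example, not built */
    rc = IOMMMIOResetRegion(pVM, GCPhysMmio /* placeholder */);
    AssertRC(rc);
#endif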
2814
2815#endif /* !IN_RC */
2816