VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp @ 44528

Last change on this file since 44528 was 44528, checked in by vboxsync, 12 years ago: header (C) fixes

1/* $Id: IOMAllMMIO.cpp 44528 2013-02-04 14:27:54Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
32# include <VBox/vmm/iem.h>
33#endif
34#include "IOMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/hm.h>
38#include "IOMInline.h"
39
40#include <VBox/dis.h>
41#include <VBox/disopcode.h>
42#include <VBox/vmm/pdmdev.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <iprt/assert.h>
46#include <VBox/log.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
54
55/**
56 * Array for fast recoding of the operand size (1/2/4/8 bytes) to a bit shift value.
57 */
58static const unsigned g_aSize2Shift[] =
59{
60 ~0U, /* 0 - invalid */
61 0, /* *1 == 2^0 */
62 1, /* *2 == 2^1 */
63 ~0U, /* 3 - invalid */
64 2, /* *4 == 2^2 */
65 ~0U, /* 5 - invalid */
66 ~0U, /* 6 - invalid */
67 ~0U, /* 7 - invalid */
68 3 /* *8 == 2^3 */
69};
70
71/**
72 * Macro for fast recoding of the operand size (1/2/4/8 bytes) to a bit shift value.
73 */
74#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
75
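/* Editorial note (not part of the original file): a quick check of the recode
   table. The four valid access sizes map to their base-2 logarithm:
       SIZE_2_SHIFT(1) == 0, SIZE_2_SHIFT(2) == 1,
       SIZE_2_SHIFT(4) == 2, SIZE_2_SHIFT(8) == 3,
   so e.g. a REP STOSD of cTransfers dwords covers
   cTransfers << SIZE_2_SHIFT(4) == cTransfers * 4 bytes. */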
76
77/**
78 * Deals with complicated MMIO writes.
79 *
80 * Complicated means unaligned or non-dword/qword aligned accesses depending on
81 * the MMIO region's access mode flags.
82 *
83 * @returns Strict VBox status code. Any EM scheduling status code,
84 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
85 * VINF_IOM_R3_MMIO_READ may be returned.
86 *
87 * @param pVM Pointer to the VM.
88 * @param pRange The range to write to.
89 * @param GCPhys The physical address to start writing.
90 * @param pvValue The value to write.
91 * @param cbValue The size of the value to write.
92 */
93static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
94{
95 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
96 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
97 VERR_IOM_MMIO_IPE_1);
98 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
99 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
100 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) >= IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING;
101
102 /*
103 * Do debug stop if requested.
104 */
105 int rc = VINF_SUCCESS; NOREF(pVM);
106#ifdef VBOX_STRICT
107 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
108 {
109# ifdef IN_RING3
110 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
111 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
112 if (rc == VERR_DBGF_NOT_ATTACHED)
113 rc = VINF_SUCCESS;
114# else
115 return VINF_IOM_R3_MMIO_WRITE;
116# endif
117 }
118#endif
119
120
121 /*
122 * Split and conquer.
123 */
124 for (;;)
125 {
126 unsigned const offAccess = GCPhys & 3;
127 unsigned cbThisPart = 4 - offAccess;
128 if (cbThisPart > cbValue)
129 cbThisPart = cbValue;
130
131 /*
132 * Get the missing bits (if any).
133 */
134 uint32_t u32MissingValue = 0;
135 if (fReadMissing && cbThisPart != 4)
136 {
137 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
138 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
139 switch (rc2)
140 {
141 case VINF_SUCCESS:
142 break;
143 case VINF_IOM_MMIO_UNUSED_FF:
144 u32MissingValue = UINT32_C(0xffffffff);
145 break;
146 case VINF_IOM_MMIO_UNUSED_00:
147 u32MissingValue = 0;
148 break;
149 case VINF_IOM_R3_MMIO_READ:
150 case VINF_IOM_R3_MMIO_READ_WRITE:
151 case VINF_IOM_R3_MMIO_WRITE:
152 /** @todo What if we've split a transfer and already read
153 * something? Since reads can have side effects we could be
154 * kind of screwed here... */
155 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
156 return rc2;
157 default:
158 if (RT_FAILURE(rc2))
159 {
160 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
161 return rc2;
162 }
163 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
164 if (rc == VINF_SUCCESS || rc2 < rc)
165 rc = rc2;
166 break;
167 }
168 }
169
170 /*
171 * Merge missing and given bits.
172 */
173 uint32_t u32GivenMask;
174 uint32_t u32GivenValue;
175 switch (cbThisPart)
176 {
177 case 1:
178 u32GivenValue = *(uint8_t const *)pvValue;
179 u32GivenMask = UINT32_C(0x000000ff);
180 break;
181 case 2:
182 u32GivenValue = *(uint16_t const *)pvValue;
183 u32GivenMask = UINT32_C(0x0000ffff);
184 break;
185 case 3:
186 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
187 ((uint8_t const *)pvValue)[2], 0);
188 u32GivenMask = UINT32_C(0x00ffffff);
189 break;
190 case 4:
191 u32GivenValue = *(uint32_t const *)pvValue;
192 u32GivenMask = UINT32_C(0xffffffff);
193 break;
194 default:
195 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
196 }
197 if (offAccess)
198 {
199 u32GivenValue <<= offAccess * 8;
200 u32GivenMask <<= offAccess * 8;
201 }
202
203 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
204 | (u32GivenValue & u32GivenMask);
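        /* Editorial example (assumed values, not in the original source):
           a one-byte write of 0xef at offAccess == 3, with the missing bits
           read back as 0x11223344, merges as
           (0x11223344 & ~0xff000000) | (0xef000000 & 0xff000000) == 0xef223344. */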
205
206 /*
207 * Do DWORD write to the device.
208 */
209 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
210 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
211 switch (rc2)
212 {
213 case VINF_SUCCESS:
214 break;
215 case VINF_IOM_R3_MMIO_READ:
216 case VINF_IOM_R3_MMIO_READ_WRITE:
217 case VINF_IOM_R3_MMIO_WRITE:
218 /** @todo What if we've split a transfer and already read
219 * something? Since reads can have side effects we could be
220 * kind of screwed here... */
221 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
222 return rc2;
223 default:
224 if (RT_FAILURE(rc2))
225 {
226 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
227 return rc2;
228 }
229 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
230 if (rc == VINF_SUCCESS || rc2 < rc)
231 rc = rc2;
232 break;
233 }
234
235 /*
236 * Advance.
237 */
238 cbValue -= cbThisPart;
239 if (!cbValue)
240 break;
241 GCPhys += cbThisPart;
242 pvValue = (uint8_t const *)pvValue + cbThisPart;
243 }
244
245 return rc;
246}
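
/* Editorial sketch of the loop above (hypothetical addresses): a 2-byte write
   to GCPhys 0x...f003 takes two iterations. Iteration 1: offAccess == 3,
   cbThisPart == 1; when the access mode requires it, the containing dword is
   read back and the value's low byte is merged into bits 24..31 before the
   aligned dword write. Iteration 2: GCPhys has advanced to 0x...f004,
   offAccess == 0, cbThisPart == 1; the remaining byte lands in bits 0..7 of
   the next dword. Every device access the loop issues is therefore an
   aligned dword. */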
247
248
249
250
251/**
252 * Wrapper which does the write and updates range statistics when such are enabled.
253 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
254 */
255static int iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
256{
257#ifdef VBOX_WITH_STATISTICS
258 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
259 Assert(pStats);
260#endif
261
262 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
263 VBOXSTRICTRC rc;
264 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
265 {
266 if ( (cb == 4 && !(GCPhysFault & 3))
267 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
268 || (cb == 8 && !(GCPhysFault & 7)) )
269 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
270 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
271 else
272 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
273 }
274 else
275 rc = VINF_SUCCESS;
276 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
277 STAM_COUNTER_INC(&pStats->Accesses);
278 return VBOXSTRICTRC_TODO(rc);
279}
280
281
282/**
283 * Deals with complicated MMIO reads.
284 *
285 * Complicated means unaligned or non-dword/qword aligned accesses depending on
286 * the MMIO region's access mode flags.
287 *
288 * @returns Strict VBox status code. Any EM scheduling status code,
289 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
290 * VINF_IOM_R3_MMIO_WRITE may be returned.
291 *
292 * @param pVM Pointer to the VM.
293 * @param pRange The range to read from.
294 * @param GCPhys The physical address to start reading.
295 * @param pvValue Where to store the value.
296 * @param cbValue The size of the value to read.
297 */
298static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
299{
300 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
301 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
302 VERR_IOM_MMIO_IPE_1);
303 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
304 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
305
306 /*
307 * Do debug stop if requested.
308 */
309 int rc = VINF_SUCCESS; NOREF(pVM);
310#ifdef VBOX_STRICT
311 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
312 {
313# ifdef IN_RING3
314 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
315 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
316 if (rc == VERR_DBGF_NOT_ATTACHED)
317 rc = VINF_SUCCESS;
318# else
319 return VINF_IOM_R3_MMIO_READ;
320# endif
321 }
322#endif
323
324 /*
325 * Split and conquer.
326 */
327 for (;;)
328 {
329 /*
330 * Do DWORD read from the device.
331 */
332 uint32_t u32Value;
333 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
334 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
335 switch (rc2)
336 {
337 case VINF_SUCCESS:
338 break;
339 case VINF_IOM_MMIO_UNUSED_FF:
340 u32Value = UINT32_C(0xffffffff);
341 break;
342 case VINF_IOM_MMIO_UNUSED_00:
343 u32Value = 0;
344 break;
345 case VINF_IOM_R3_MMIO_READ:
346 case VINF_IOM_R3_MMIO_READ_WRITE:
347 case VINF_IOM_R3_MMIO_WRITE:
348 /** @todo What if we've split a transfer and already read
349 * something? Since reads can have side effects we could be
350 * kind of screwed here... */
351 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
352 return rc2;
353 default:
354 if (RT_FAILURE(rc2))
355 {
356 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
357 return rc2;
358 }
359 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
360 if (rc == VINF_SUCCESS || rc2 < rc)
361 rc = rc2;
362 break;
363 }
364 u32Value >>= (GCPhys & 3) * 8;
365
366 /*
367 * Write what we've read.
368 */
369 unsigned cbThisPart = 4 - (GCPhys & 3);
370 if (cbThisPart > cbValue)
371 cbThisPart = cbValue;
372
373 switch (cbThisPart)
374 {
375 case 1:
376 *(uint8_t *)pvValue = (uint8_t)u32Value;
377 break;
378 case 2:
379 *(uint16_t *)pvValue = (uint16_t)u32Value;
380 break;
381 case 3:
382 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
383 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
384 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
385 break;
386 case 4:
387 *(uint32_t *)pvValue = u32Value;
388 break;
389 }
390
391 /*
392 * Advance.
393 */
394 cbValue -= cbThisPart;
395 if (!cbValue)
396 break;
397 GCPhys += cbThisPart;
398 pvValue = (uint8_t *)pvValue + cbThisPart;
399 }
400
401 return rc;
402}
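
/* Editorial sketch (hypothetical addresses): a 2-byte read at GCPhys 0x...f003
   first reads the aligned dword at 0x...f000 and shifts it right by
   (GCPhys & 3) * 8 == 24 bits, storing that single byte (cbThisPart == 1).
   The loop then advances to 0x...f004, reads the next aligned dword and stores
   its low byte, assembling the 16-bit result from two aligned device reads. */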
403
404
405/**
406 * Implements VINF_IOM_MMIO_UNUSED_FF.
407 *
408 * @returns VINF_SUCCESS.
409 * @param pvValue Where to store the 0xff bytes.
410 * @param cbValue How many bytes to read.
411 */
412static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
413{
414 switch (cbValue)
415 {
416 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
417 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
418 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
419 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
420 default:
421 {
422 uint8_t *pb = (uint8_t *)pvValue;
423 while (cbValue--)
424 *pb++ = UINT8_C(0xff);
425 break;
426 }
427 }
428 return VINF_SUCCESS;
429}
430
431
432/**
433 * Implements VINF_IOM_MMIO_UNUSED_00.
434 *
435 * @returns VINF_SUCCESS.
436 * @param pvValue Where to store the zeros.
437 * @param cbValue How many bytes to read.
438 */
439static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
440{
441 switch (cbValue)
442 {
443 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
444 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
445 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
446 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
447 default:
448 {
449 uint8_t *pb = (uint8_t *)pvValue;
450 while (cbValue--)
451 *pb++ = UINT8_C(0x00);
452 break;
453 }
454 }
455 return VINF_SUCCESS;
456}
457
458
459/**
460 * Wrapper which does the read and updates range statistics when such are enabled.
461 */
462DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
463{
464#ifdef VBOX_WITH_STATISTICS
465 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
466 Assert(pStats);
467 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
468#endif
469
470 VBOXSTRICTRC rc;
471 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
472 {
473 if ( (cbValue == 4 && !(GCPhys & 3))
474 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
475 || (cbValue == 8 && !(GCPhys & 7)) )
476 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
477 else
478 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
479 }
480 else
481 rc = VINF_IOM_MMIO_UNUSED_FF;
482 if (rc != VINF_SUCCESS)
483 {
484 switch (VBOXSTRICTRC_VAL(rc))
485 {
486 case VINF_IOM_MMIO_UNUSED_FF: rc = iomMMIODoReadFFs(pvValue, cbValue); break;
487 case VINF_IOM_MMIO_UNUSED_00: rc = iomMMIODoRead00s(pvValue, cbValue); break;
488 }
489 }
490 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
491 STAM_COUNTER_INC(&pStats->Accesses);
492 return VBOXSTRICTRC_VAL(rc);
493}
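
/* Editorial sketch (assumed device-side code, not from this file): a read
   callback may decline to back a register and let the wrapper above
   synthesize the value via iomMMIODoReadFFs/iomMMIODoRead00s:

       static DECLCALLBACK(int) myMmioRead(PPDMDEVINS pDevIns, void *pvUser,
                                           RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
       {
           if (GCPhysAddr & 0x80)              // hypothetical unimplemented bank
               return VINF_IOM_MMIO_UNUSED_FF; // wrapper fills 0xff..ff, returns VINF_SUCCESS
           *(uint32_t *)pv = 0x12345678;       // hypothetical register value
           return VINF_SUCCESS;
       }
*/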
494
495
496/**
497 * Internal - statistics only.
498 */
499DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
500{
501#ifdef VBOX_WITH_STATISTICS
502 switch (cb)
503 {
504 case 1:
505 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
506 break;
507 case 2:
508 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
509 break;
510 case 4:
511 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
512 break;
513 case 8:
514 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
515 break;
516 default:
517 /* No way. */
518 AssertMsgFailed(("Invalid data length %d\n", cb));
519 break;
520 }
521#else
522 NOREF(pVM); NOREF(cb);
523#endif
524}
525
526
527/**
528 * MOV reg, mem (read)
529 * MOVZX reg, mem (read)
530 * MOVSX reg, mem (read)
531 *
532 * @returns VBox status code.
533 *
534 * @param pVM The virtual machine.
535 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
536 * @param pCpu Disassembler CPU state.
537 * @param pRange Pointer to the MMIO range.
538 * @param GCPhysFault The GC physical address corresponding to pvFault.
539 */
540static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
541{
542 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
543
544 /*
545 * Get the data size from parameter 2,
546 * and call the handler function to get the data.
547 */
548 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
549 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
550
551 uint64_t u64Data = 0;
552 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
553 if (rc == VINF_SUCCESS)
554 {
555 /*
556 * Do sign extension for MOVSX.
557 */
558 /** @todo checkup MOVSX implementation! */
559 if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
560 {
561 if (cb == 1)
562 {
563 /* DWORD <- BYTE */
564 int64_t iData = (int8_t)u64Data;
565 u64Data = (uint64_t)iData;
566 }
567 else
568 {
569 /* DWORD <- WORD */
570 int64_t iData = (int16_t)u64Data;
571 u64Data = (uint64_t)iData;
572 }
573 }
574
575 /*
576 * Store the result to register (parameter 1).
577 */
578 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
579 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
580 }
581
582 if (rc == VINF_SUCCESS)
583 iomMMIOStatLength(pVM, cb);
584 return rc;
585}
586
587
588/**
589 * MOV mem, reg|imm (write)
590 *
591 * @returns VBox status code.
592 *
593 * @param pVM The virtual machine.
594 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
595 * @param pCpu Disassembler CPU state.
596 * @param pRange Pointer to the MMIO range.
597 * @param GCPhysFault The GC physical address corresponding to pvFault.
598 */
599static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
600{
601 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
602
603 /*
604 * Get data to write from second parameter,
605 * and call the callback to write it.
606 */
607 unsigned cb = 0;
608 uint64_t u64Data = 0;
609 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
610 AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);
611
612 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
613 if (rc == VINF_SUCCESS)
614 iomMMIOStatLength(pVM, cb);
615 return rc;
616}
617
618
619/** Wrapper for reading virtual memory. */
620DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
621{
622 /* Note: This will fail in R0 or RC if it hits an access handler. That
623 isn't a problem though since the operation can be restarted in REM. */
624#ifdef IN_RC
625 NOREF(pVCpu);
626 int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
627 /* Page may be protected and not directly accessible. */
628 if (rc == VERR_ACCESS_DENIED)
629 rc = VINF_IOM_R3_IOPORT_WRITE;
630 return rc;
631#else
632 return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
633#endif
634}
635
636
637/** Wrapper for writing virtual memory. */
638DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
639{
640 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
641 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
642 * as well, since we're not behind the PGM lock and the handler may change between calls.
643 *
644 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
645 * the state of some shadowed structures. */
646#if defined(IN_RING0) || defined(IN_RC)
647 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
648#else
649 NOREF(pCtxCore);
650 return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
651#endif
652}
653
654
655#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
656/**
657 * [REP] MOVSB
658 * [REP] MOVSW
659 * [REP] MOVSD
660 *
661 * Restricted implementation.
662 *
663 *
664 * @returns VBox status code.
665 *
666 * @param pVM The virtual machine.
667 * @param uErrorCode CPU Error code.
668 * @param pRegFrame Trap register frame.
669 * @param GCPhysFault The GC physical address corresponding to pvFault.
670 * @param pCpu Disassembler CPU state.
671 * @param pRange Pointer to the MMIO range.
672 * @param ppStat Which sub-sample to attribute this call to.
673 */
674static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
675 PSTAMPROFILE *ppStat)
676{
677 /*
678 * We do not support segment prefixes or REPNE.
679 */
680 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
681 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
682
683 PVMCPU pVCpu = VMMGetCpu(pVM);
684
685 /*
686 * Get bytes/words/dwords/qword count to copy.
687 */
688 uint32_t cTransfers = 1;
689 if (pCpu->fPrefix & DISPREFIX_REP)
690 {
691#ifndef IN_RC
692 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
693 && pRegFrame->rcx >= _4G)
694 return VINF_EM_RAW_EMULATE_INSTR;
695#endif
696
697 cTransfers = pRegFrame->ecx;
698 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
699 cTransfers &= 0xffff;
700
701 if (!cTransfers)
702 return VINF_SUCCESS;
703 }
704
705 /* Get the current privilege level. */
706 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
707
708 /*
709 * Get data size.
710 */
711 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
712 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
713 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
714
715#ifdef VBOX_WITH_STATISTICS
716 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
717 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
718#endif
719
720/** @todo re-evaluate on page boundaries. */
721
722 RTGCPHYS Phys = GCPhysFault;
723 int rc;
724 if (fWriteAccess)
725 {
726 /*
727 * Write operation: [Mem] -> [MMIO]
728 * ds:esi (Virt Src) -> es:edi (Phys Dst)
729 */
730 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
731
732 /* Check callback. */
733 if (!pRange->CTX_SUFF(pfnWriteCallback))
734 return VINF_IOM_R3_MMIO_WRITE;
735
736 /* Convert source address ds:esi. */
737 RTGCUINTPTR pu8Virt;
738 rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
739 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
740 (PRTGCPTR)&pu8Virt);
741 if (RT_SUCCESS(rc))
742 {
743
744 /* Access verification first; we currently can't recover properly from traps inside this instruction */
745 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
746 if (rc != VINF_SUCCESS)
747 {
748 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
749 return VINF_EM_RAW_EMULATE_INSTR;
750 }
751
752#ifdef IN_RC
753 MMGCRamRegisterTrapHandler(pVM);
754#endif
755
756 /* copy loop. */
757 while (cTransfers)
758 {
759 uint32_t u32Data = 0;
760 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
761 if (rc != VINF_SUCCESS)
762 break;
763 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
764 if (rc != VINF_SUCCESS)
765 break;
766
767 pu8Virt += offIncrement;
768 Phys += offIncrement;
769 pRegFrame->rsi += offIncrement;
770 pRegFrame->rdi += offIncrement;
771 cTransfers--;
772 }
773#ifdef IN_RC
774 MMGCRamDeregisterTrapHandler(pVM);
775#endif
776 /* Update ecx. */
777 if (pCpu->fPrefix & DISPREFIX_REP)
778 pRegFrame->ecx = cTransfers;
779 }
780 else
781 rc = VINF_IOM_R3_MMIO_READ_WRITE;
782 }
783 else
784 {
785 /*
786 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
787 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
788 */
789 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
790
791 /* Check callback. */
792 if (!pRange->CTX_SUFF(pfnReadCallback))
793 return VINF_IOM_R3_MMIO_READ;
794
795 /* Convert destination address. */
796 RTGCUINTPTR pu8Virt;
797 rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
798 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
799 (RTGCPTR *)&pu8Virt);
800 if (RT_FAILURE(rc))
801 return VINF_IOM_R3_MMIO_READ;
802
803 /* Check if destination address is MMIO. */
804 PIOMMMIORANGE pMMIODst;
805 RTGCPHYS PhysDst;
806 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
807 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
808 if ( RT_SUCCESS(rc)
809 && (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
810 {
811 /** @todo implement per-device locks for MMIO access. */
812 Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
813
814 /*
815 * Extra: [MMIO] -> [MMIO]
816 */
817 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
818 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
819 {
820 iomMmioReleaseRange(pVM, pRange);
821 return VINF_IOM_R3_MMIO_READ_WRITE;
822 }
823
824 /* copy loop. */
825 while (cTransfers)
826 {
827 uint32_t u32Data;
828 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
829 if (rc != VINF_SUCCESS)
830 break;
831 rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
832 if (rc != VINF_SUCCESS)
833 break;
834
835 Phys += offIncrement;
836 PhysDst += offIncrement;
837 pRegFrame->rsi += offIncrement;
838 pRegFrame->rdi += offIncrement;
839 cTransfers--;
840 }
841 iomMmioReleaseRange(pVM, pRange);
842 }
843 else
844 {
845 /*
846 * Normal: [MMIO] -> [Mem]
847 */
848 /* Access verification first; we currently can't recover properly from traps inside this instruction */
849 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
850 if (rc != VINF_SUCCESS)
851 {
852 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
853 return VINF_EM_RAW_EMULATE_INSTR;
854 }
855
856 /* copy loop. */
857#ifdef IN_RC
858 MMGCRamRegisterTrapHandler(pVM);
859#endif
860 while (cTransfers)
861 {
862 uint32_t u32Data;
863 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
864 if (rc != VINF_SUCCESS)
865 break;
866 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
867 if (rc != VINF_SUCCESS)
868 {
869 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
870 break;
871 }
872
873 pu8Virt += offIncrement;
874 Phys += offIncrement;
875 pRegFrame->rsi += offIncrement;
876 pRegFrame->rdi += offIncrement;
877 cTransfers--;
878 }
879#ifdef IN_RC
880 MMGCRamDeregisterTrapHandler(pVM);
881#endif
882 }
883
884 /* Update ecx on exit. */
885 if (pCpu->fPrefix & DISPREFIX_REP)
886 pRegFrame->ecx = cTransfers;
887 }
888
889 /* work statistics. */
890 if (rc == VINF_SUCCESS)
891 iomMMIOStatLength(pVM, cb);
892 NOREF(ppStat);
893 return rc;
894}
895#endif /* IOM_WITH_MOVS_SUPPORT */
896
897
898/**
899 * Gets the address / opcode mask corresponding to the given CPU mode.
900 *
901 * @returns Mask.
902 * @param enmCpuMode CPU mode.
903 */
904static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
905{
906 switch (enmCpuMode)
907 {
908 case DISCPUMODE_16BIT: return UINT16_MAX;
909 case DISCPUMODE_32BIT: return UINT32_MAX;
910 case DISCPUMODE_64BIT: return UINT64_MAX;
911 default:
912 AssertFailedReturn(UINT32_MAX);
913 }
914}
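
/* Editorial example (not in the original source): in 16-bit address mode the
   mask is 0xffff, so "cTransfers = pRegFrame->rcx & fAddrMask" honours CX
   only, and updates of the form
       pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
                      | (pRegFrame->rdi & ~fAddrMask);
   step DI within its 16-bit window while leaving the upper bits of RDI
   untouched, mirroring string-instruction address wrapping. */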
915
916
917/**
918 * [REP] STOSB
919 * [REP] STOSW
920 * [REP] STOSD
921 *
922 * Restricted implementation.
923 *
924 *
925 * @returns VBox status code.
926 *
927 * @param pVM The virtual machine.
928 * @param pRegFrame Trap register frame.
929 * @param GCPhysFault The GC physical address corresponding to pvFault.
930 * @param pCpu Disassembler CPU state.
931 * @param pRange Pointer to the MMIO range.
932 */
933static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
934{
935 /*
936 * We do not support segment prefixes or REPNE.
937 */
938 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
939 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
940
941 /*
942 * Get bytes/words/dwords/qwords count to copy.
943 */
944 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
945 RTGCUINTREG cTransfers = 1;
946 if (pCpu->fPrefix & DISPREFIX_REP)
947 {
948#ifndef IN_RC
949 if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM))
950 && pRegFrame->rcx >= _4G)
951 return VINF_EM_RAW_EMULATE_INSTR;
952#endif
953
954 cTransfers = pRegFrame->rcx & fAddrMask;
955 if (!cTransfers)
956 return VINF_SUCCESS;
957 }
958
959/** @todo r=bird: bounds checks! */
960
961 /*
962 * Get data size.
963 */
964 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
965 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
966 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
967
968#ifdef VBOX_WITH_STATISTICS
969 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
970 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
971#endif
972
973
974 RTGCPHYS Phys = GCPhysFault;
975 int rc;
976 if ( pRange->CTX_SUFF(pfnFillCallback)
977 && cb <= 4 /* can only fill 32-bit values */)
978 {
979 /*
980 * Use the fill callback.
981 */
982 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
983 if (offIncrement > 0)
984 {
985 /* addr++ variant. */
986 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
987 pRegFrame->eax, cb, cTransfers);
988 if (rc == VINF_SUCCESS)
989 {
990 /* Update registers. */
991 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
992 | (pRegFrame->rdi & ~fAddrMask);
993 if (pCpu->fPrefix & DISPREFIX_REP)
994 pRegFrame->rcx &= ~fAddrMask;
995 }
996 }
997 else
998 {
999 /* addr-- variant. */
1000 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1001 Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
1002 pRegFrame->eax, cb, cTransfers);
1003 if (rc == VINF_SUCCESS)
1004 {
1005 /* Update registers. */
1006 pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1007 | (pRegFrame->rdi & ~fAddrMask);
1008 if (pCpu->fPrefix & DISPREFIX_REP)
1009 pRegFrame->rcx &= ~fAddrMask;
1010 }
1011 }
1012 }
1013 else
1014 {
1015 /*
1016 * Use the write callback.
1017 */
1018 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
1019 uint64_t u64Data = pRegFrame->rax;
1020
1021 /* fill loop. */
1022 do
1023 {
1024 rc = iomMMIODoWrite(pVM, pRange, Phys, &u64Data, cb);
1025 if (rc != VINF_SUCCESS)
1026 break;
1027
1028 Phys += offIncrement;
1029 pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
1030 | (pRegFrame->rdi & ~fAddrMask);
1031 cTransfers--;
1032 } while (cTransfers);
1033
1034 /* Update rcx on exit. */
1035 if (pCpu->fPrefix & DISPREFIX_REP)
1036 pRegFrame->rcx = (cTransfers & fAddrMask)
1037 | (pRegFrame->rcx & ~fAddrMask);
1038 }
1039
1040 /*
1041 * Work statistics and return.
1042 */
1043 if (rc == VINF_SUCCESS)
1044 iomMMIOStatLength(pVM, cb);
1045 return rc;
1046}
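
/* Editorial arithmetic check (hypothetical values): for REP STOSD with
   cTransfers == 5, cb == 4 and DF clear, the fill callback writes 5 dwords
   starting at Phys, and RDI advances by 5 << SIZE_2_SHIFT(4) == 20 bytes
   (masked to the current address width). With DF set, the highest dword
   written is at Phys itself, so the callback is handed
   Phys - (5 - 1) * 4 == Phys - 16 as the start address and RDI moves down
   by 20 bytes instead. */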
1047
1048
1049/**
1050 * [REP] LODSB
1051 * [REP] LODSW
1052 * [REP] LODSD
1053 *
1054 * Restricted implementation.
1055 *
1056 *
1057 * @returns VBox status code.
1058 *
1059 * @param pVM The virtual machine.
1060 * @param pRegFrame Trap register frame.
1061 * @param GCPhysFault The GC physical address corresponding to pvFault.
1062 * @param pCpu Disassembler CPU state.
1063 * @param pRange Pointer to the MMIO range.
1064 */
1065static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1066{
1067 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1068
1069 /*
1070 * We do not support segment prefixes or REP*.
1071 */
1072 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
1073 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1074
1075 /*
1076 * Get data size.
1077 */
1078 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
1079 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1080 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1081
1082 /*
1083 * Perform read.
1084 */
1085 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
1086 if (rc == VINF_SUCCESS)
1087 {
1088 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1089 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
1090 | (pRegFrame->rsi & ~fAddrMask);
1091 }
1092
1093 /*
1094 * Work statistics and return.
1095 */
1096 if (rc == VINF_SUCCESS)
1097 iomMMIOStatLength(pVM, cb);
1098 return rc;
1099}
1100
1101
1102/**
1103 * CMP [MMIO], reg|imm
1104 * CMP reg|imm, [MMIO]
1105 *
1106 * Restricted implementation.
1107 *
1108 *
1109 * @returns VBox status code.
1110 *
1111 * @param pVM The virtual machine.
1112 * @param pRegFrame Trap register frame.
1113 * @param GCPhysFault The GC physical address corresponding to pvFault.
1114 * @param pCpu Disassembler CPU state.
1115 * @param pRange Pointer to the MMIO range.
1116 */
1117static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1118{
1119 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1120
1121 /*
1122 * Get the operands.
1123 */
1124 unsigned cb = 0;
1125 uint64_t uData1 = 0;
1126 uint64_t uData2 = 0;
1127 int rc;
1128 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1129 /* cmp reg, [MMIO]. */
1130 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1131 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1132 /* cmp [MMIO], reg|imm. */
1133 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1134 else
1135 {
1136 AssertMsgFailed(("Disassembler CMP problem..\n"));
1137 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1138 }
1139
1140 if (rc == VINF_SUCCESS)
1141 {
1142#if HC_ARCH_BITS == 32
1143 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1144 if (cb > 4)
1145 return VINF_IOM_R3_MMIO_READ_WRITE;
1146#endif
1147 /* Emulate CMP and update guest flags. */
1148 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
1149 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1150 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1151 iomMMIOStatLength(pVM, cb);
1152 }
1153
1154 return rc;
1155}
1156
1157
1158/**
1159 * AND [MMIO], reg|imm
1160 * AND reg, [MMIO]
1161 * OR [MMIO], reg|imm
1162 * OR reg, [MMIO]
1163 *
1164 * Restricted implementation.
1165 *
1166 *
1167 * @returns VBox status code.
1168 *
1169 * @param pVM The virtual machine.
1170 * @param pRegFrame Trap register frame.
1171 * @param GCPhysFault The GC physical address corresponding to pvFault.
1172 * @param pCpu Disassembler CPU state.
1173 * @param pRange Pointer to the MMIO range.
1174 * @param pfnEmulate Instruction emulation function.
1175 */
1176static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
1177{
1178 unsigned cb = 0;
1179 uint64_t uData1 = 0;
1180 uint64_t uData2 = 0;
1181 bool fAndWrite;
1182 int rc;
1183
1184#ifdef LOG_ENABLED
1185 const char *pszInstr;
1186
1187 if (pCpu->pCurInstr->uOpcode == OP_XOR)
1188 pszInstr = "Xor";
1189 else if (pCpu->pCurInstr->uOpcode == OP_OR)
1190 pszInstr = "Or";
1191 else if (pCpu->pCurInstr->uOpcode == OP_AND)
1192 pszInstr = "And";
1193 else
1194 pszInstr = "OrXorAnd??";
1195#endif
1196
1197 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1198 {
1199#if HC_ARCH_BITS == 32
1200 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1201 if (cb > 4)
1202 return VINF_IOM_R3_MMIO_READ_WRITE;
1203#endif
1204 /* and reg, [MMIO]. */
1205 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1206 fAndWrite = false;
1207 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1208 }
1209 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1210 {
1211#if HC_ARCH_BITS == 32
1212 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1213 if (cb > 4)
1214 return VINF_IOM_R3_MMIO_READ_WRITE;
1215#endif
1216 /* and [MMIO], reg|imm. */
1217 fAndWrite = true;
1218 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
1219 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
1220 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1221 else
1222 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1223 }
1224 else
1225 {
1226 AssertMsgFailed(("Disassembler AND problem..\n"));
1227 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1228 }
1229
1230 if (rc == VINF_SUCCESS)
1231 {
1232 /* Emulate AND and update guest flags. */
1233 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
1234
1235 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
1236
1237 if (fAndWrite)
1238 /* Store result to MMIO. */
1239 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
1240 else
1241 {
1242 /* Store result to register. */
1243 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
1244 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1245 }
1246 if (rc == VINF_SUCCESS)
1247 {
1248 /* Update guest's eflags and finish. */
1249 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1250 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1251 iomMMIOStatLength(pVM, cb);
1252 }
1253 }
1254
1255 return rc;
1256}
1257
1258
1259/**
1260 * TEST [MMIO], reg|imm
1261 * TEST reg, [MMIO]
1262 *
1263 * Restricted implementation.
1264 *
1265 *
1266 * @returns VBox status code.
1267 *
1268 * @param pVM The virtual machine.
1269 * @param pRegFrame Trap register frame.
1270 * @param GCPhysFault The GC physical address corresponding to pvFault.
1271 * @param pCpu Disassembler CPU state.
1272 * @param pRange Pointer to the MMIO range.
1273 */
1274static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1275{
1276 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1277
1278 unsigned cb = 0;
1279 uint64_t uData1 = 0;
1280 uint64_t uData2 = 0;
1281 int rc;
1282
1283 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1284 {
1285 /* test reg, [MMIO]. */
1286 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1287 }
1288 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1289 {
1290 /* test [MMIO], reg|imm. */
1291 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1292 }
1293 else
1294 {
1295 AssertMsgFailed(("Disassembler TEST problem..\n"));
1296 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1297 }
1298
1299 if (rc == VINF_SUCCESS)
1300 {
1301#if HC_ARCH_BITS == 32
1302 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1303 if (cb > 4)
1304 return VINF_IOM_R3_MMIO_READ_WRITE;
1305#endif
1306
1307 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
1308 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
1309 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1310 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1311 iomMMIOStatLength(pVM, cb);
1312 }
1313
1314 return rc;
1315}
1316
1317
1318/**
1319 * BT [MMIO], reg|imm
1320 *
1321 * Restricted implementation.
1322 *
1323 *
1324 * @returns VBox status code.
1325 *
1326 * @param pVM The virtual machine.
1327 * @param pRegFrame Trap register frame.
1328 * @param GCPhysFault The GC physical address corresponding to pvFault.
1329 * @param pCpu Disassembler CPU state.
1330 * @param pRange Pointer to the MMIO range.
1331 */
1332static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1333{
1334 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1335
1336 uint64_t uBit = 0;
1337 uint64_t uData = 0;
1338 unsigned cbIgnored;
1339
1340 if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
1341 {
1342 AssertMsgFailed(("Disassembler BT problem..\n"));
1343 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1344 }
1345 /* Only the size of the memory operand matters here. */
1346 unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
1347
1348 /* bt [MMIO], reg|imm. */
1349 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
1350 if (rc == VINF_SUCCESS)
1351 {
1352 /* Find the bit inside the faulting address */
1353 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
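        /* Editorial note (not in the original source): u1CF is a one-bit
           bitfield, so the assignment above implicitly keeps only bit 0 of
           (uData >> uBit); e.g. uData == 0x0a (1010b), uBit == 3 gives CF = 1. */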
1354 iomMMIOStatLength(pVM, cbData);
1355 }
1356
1357 return rc;
1358}
1359
1360/**
1361 * XCHG [MMIO], reg
1362 * XCHG reg, [MMIO]
1363 *
1364 * Restricted implementation.
1365 *
1366 *
1367 * @returns VBox status code.
1368 *
1369 * @param pVM The virtual machine.
1370 * @param pRegFrame Trap register frame.
1371 * @param GCPhysFault The GC physical address corresponding to pvFault.
1372 * @param pCpu Disassembler CPU state.
1373 * @param pRange Pointer to the MMIO range.
1374 */
1375static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1376{
1377 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1378 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1379 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1380 return VINF_IOM_R3_MMIO_READ_WRITE;
1381
1382 int rc;
1383 unsigned cb = 0;
1384 uint64_t uData1 = 0;
1385 uint64_t uData2 = 0;
1386 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1387 {
1388 /* xchg reg, [MMIO]. */
1389 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1390 if (rc == VINF_SUCCESS)
1391 {
1392 /* Store result to MMIO. */
1393 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
1394
1395 if (rc == VINF_SUCCESS)
1396 {
1397 /* Store result to register. */
1398 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
1399 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1400 }
1401 else
1402 Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1403 }
1404 else
1405 Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1406 }
1407 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1408 {
1409 /* xchg [MMIO], reg. */
1410 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1411 if (rc == VINF_SUCCESS)
1412 {
1413 /* Store result to MMIO. */
1414 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
1415 if (rc == VINF_SUCCESS)
1416 {
1417 /* Store result to register. */
1418 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
1419 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1420 }
1421 else
1422 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
1423 }
1424 else
1425 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
1426 }
1427 else
1428 {
1429 AssertMsgFailed(("Disassembler XCHG problem..\n"));
1430 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1431 }
1432 return rc;
1433}
1434
1435
1436/**
1437 * \#PF Handler callback for MMIO ranges.
1438 *
1439 * @returns VBox status code (appropriate for GC return).
1440 * @param pVM Pointer to the VM.
1441 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1442 * any error code (the EPT misconfig hack).
1443 * @param pCtxCore Trap register frame.
1444 * @param GCPhysFault The GC physical address corresponding to pvFault.
1445 * @param pvUser Pointer to the MMIO ring-3 range entry.
1446 */
1447static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
1448{
1449 /* Take the IOM lock before performing any MMIO. */
1450 int rc = IOM_LOCK(pVM);
1451#ifndef IN_RING3
1452 if (rc == VERR_SEM_BUSY)
1453 return VINF_IOM_R3_MMIO_READ_WRITE;
1454#endif
1455 AssertRC(rc);
1456
1457 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1458 Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
1459 GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1460
1461 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1462 Assert(pRange);
1463 Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
1464
1465#ifdef VBOX_WITH_STATISTICS
1466 /*
1467 * Locate the statistics; if the range is larger than PAGE_SIZE we'll use the first byte's entry for everything.
1468 */
1469 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
1470 if (!pStats)
1471 {
1472# ifdef IN_RING3
1473 IOM_UNLOCK(pVM);
1474 return VERR_NO_MEMORY;
1475# else
1476 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1477 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1478 IOM_UNLOCK(pVM);
1479 return VINF_IOM_R3_MMIO_READ_WRITE;
1480# endif
1481 }
1482#endif
1483
1484#ifndef IN_RING3
1485 /*
1486 * Should we defer the request right away? This isn't usually the case, so
1487 * do the simple test first and then try to deal with uErrorCode being N/A.
1488 */
1489 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1490 || !pRange->CTX_SUFF(pfnReadCallback))
1491 && ( uErrorCode == UINT32_MAX
1492 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1493 : uErrorCode & X86_TRAP_PF_RW
1494 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1495 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1496 )
1497 )
1498 )
1499 {
1500 if (uErrorCode & X86_TRAP_PF_RW)
1501 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1502 else
1503 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1504
1505 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1506 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1507 IOM_UNLOCK(pVM);
1508 return VINF_IOM_R3_MMIO_READ_WRITE;
1509 }
1510#endif /* !IN_RING3 */
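
/* Editorial restatement of the defer test above (no new behaviour implied):
   the access is pushed to ring-3 when this context lacks a callback it may
   need but ring-3 registered one. With a real #PF error code the direction
   is known, so only the matching read or write callback pair is tested; with
   the EPT misconfig hack (uErrorCode == UINT32_MAX) the direction is unknown,
   so it suffices that some context callback is missing and ring-3 has either
   callback registered. */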
1511
1512 /*
1513 * Retain the range and do locking.
1514 */
1515 iomMmioRetainRange(pRange);
1516 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1517 IOM_UNLOCK(pVM);
1518 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1519 if (rc != VINF_SUCCESS)
1520 {
1521 iomMmioReleaseRange(pVM, pRange);
1522 return rc;
1523 }
1524
1525 /*
1526 * Disassemble the instruction and interpret it.
1527 */
1528 PVMCPU pVCpu = VMMGetCpu(pVM);
1529 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1530 unsigned cbOp;
1531 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1532 if (RT_FAILURE(rc))
1533 {
1534 iomMmioReleaseRange(pVM, pRange);
1535 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1536 return rc;
1537 }
1538 switch (pDis->pCurInstr->uOpcode)
1539 {
1540 case OP_MOV:
1541 case OP_MOVZX:
1542 case OP_MOVSX:
1543 {
1544 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1545 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1546 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1547 ? uErrorCode & X86_TRAP_PF_RW
1548 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1549 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1550 else
1551 rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1552 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1553 break;
1554 }
1555
1556
1557#ifdef IOM_WITH_MOVS_SUPPORT
1558 case OP_MOVSB:
1559 case OP_MOVSWD:
1560 {
1561 if (uErrorCode == UINT32_MAX)
1562 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1563 else
1564 {
1565 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1566 PSTAMPROFILE pStat = NULL;
1567 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1568 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1569 }
1570 break;
1571 }
1572#endif
1573
1574 case OP_STOSB:
1575 case OP_STOSWD:
1576 Assert(uErrorCode & X86_TRAP_PF_RW);
1577 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1578 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1579 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1580 break;
1581
1582 case OP_LODSB:
1583 case OP_LODSWD:
1584 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1585 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1586 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1587 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1588 break;
1589
1590 case OP_CMP:
1591 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1592 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1593 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1594 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1595 break;
1596
1597 case OP_AND:
1598 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1599 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1600 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1601 break;
1602
1603 case OP_OR:
1604 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1605 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1606 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1607 break;
1608
1609 case OP_XOR:
1610 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1611 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1612 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1613 break;
1614
1615 case OP_TEST:
1616 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1617 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1618 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1619 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1620 break;
1621
1622 case OP_BT:
1623 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1624 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1625 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1626 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1627 break;
1628
1629 case OP_XCHG:
1630 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1631 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1632 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1633 break;
1634
1635
1636 /*
1637 * The instruction isn't supported. Hand it on to ring-3.
1638 */
1639 default:
1640 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1641 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1642 break;
1643 }
1644
1645 /*
1646 * On success advance EIP.
1647 */
1648 if (rc == VINF_SUCCESS)
1649 pCtxCore->rip += cbOp;
1650 else
1651 {
1652 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1653#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1654 switch (rc)
1655 {
1656 case VINF_IOM_R3_MMIO_READ:
1657 case VINF_IOM_R3_MMIO_READ_WRITE:
1658 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1659 break;
1660 case VINF_IOM_R3_MMIO_WRITE:
1661 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1662 break;
1663 }
1664#endif
1665 }
1666
1667 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1668 iomMmioReleaseRange(pVM, pRange);
1669 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1670 return rc;
1671}
1672
1673/**
1674 * \#PF Handler callback for MMIO ranges.
1675 *
1676 * @returns VBox status code (appropriate for GC return).
1677 * @param pVM Pointer to the VM.
1678 * @param uErrorCode CPU Error code.
1679 * @param pCtxCore Trap register frame.
1680 * @param pvFault The fault address (cr2).
1681 * @param GCPhysFault The GC physical address corresponding to pvFault.
1682 * @param pvUser Pointer to the MMIO ring-3 range entry.
1683 */
1684VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1685{
1686 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1687 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1688 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1689 return VBOXSTRICTRC_VAL(rcStrict);
1690}
1691
1692/**
1693 * Physical access handler for MMIO ranges.
1694 *
1695 * @returns VBox status code (appropriate for GC return).
1696 * @param pVM Pointer to the VM.
1697 * @param uErrorCode CPU Error code.
1698 * @param pCtxCore Trap register frame.
1699 * @param GCPhysFault The GC physical address.
1700 */
1701VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1702{
1703 int rc2 = IOM_LOCK(pVM); NOREF(rc2);
1704#ifndef IN_RING3
1705 if (rc2 == VERR_SEM_BUSY)
1706 return VINF_IOM_R3_MMIO_READ_WRITE;
1707#endif
1708 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMmioGetRange(pVM, GCPhysFault));
1709 IOM_UNLOCK(pVM);
1710 return VBOXSTRICTRC_VAL(rcStrict);
1711}
1712
1713
1714#ifdef IN_RING3
1715/**
1716 * Memory access handler callback for MMIO ranges.
1717 *
1718 * @returns VINF_SUCCESS if the handler has carried out the operation.
1719 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1720 * @param pVM Pointer to the VM.
1721 * @param GCPhysFault The physical address the guest is accessing.
1722 * @param pvPhys The HC mapping of that address.
1723 * @param pvBuf What the guest is reading/writing.
1724 * @param cbBuf How much it's reading/writing.
1725 * @param enmAccessType The access type.
1726 * @param pvUser Pointer to the MMIO range entry.
1727 */
1728DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
1729 PGMACCESSTYPE enmAccessType, void *pvUser)
1730{
1731 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1732 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1733
1734 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1735 AssertPtr(pRange);
1736 NOREF(pvPhys);
1737
1738 /*
1739 * Validate the range.
1740 */
1741 int rc = IOM_LOCK(pVM);
1742 AssertRC(rc);
1743 Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
1744
1745 /*
1746 * Perform locking.
1747 */
1748 iomMmioRetainRange(pRange);
1749 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1750 IOM_UNLOCK(pVM);
1751 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1752 if (rc != VINF_SUCCESS)
1753 {
1754 iomMmioReleaseRange(pVM, pRange);
1755 return rc;
1756 }
1757
1758 /*
1759 * Perform the access.
1760 */
1761 if (enmAccessType == PGMACCESSTYPE_READ)
1762 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1763 else
1764 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1765
1766 AssertRC(rc);
1767 iomMmioReleaseRange(pVM, pRange);
1768 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1769 return rc;
1770}
1771#endif /* IN_RING3 */
1772
1773
1774/**
1775 * Reads a MMIO register.
1776 *
1777 * @returns VBox status code.
1778 *
1779 * @param pVM Pointer to the VM.
1780 * @param GCPhys The physical address to read.
1781 * @param pu32Value Where to store the value read.
1782 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1783 */
1784VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1785{
1786 /* Take the IOM lock before performing any MMIO. */
1787 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1788#ifndef IN_RING3
1789 if (rc == VERR_SEM_BUSY)
1790 return VINF_IOM_R3_MMIO_WRITE;
1791#endif
1792 AssertRC(VBOXSTRICTRC_VAL(rc));
1793#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1794 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1795#endif
1796
1797 /*
1798 * Lookup the current context range node and statistics.
1799 */
1800 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1801 if (!pRange)
1802 {
1803 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1804 IOM_UNLOCK(pVM);
1805 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1806 }
1807#ifdef VBOX_WITH_STATISTICS
1808 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1809 if (!pStats)
1810 {
1811 IOM_UNLOCK(pVM);
1812# ifdef IN_RING3
1813 return VERR_NO_MEMORY;
1814# else
1815 return VINF_IOM_R3_MMIO_READ;
1816# endif
1817 }
1818 STAM_COUNTER_INC(&pStats->Accesses);
1819#endif /* VBOX_WITH_STATISTICS */
1820
1821 if (pRange->CTX_SUFF(pfnReadCallback))
1822 {
1823 /*
1824 * Perform locking.
1825 */
1826 iomMmioRetainRange(pRange);
1827 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1828 IOM_UNLOCK(pVM);
1829 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
1830 if (rc != VINF_SUCCESS)
1831 {
1832 iomMmioReleaseRange(pVM, pRange);
1833 return rc;
1834 }
1835
1836 /*
1837 * Perform the read and deal with the result.
1838 */
1839 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1840 if ( (cbValue == 4 && !(GCPhys & 3))
1841 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
1842 || (cbValue == 8 && !(GCPhys & 7)) )
1843 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
1844 pu32Value, (unsigned)cbValue);
1845 else
1846 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
1847 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1848 switch (VBOXSTRICTRC_VAL(rc))
1849 {
1850 case VINF_SUCCESS:
1851 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1852 iomMmioReleaseRange(pVM, pRange);
1853 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1854 return rc;
1855#ifndef IN_RING3
1856 case VINF_IOM_R3_MMIO_READ:
1857 case VINF_IOM_R3_MMIO_READ_WRITE:
1858 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1859#endif
1860 default:
1861 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1862 iomMmioReleaseRange(pVM, pRange);
1863 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1864 return rc;
1865
1866 case VINF_IOM_MMIO_UNUSED_00:
1867 iomMMIODoRead00s(pu32Value, cbValue);
1868 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1869 iomMmioReleaseRange(pVM, pRange);
1870 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1871 return VINF_SUCCESS;
1872
1873 case VINF_IOM_MMIO_UNUSED_FF:
1874 iomMMIODoReadFFs(pu32Value, cbValue);
1875 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1876 iomMmioReleaseRange(pVM, pRange);
1877 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1878 return VINF_SUCCESS;
1879 }
1880 /* not reached */
1881 }
1882#ifndef IN_RING3
1883 if (pRange->pfnReadCallbackR3)
1884 {
1885 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1886 IOM_UNLOCK(pVM);
1887 return VINF_IOM_R3_MMIO_READ;
1888 }
1889#endif
1890
1891 /*
1892 * Unassigned memory - this is actually not supposed to happen...
1893 */
1894 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1895 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1896 iomMMIODoReadFFs(pu32Value, cbValue);
1897 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1898 IOM_UNLOCK(pVM);
1899 return VINF_SUCCESS;
1900}
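/*
 * Usage sketch, not taken from any caller in the tree: reading a 32-bit
 * register and honouring the ring-3 deferral statuses that R0/RC contexts
 * may return. GCPhysReg is a placeholder for a device register address.
 *
 * @code
 *      uint32_t u32Value = 0;
 *      VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, GCPhysReg, &u32Value, sizeof(u32Value));
 *      if (rcStrict == VINF_SUCCESS)
 *          Log(("reg %RGp=%#RX32\n", GCPhysReg, u32Value));
 *      else if (   rcStrict == VINF_IOM_R3_MMIO_READ
 *               || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
 *          return rcStrict; // R0/RC only: redo the access in ring-3.
 * @endcode
 */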
1901
1902
1903/**
1904 * Writes to a MMIO register.
1905 *
1906 * @returns VBox status code.
1907 *
1908 * @param pVM Pointer to the VM.
1909 * @param GCPhys The physical address to write to.
1910 * @param u32Value The value to write.
1911 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
1912 */
1913VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1914{
1915 /* Take the IOM lock before performing any MMIO. */
1916 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1917#ifndef IN_RING3
1918 if (rc == VERR_SEM_BUSY)
1919 return VINF_IOM_R3_MMIO_WRITE;
1920#endif
1921 AssertRC(VBOXSTRICTRC_VAL(rc));
1922#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1923 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
1924#endif
1925
1926 /*
1927 * Lookup the current context range node.
1928 */
1929 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1930 if (!pRange)
1931 {
1932 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1933 IOM_UNLOCK(pVM);
1934 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1935 }
1936#ifdef VBOX_WITH_STATISTICS
1937 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1938 if (!pStats)
1939 {
1940 IOM_UNLOCK(pVM);
1941# ifdef IN_RING3
1942 return VERR_NO_MEMORY;
1943# else
1944 return VINF_IOM_R3_MMIO_WRITE;
1945# endif
1946 }
1947 STAM_COUNTER_INC(&pStats->Accesses);
1948#endif /* VBOX_WITH_STATISTICS */
1949
1950 if (pRange->CTX_SUFF(pfnWriteCallback))
1951 {
1952 /*
1953 * Perform locking.
1954 */
1955 iomMmioRetainRange(pRange);
1956 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1957 IOM_UNLOCK(pVM);
1958 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
1959 if (rc != VINF_SUCCESS)
1960 {
1961 iomMmioReleaseRange(pVM, pRange);
1962 return rc;
1963 }
1964
1965 /*
1966 * Perform the write.
1967 */
1968 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1969 if ( (cbValue == 4 && !(GCPhys & 3))
1970 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
1971 || (cbValue == 8 && !(GCPhys & 7)) )
1972 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1973 GCPhys, &u32Value, (unsigned)cbValue);
1974 else
1975 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
1976 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1977#ifndef IN_RING3
1978 if ( rc == VINF_IOM_R3_MMIO_WRITE
1979 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
1980 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1981#endif
1982 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1983 iomMmioReleaseRange(pVM, pRange);
1984 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1985 return rc;
1986 }
1987#ifndef IN_RING3
1988 if (pRange->pfnWriteCallbackR3)
1989 {
1990 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1991 IOM_UNLOCK(pVM);
1992 return VINF_IOM_R3_MMIO_WRITE;
1993 }
1994#endif
1995
1996 /*
1997 * No write handler, nothing to do.
1998 */
1999 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2000 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2001 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2002 IOM_UNLOCK(pVM);
2003 return VINF_SUCCESS;
2004}
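/*
 * Usage sketch, not from an actual caller: a 2-byte write to an address that
 * is not dword aligned; such an access is routed through
 * iomMMIODoComplicatedWrite() unless the range uses write pass-through.
 * GCPhysReg is a placeholder register address.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, GCPhysReg + 2, 0x1234, 2);
 *      if (   rcStrict == VINF_IOM_R3_MMIO_WRITE
 *          || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
 *          return rcStrict; // R0/RC only: redo the access in ring-3.
 * @endcode
 */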
2005
2006
2007/**
2008 * [REP*] INSB/INSW/INSD
2009 * ES:EDI,DX[,ECX]
2010 *
2011 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2012 *
2013 * @returns Strict VBox status code. Informational status codes other than the one documented
2014 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2015 * @retval VINF_SUCCESS Success.
2016 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2017 * status code must be passed on to EM.
2018 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2019 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2020 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2021 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2022 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2023 *
2024 * @param pVM The virtual machine.
2025 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2026 * @param uPort IO Port
2027 * @param uPrefix IO instruction prefix
2028 * @param enmAddrMode The address mode.
2029 * @param cbTransfer Size of transfer unit
2030 */
2031VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2032 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2033{
2034 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2035
2036 /*
2037 * We do not support REPNE or decrementing destination
2038 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2039 */
2040 if ( (uPrefix & DISPREFIX_REPNE)
2041 || pRegFrame->eflags.Bits.u1DF)
2042 return VINF_EM_RAW_EMULATE_INSTR;
2043
2044 PVMCPU pVCpu = VMMGetCpu(pVM);
2045
2046 /*
2047 * Get bytes/words/dwords count to transfer.
2048 */
2049 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2050 RTGCUINTREG cTransfers = 1;
2051 if (uPrefix & DISPREFIX_REP)
2052 {
2053#ifndef IN_RC
2054 if ( CPUMIsGuestIn64BitCode(pVCpu)
2055 && pRegFrame->rcx >= _4G)
2056 return VINF_EM_RAW_EMULATE_INSTR;
2057#endif
2058 cTransfers = pRegFrame->rcx & fAddrMask;
2059 if (!cTransfers)
2060 return VINF_SUCCESS;
2061 }
2062
2063 /* Convert destination address es:edi. */
2064 RTGCPTR GCPtrDst;
2065 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2066 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2067 &GCPtrDst);
2068 if (RT_FAILURE(rc2))
2069 {
2070 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2071 return VINF_EM_RAW_EMULATE_INSTR;
2072 }
2073
2074 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2075 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2076 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2077 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2078 if (rc2 != VINF_SUCCESS)
2079 {
2080 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2081 return VINF_EM_RAW_EMULATE_INSTR;
2082 }
2083
2084 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2085 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2086 if (cTransfers > 1)
2087 {
2088 /* If the device supports string transfers, ask it to do as
2089 * much as it wants. The rest is done with single-word transfers. */
2090 const RTGCUINTREG cTransfersOrg = cTransfers;
2091 rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
2092 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2093 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2094 | (pRegFrame->rdi & ~fAddrMask);
2095 }
2096
2097#ifdef IN_RC
2098 MMGCRamRegisterTrapHandler(pVM);
2099#endif
2100 while (cTransfers && rcStrict == VINF_SUCCESS)
2101 {
2102 uint32_t u32Value;
2103 rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
2104 if (!IOM_SUCCESS(rcStrict))
2105 break;
2106 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2107 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2108 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2109 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2110 | (pRegFrame->rdi & ~fAddrMask);
2111 cTransfers--;
2112 }
2113#ifdef IN_RC
2114 MMGCRamDeregisterTrapHandler(pVM);
2115#endif
2116
2117 /* Update rcx on exit. */
2118 if (uPrefix & DISPREFIX_REP)
2119 pRegFrame->rcx = (cTransfers & fAddrMask)
2120 | (pRegFrame->rcx & ~fAddrMask);
2121
2122 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2123 return rcStrict;
2124}
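/*
 * Usage sketch with placeholder values: emulating "rep insb" on port 0x1f0
 * with 16-bit addressing. The caller must already have passed
 * IOMInterpretCheckPortIOAccess(), per the remark above.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IOMInterpretINSEx(pVM, pRegFrame, 0x1f0, DISPREFIX_REP,
 *                                                DISCPUMODE_16BIT, 1); // byte-sized units
 *      if (!IOM_SUCCESS(rcStrict))
 *          return rcStrict; // deferred to ring-3 or a raw failure
 * @endcode
 */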
2125
2126
2127/**
2128 * [REP*] INSB/INSW/INSD
2129 * ES:EDI,DX[,ECX]
2130 *
2131 * @returns Strict VBox status code. Informational status codes other than the one documented
2132 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2133 * @retval VINF_SUCCESS Success.
2134 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2135 * status code must be passed on to EM.
2136 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2137 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2138 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2139 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2140 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2141 *
2142 * @param pVM The virtual machine.
2143 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2144 * @param pCpu Disassembler CPU state.
2145 */
2146VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2147{
2148 /*
2149 * Get port number directly from the register (no need to bother the
2150 * disassembler). And get the I/O register size from the opcode / prefix.
2151 */
2152 RTIOPORT Port = pRegFrame->edx & 0xffff;
2153 unsigned cb = 0;
2154 if (pCpu->pCurInstr->uOpcode == OP_INSB)
2155 cb = 1;
2156 else
2157 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
2158
2159 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2160 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2161 {
2162 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2163 return rcStrict;
2164 }
2165
2166 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2167}
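/*
 * Dispatch sketch: a caller holding a disassembled instruction would route
 * the INS family here; the opcode names are from VBox/disopcode.h.
 *
 * @code
 *      if (   pCpu->pCurInstr->uOpcode == OP_INSB
 *          || pCpu->pCurInstr->uOpcode == OP_INSWD)
 *          rcStrict = IOMInterpretINS(pVM, pRegFrame, pCpu);
 * @endcode
 */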
2168
2169
2170/**
2171 * [REP*] OUTSB/OUTSW/OUTSD
2172 * DS:ESI,DX[,ECX]
2173 *
2174 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2175 *
2176 * @returns Strict VBox status code. Informational status codes other than the one documented
2177 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2178 * @retval VINF_SUCCESS Success.
2179 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2180 * status code must be passed on to EM.
2181 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2182 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2183 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2184 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2185 *
2186 * @param pVM The virtual machine.
2187 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2188 * @param uPort IO Port
2189 * @param uPrefix IO instruction prefix
2190 * @param enmAddrMode The address mode.
2191 * @param cbTransfer Size of transfer unit
2192 */
2193VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2194 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2195{
2196 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2197
2198 /*
2199 * We do not support segment prefixes, REPNE or
2200 * decrementing source pointer.
2201 */
2202 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2203 || pRegFrame->eflags.Bits.u1DF)
2204 return VINF_EM_RAW_EMULATE_INSTR;
2205
2206 PVMCPU pVCpu = VMMGetCpu(pVM);
2207
2208 /*
2209 * Get bytes/words/dwords count to transfer.
2210 */
2211 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2212 RTGCUINTREG cTransfers = 1;
2213 if (uPrefix & DISPREFIX_REP)
2214 {
2215#ifndef IN_RC
2216 if ( CPUMIsGuestIn64BitCode(pVCpu)
2217 && pRegFrame->rcx >= _4G)
2218 return VINF_EM_RAW_EMULATE_INSTR;
2219#endif
2220 cTransfers = pRegFrame->rcx & fAddrMask;
2221 if (!cTransfers)
2222 return VINF_SUCCESS;
2223 }
2224
2225 /* Convert source address ds:esi. */
2226 RTGCPTR GCPtrSrc;
2227 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2228 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2229 &GCPtrSrc);
2230 if (RT_FAILURE(rc2))
2231 {
2232 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2233 return VINF_EM_RAW_EMULATE_INSTR;
2234 }
2235
2236 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2237 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2238 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2239 (cpl == 3) ? X86_PTE_US : 0);
2240 if (rc2 != VINF_SUCCESS)
2241 {
2242 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2243 return VINF_EM_RAW_EMULATE_INSTR;
2244 }
2245
2246 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2247 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2248 if (cTransfers > 1)
2249 {
2250 /*
2251 * If the device supports string transfers, ask it to do as
2252 * much as it wants. The rest is done with single-word transfers.
2253 */
2254 const RTGCUINTREG cTransfersOrg = cTransfers;
2255 rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
2256 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2257 pRegFrame->rsi = ((pRegFrame->rsi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2258 | (pRegFrame->rsi & ~fAddrMask);
2259 }
2260
2261#ifdef IN_RC
2262 MMGCRamRegisterTrapHandler(pVM);
2263#endif
2264
2265 while (cTransfers && rcStrict == VINF_SUCCESS)
2266 {
2267 uint32_t u32Value = 0;
2268 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2269 if (rcStrict != VINF_SUCCESS)
2270 break;
2271 rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
2272 if (!IOM_SUCCESS(rcStrict))
2273 break;
2274 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbTransfer);
2275 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2276 | (pRegFrame->rsi & ~fAddrMask);
2277 cTransfers--;
2278 }
2279
2280#ifdef IN_RC
2281 MMGCRamDeregisterTrapHandler(pVM);
2282#endif
2283
2284 /* Update rcx on exit. */
2285 if (uPrefix & DISPREFIX_REP)
2286 pRegFrame->rcx = (cTransfers & fAddrMask)
2287 | (pRegFrame->rcx & ~fAddrMask);
2288
2289 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2290 return rcStrict;
2291}
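/*
 * Usage sketch with placeholder values: emulating "rep outsw" on port 0x60;
 * the source data is fetched from ds:esi as described above, and the caller
 * is assumed to have done the port access check already.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IOMInterpretOUTSEx(pVM, pRegFrame, 0x60, DISPREFIX_REP,
 *                                                 DISCPUMODE_32BIT, 2); // word-sized units
 *      if (!IOM_SUCCESS(rcStrict))
 *          return rcStrict;
 * @endcode
 */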
2292
2293
2294/**
2295 * [REP*] OUTSB/OUTSW/OUTSD
2296 * DS:ESI,DX[,ECX]
2297 *
2298 * @returns Strict VBox status code. Informational status codes other than the one documented
2299 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2300 * @retval VINF_SUCCESS Success.
2301 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2302 * status code must be passed on to EM.
2303 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2304 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
2305 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2306 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2307 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2308 *
2309 * @param pVM The virtual machine.
2310 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2311 * @param pCpu Disassembler CPU state.
2312 */
2313VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2314{
2315 /*
2316 * Get port number from the first parameter.
2317 * And get the I/O register size from the opcode / prefix.
2318 */
2319 uint64_t Port = 0;
2320 unsigned cb = 0;
2321 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &Port, &cb);
2322 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
2323 if (pCpu->pCurInstr->uOpcode == OP_OUTSB)
2324 cb = 1;
2325 else
2326 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
2327
2328 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2329 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2330 {
2331 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2332 return rcStrict;
2333 }
2334
2335 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2336}
2337
2338#ifndef IN_RC
2339
2340/**
2341 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2342 *
2343 * (This is a special optimization used by the VGA device.)
2344 *
2345 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2346 * remapping is made.
2347 *
2348 * @param pVM The virtual machine.
2349 * @param GCPhys The address of the MMIO page to be changed.
2350 * @param GCPhysRemapped The address of the MMIO2 page.
2351 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2352 * for the time being.
2353 */
2354VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2355{
2356 /* Currently only called from the VGA device during MMIO. */
2357 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2358 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2359 PVMCPU pVCpu = VMMGetCpu(pVM);
2360
2361 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2362 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2363 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2364 && !HMIsNestedPagingActive(pVM)))
2365 return VINF_SUCCESS; /* ignore */
2366
2367 int rc = IOM_LOCK(pVM);
2368 if (RT_FAILURE(rc))
2369 return VINF_SUCCESS; /* better luck the next time around */
2370
2371 /*
2372 * Lookup the context range node the page belongs to.
2373 */
2374 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
2375 AssertMsgReturn(pRange,
2376 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2377
2378 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2379 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2380
2381 /*
2382 * Do the aliasing; page align the addresses since PGM is picky.
2383 */
2384 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2385 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2386
2387 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2388
2389 IOM_UNLOCK(pVM);
2390 AssertRCReturn(rc, rc);
2391
2392 /*
2393 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2394 * can simply prefetch it.
2395 *
2396 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2397 */
2398#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2399# ifdef VBOX_STRICT
2400 uint64_t fFlags;
2401 RTHCPHYS HCPhys;
2402 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2403 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2404# endif
2405#endif
2406 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2407 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2408 return VINF_SUCCESS;
2409}
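/*
 * Usage sketch in the spirit of the VGA optimization noted above, with
 * placeholder addresses: alias the faulting MMIO page to its MMIO2 backing
 * so that subsequent accesses go straight to memory. Remember that
 * VINF_SUCCESS may be returned even when no remapping was performed.
 *
 * @code
 *      int rc = IOMMMIOMapMMIO2Page(pVM, GCPhysFault, GCPhysMmio2Backing,
 *                                   X86_PTE_RW | X86_PTE_P);
 *      AssertRC(rc);
 * @endcode
 */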
2410
2411
2412/**
2413 * Mapping a HC page in place of an MMIO page for direct access.
2414 *
2415 * (This is a special optimization used by the APIC in the VT-x case.)
2416 *
2417 * @returns VBox status code.
2418 *
2419 * @param pVM Pointer to the VM.
2420 * @param pVCpu Pointer to the VMCPU.
2421 * @param GCPhys The address of the MMIO page to be changed.
2422 * @param HCPhys The address of the host physical page.
2423 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2424 * for the time being.
2425 */
2426VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2427{
2428 /* Currently only called from VT-x code during a page fault. */
2429 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2430
2431 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2432 Assert(HMIsEnabled(pVM));
2433
2434 /*
2435 * Lookup the context range node the page belongs to.
2436 */
2437#ifdef VBOX_STRICT
2438 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2439 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2440 AssertMsgReturn(pRange,
2441 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2442 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2443 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2444#endif
2445
2446 /*
2447 * Do the aliasing; page align the addresses since PGM is picky.
2448 */
2449 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2450 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2451
2452 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2453 AssertRCReturn(rc, rc);
2454
2455 /*
2456 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2457 * can simply prefetch it.
2458 *
2459 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2460 */
2461 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2462 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2463 return VINF_SUCCESS;
2464}
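/*
 * Usage sketch with placeholder addresses: the VT-x APIC optimization
 * mentioned above would map a host physical page over the guest's APIC
 * MMIO page along these lines.
 *
 * @code
 *      int rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, HCPhysApicPage,
 *                                    X86_PTE_RW | X86_PTE_P);
 *      AssertRC(rc);
 * @endcode
 */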
2465
2466
2467/**
2468 * Reset a previously modified MMIO region; restore the access flags.
2469 *
2470 * @returns VBox status code.
2471 *
2472 * @param pVM The virtual machine.
2473 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2474 */
2475VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2476{
2477 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2478
2479 PVMCPU pVCpu = VMMGetCpu(pVM);
2480
2481 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2482 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2483 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2484 && !HMIsNestedPagingActive(pVM)))
2485 return VINF_SUCCESS; /* ignore */
2486
2487 /*
2488 * Lookup the context range node the page belongs to.
2489 */
2490#ifdef VBOX_STRICT
2491 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2492 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2493 AssertMsgReturn(pRange,
2494 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2495 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2496 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2497#endif
2498
2499 /*
2500 * Call PGM to do the work.
2501 *
2502 * After the call, all the pages should be non-present... unless there is
2503 * a page pool flush pending (unlikely).
2504 */
2505 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2506 AssertRC(rc);
2507
2508#ifdef VBOX_STRICT
2509 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2510 {
2511 uint32_t cb = pRange->cb;
2512 GCPhys = pRange->GCPhys;
2513 while (cb)
2514 {
2515 uint64_t fFlags;
2516 RTHCPHYS HCPhys;
2517 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2518 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2519 cb -= PAGE_SIZE;
2520 GCPhys += PAGE_SIZE;
2521 }
2522 }
2523#endif
2524 return rc;
2525}
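/*
 * Usage sketch: undoing the page aliasing set up by the two mapping APIs
 * above. Any address inside the MMIO region may be passed; GCPhysMmio is a
 * placeholder.
 *
 * @code
 *      int rc = IOMMMIOResetRegion(pVM, GCPhysMmio);
 *      AssertRC(rc);
 * @endcode
 */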
2526
2527#endif /* !IN_RC */
2528