VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 44564

Last change on this file since 44564 was 44564, checked in by vboxsync, 12 years ago

IOM: Adding IOMMMIO_FLAGS_WRITE_ONLY_DWORD, fixing parameter validation.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 88.2 KB
Line 
1/* $Id: IOMAllMMIO.cpp 44564 2013-02-06 13:56:21Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
32# include <VBox/vmm/iem.h>
33#endif
34#include "IOMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/hm.h>
38#include "IOMInline.h"
39
40#include <VBox/dis.h>
41#include <VBox/disopcode.h>
42#include <VBox/vmm/pdmdev.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <iprt/assert.h>
46#include <VBox/log.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
54
/**
 * Array for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 *
 * Indexed by the operand byte count.  Entries for sizes that are not 1, 2, 4
 * or 8 are ~0U and must never be used; SIZE_2_SHIFT callers only pass sizes
 * asserted to be valid power-of-two operand widths.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0U,   /* 0 - invalid */
    0,     /* *1 == 2^0 */
    1,     /* *2 == 2^1 */
    ~0U,   /* 3 - invalid */
    2,     /* *4 == 2^2 */
    ~0U,   /* 5 - invalid */
    ~0U,   /* 6 - invalid */
    ~0U,   /* 7 - invalid */
    3      /* *8 == 2^3 */
};

/**
 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
75
76
77/**
78 * Deals with complicated MMIO writes.
79 *
80 * Complicatd means unaligned or non-dword/qword align accesses depending on
81 * the MMIO region's access mode flags.
82 *
83 * @returns Strict VBox status code. Any EM scheduling status code,
84 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
85 * VINF_IOM_R3_MMIO_READ may be returned.
86 *
87 * @param pVM Pointer to the VM.
88 * @param pRange The range to write to.
89 * @param GCPhys The physical address to start writing.
90 * @param pvValue Where to store the value.
91 * @param cbValue The size of the value to write.
92 */
93static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
94{
95 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
96 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
97 VERR_IOM_MMIO_IPE_1);
98 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
99 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
100 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) >= IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING;
101
102 /*
103 * Do debug stop if requested.
104 */
105 int rc = VINF_SUCCESS; NOREF(pVM);
106#ifdef VBOX_STRICT
107 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
108 {
109# ifdef IN_RING3
110 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
111 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
112 if (rc == VERR_DBGF_NOT_ATTACHED)
113 rc = VINF_SUCCESS;
114# else
115 return VINF_IOM_R3_MMIO_WRITE;
116# endif
117 }
118#endif
119
120 /*
121 * Check if we should ignore the write.
122 */
123 if (pRange->fFlags & IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
124 {
125 Assert(cbValue != 4 || (GCPhys & 3));
126 return VINF_SUCCESS;
127 }
128
129 /*
130 * Split and conquer.
131 */
132 for (;;)
133 {
134 unsigned const offAccess = GCPhys & 3;
135 unsigned cbThisPart = 4 - offAccess;
136 if (cbThisPart > cbValue)
137 cbThisPart = cbValue;
138
139 /*
140 * Get the missing bits (if any).
141 */
142 uint32_t u32MissingValue = 0;
143 if (fReadMissing && cbThisPart != 4)
144 {
145 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
146 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
147 switch (rc2)
148 {
149 case VINF_SUCCESS:
150 break;
151 case VINF_IOM_MMIO_UNUSED_FF:
152 u32MissingValue = UINT32_C(0xffffffff);
153 break;
154 case VINF_IOM_MMIO_UNUSED_00:
155 u32MissingValue = 0;
156 break;
157 case VINF_IOM_R3_MMIO_READ:
158 case VINF_IOM_R3_MMIO_READ_WRITE:
159 case VINF_IOM_R3_MMIO_WRITE:
160 /** @todo What if we've split a transfer and already read
161 * something? Since writes generally have sideeffects we
162 * could be kind of screwed here... */
163 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
164 return rc2;
165 default:
166 if (RT_FAILURE(rc2))
167 {
168 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
169 return rc2;
170 }
171 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
172 if (rc == VINF_SUCCESS || rc2 < rc)
173 rc = rc2;
174 break;
175 }
176 }
177
178 /*
179 * Merge missing and given bits.
180 */
181 uint32_t u32GivenMask;
182 uint32_t u32GivenValue;
183 switch (cbThisPart)
184 {
185 case 1:
186 u32GivenValue = *(uint8_t const *)pvValue;
187 u32GivenMask = UINT32_C(0x000000ff);
188 break;
189 case 2:
190 u32GivenValue = *(uint16_t const *)pvValue;
191 u32GivenMask = UINT32_C(0x0000ffff);
192 break;
193 case 3:
194 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
195 ((uint8_t const *)pvValue)[2], 0);
196 u32GivenMask = UINT32_C(0x00ffffff);
197 break;
198 case 4:
199 u32GivenValue = *(uint32_t const *)pvValue;
200 u32GivenMask = UINT32_C(0xffffffff);
201 break;
202 default:
203 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
204 }
205 if (offAccess)
206 {
207 u32GivenValue <<= offAccess * 8;
208 u32GivenMask <<= offAccess * 8;
209 }
210
211 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
212 | (u32GivenValue & u32GivenMask);
213
214 /*
215 * Do DWORD write to the device.
216 */
217 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
218 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
219 switch (rc2)
220 {
221 case VINF_SUCCESS:
222 break;
223 case VINF_IOM_R3_MMIO_READ:
224 case VINF_IOM_R3_MMIO_READ_WRITE:
225 case VINF_IOM_R3_MMIO_WRITE:
226 /** @todo What if we've split a transfer and already read
227 * something? Since reads can have sideeffects we could be
228 * kind of screwed here... */
229 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
230 return rc2;
231 default:
232 if (RT_FAILURE(rc2))
233 {
234 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
235 return rc2;
236 }
237 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
238 if (rc == VINF_SUCCESS || rc2 < rc)
239 rc = rc2;
240 break;
241 }
242
243 /*
244 * Advance.
245 */
246 cbValue -= cbThisPart;
247 if (!cbValue)
248 break;
249 GCPhys += cbThisPart;
250 pvValue = (uint8_t const *)pvValue + cbThisPart;
251 }
252
253 return rc;
254}
255
256
257
258
259/**
260 * Wrapper which does the write and updates range statistics when such are enabled.
261 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
262 */
263static int iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
264{
265#ifdef VBOX_WITH_STATISTICS
266 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
267 Assert(pStats);
268#endif
269
270 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
271 VBOXSTRICTRC rc;
272 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
273 {
274 if ( (cb == 4 && !(GCPhysFault & 3))
275 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
276 || (cb == 8 && !(GCPhysFault & 7)) )
277 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
278 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
279 else
280 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
281 }
282 else
283 rc = VINF_SUCCESS;
284 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
285 STAM_COUNTER_INC(&pStats->Accesses);
286 return VBOXSTRICTRC_TODO(rc);
287}
288
289
/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * The read is carried out as a sequence of dword-sized device accesses; the
 * relevant bytes of each dword are shifted into place and copied to the
 * caller's buffer.
 *
 * @returns Strict VBox status code.  Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart); /* kept for logging only */

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        /* Cannot raise debugger events here; retry the access in ring-3. */
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 * something?  Since reads can have sideeffects we could be
                 * kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                /* Keep the most restrictive (numerically lowest) EM scheduling status. */
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        /* Shift the bytes we want down to bit 0. */
        u32Value >>= (GCPhys & 3) * 8;

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys += cbThisPart;
        pvValue = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}
411
412
413/**
414 * Implements VINF_IOM_MMIO_UNUSED_FF.
415 *
416 * @returns VINF_SUCCESS.
417 * @param pvValue Where to store the zeros.
418 * @param cbValue How many bytes to read.
419 */
420static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
421{
422 switch (cbValue)
423 {
424 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
425 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
426 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
427 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
428 default:
429 {
430 uint8_t *pb = (uint8_t *)pvValue;
431 while (cbValue--)
432 *pb++ = UINT8_C(0xff);
433 break;
434 }
435 }
436 return VINF_SUCCESS;
437}
438
439
440/**
441 * Implements VINF_IOM_MMIO_UNUSED_00.
442 *
443 * @returns VINF_SUCCESS.
444 * @param pvValue Where to store the zeros.
445 * @param cbValue How many bytes to read.
446 */
447static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
448{
449 switch (cbValue)
450 {
451 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
452 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
453 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
454 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
455 default:
456 {
457 uint8_t *pb = (uint8_t *)pvValue;
458 while (cbValue--)
459 *pb++ = UINT8_C(0x00);
460 break;
461 }
462 }
463 return VINF_SUCCESS;
464}
465
466
/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 *
 * Translates the VINF_IOM_MMIO_UNUSED_FF/00 informational statuses into the
 * corresponding buffer contents so callers only see VINF_SUCCESS for
 * successful reads.
 */
DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
    Assert(pStats);
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#endif

    VBOXSTRICTRC rc;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        /* Aligned dword/qword reads and pass-through ranges go straight to the
           device callback; everything else needs the split/merge slow path. */
        if (   (cbValue == 4 && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (cbValue == 8 && !(GCPhys & 7)) )
            rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
        else
            rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        /* No read callback registered: treat the range as all-ones. */
        rc = VINF_IOM_MMIO_UNUSED_FF;
    if (rc != VINF_SUCCESS)
    {
        /* Turn the "unused register" statuses into actual buffer data. */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rc = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rc = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return VBOXSTRICTRC_VAL(rc);
}
502
503
504/**
505 * Internal - statistics only.
506 */
507DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
508{
509#ifdef VBOX_WITH_STATISTICS
510 switch (cb)
511 {
512 case 1:
513 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
514 break;
515 case 2:
516 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
517 break;
518 case 4:
519 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
520 break;
521 case 8:
522 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
523 break;
524 default:
525 /* No way. */
526 AssertMsgFailed(("Invalid data length %d\n", cb));
527 break;
528 }
529#else
530 NOREF(pVM); NOREF(cb);
531#endif
532}
533
534
535/**
536 * MOV reg, mem (read)
537 * MOVZX reg, mem (read)
538 * MOVSX reg, mem (read)
539 *
540 * @returns VBox status code.
541 *
542 * @param pVM The virtual machine.
543 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
544 * @param pCpu Disassembler CPU state.
545 * @param pRange Pointer MMIO range.
546 * @param GCPhysFault The GC physical address corresponding to pvFault.
547 */
548static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
549{
550 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
551
552 /*
553 * Get the data size from parameter 2,
554 * and call the handler function to get the data.
555 */
556 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
557 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
558
559 uint64_t u64Data = 0;
560 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
561 if (rc == VINF_SUCCESS)
562 {
563 /*
564 * Do sign extension for MOVSX.
565 */
566 /** @todo checkup MOVSX implementation! */
567 if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
568 {
569 if (cb == 1)
570 {
571 /* DWORD <- BYTE */
572 int64_t iData = (int8_t)u64Data;
573 u64Data = (uint64_t)iData;
574 }
575 else
576 {
577 /* DWORD <- WORD */
578 int64_t iData = (int16_t)u64Data;
579 u64Data = (uint64_t)iData;
580 }
581 }
582
583 /*
584 * Store the result to register (parameter 1).
585 */
586 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
587 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
588 }
589
590 if (rc == VINF_SUCCESS)
591 iomMMIOStatLength(pVM, cb);
592 return rc;
593}
594
595
596/**
597 * MOV mem, reg|imm (write)
598 *
599 * @returns VBox status code.
600 *
601 * @param pVM The virtual machine.
602 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
603 * @param pCpu Disassembler CPU state.
604 * @param pRange Pointer MMIO range.
605 * @param GCPhysFault The GC physical address corresponding to pvFault.
606 */
607static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
608{
609 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
610
611 /*
612 * Get data to write from second parameter,
613 * and call the callback to write it.
614 */
615 unsigned cb = 0;
616 uint64_t u64Data = 0;
617 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
618 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
619
620 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
621 if (rc == VINF_SUCCESS)
622 iomMMIOStatLength(pVM, cb);
623 return rc;
624}
625
626
/** Wrapper for reading virtual memory.
 *
 * Reads @a cb bytes at guest virtual address @a GCSrc into @a pDest, using
 * the trap-free raw-mode path in RC and PGM in ring-0/ring-3.
 */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
             isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    NOREF(pVCpu);
    int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
    /* Page may be protected and not directly accessible. */
    if (rc == VERR_ACCESS_DENIED)
        /* NOTE(review): returning an I/O-port status for a RAM read looks odd;
           presumably any VINF_*_R3_* status forces the ring-3 restart here —
           confirm against callers. */
        rc = VINF_IOM_R3_IOPORT_WRITE;
    return rc;
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
#endif
}
643
644
/** Wrapper for writing virtual memory.
 *
 * Writes @a cb bytes from @a pvSrc to guest virtual address @a GCPtrDst,
 * bypassing access handlers in ring-0/RC (no trap raised) and going through
 * PGM in ring-3.
 */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code. Some thought needs to be spent on theoretical concurrency issues as
     *        as well since we're not behind the pgm lock and handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
#endif
}
661
662
#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 * NOTE: This whole function is compiled out by the "&& 0" above; kept for
 *       reference only (see the locking/ecx remarks in the #if comment).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   fWriteAccess Whether the faulting access was a write (mem -> MMIO)
 *                      or a read (MMIO -> mem / MMIO -> MMIO).
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 * @param   ppStat      Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
                            PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        /* Counts of 4G or more are left to ring-3/REM. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        /* 16-bit address size only uses CX. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    /* DF decides whether the string pointers walk up or down. */
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_R3_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            /* NOTE(review): u32Data is only 32 bits wide although cb may be up
               to 8 per the assertion above — verify qword MOVS handling. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->fPrefix & DISPREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_R3_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_R3_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
            {
                iomMmioReleaseRange(pVM, pRange);
                return VINF_IOM_R3_MMIO_READ_WRITE;
            }

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
            iomMmioReleaseRange(pVM, pRange);
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */
904
905
906/**
907 * Gets the address / opcode mask corresponding to the given CPU mode.
908 *
909 * @returns Mask.
910 * @param enmCpuMode CPU mode.
911 */
912static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
913{
914 switch (enmCpuMode)
915 {
916 case DISCPUMODE_16BIT: return UINT16_MAX;
917 case DISCPUMODE_32BIT: return UINT32_MAX;
918 case DISCPUMODE_64BIT: return UINT64_MAX;
919 default:
920 AssertFailedReturn(UINT32_MAX);
921 }
922}
923
924
/**
 * [REP] STOSB
 * [REP] STOSW
 * [REP] STOSD
 *
 * Restricted implementation.
 *
 * Uses the range's fill callback when available (values up to 32 bits),
 * otherwise falls back to a per-item write loop.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 */
static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /*
     * We do not support segment prefixes or REPNE..
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get bytes/words/dwords/qwords count to copy.
     */
    uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        /* Counts of 4G or more are left to ring-3/REM. */
        if (    CPUMIsGuestIn64BitCode(VMMGetCpu(pVM))
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        /* Only the address-size-masked part of rCX counts. */
        cTransfers = pRegFrame->rcx & fAddrMask;
        if (!cTransfers)
            return VINF_SUCCESS;
    }

/** @todo r=bird: bounds checks! */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    /* DF decides whether rDI walks up or down. */
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif


    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (   pRange->CTX_SUFF(pfnFillCallback)
        && cb <= 4 /* can only fill 32-bit values */)
    {
        /*
         * Use the fill callback.
         */
        /** @todo pfnFillCallback must return number of bytes successfully written!!! */
        if (offIncrement > 0)
        {
            /* addr++ variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers: rDI advanced by the full transfer, only
                   within the current address width. */
                pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                /* All transfers done -> masked bits of rCX become zero. */
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
        else
        {
            /* addr-- variant: the fill callback gets the lowest address. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                   Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
    }
    else
    {
        /*
         * Use the write callback.
         */
        Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
        uint64_t u64Data = pRegFrame->rax;

        /* fill loop. */
        do
        {
            rc = iomMMIODoWrite(pVM, pRange, Phys, &u64Data, cb);
            if (rc != VINF_SUCCESS)
                break;

            Phys           += offIncrement;
            pRegFrame->rdi  = ((pRegFrame->rdi + offIncrement) & fAddrMask)
                            | (pRegFrame->rdi & ~fAddrMask);
            cTransfers--;
        } while (cTransfers);

        /* Update rcx on exit (may be non-zero if the loop bailed out early). */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->rcx = (cTransfers & fAddrMask)
                           | (pRegFrame->rcx & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}
1055
1056
1057/**
1058 * [REP] LODSB
1059 * [REP] LODSW
1060 * [REP] LODSD
1061 *
1062 * Restricted implementation.
1063 *
1064 *
1065 * @returns VBox status code.
1066 *
1067 * @param pVM The virtual machine.
1068 * @param pRegFrame Trap register frame.
1069 * @param GCPhysFault The GC physical address corresponding to pvFault.
1070 * @param pCpu Disassembler CPU state.
1071 * @param pRange Pointer MMIO range.
1072 */
1073static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1074{
1075 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1076
1077 /*
1078 * We do not support segment prefixes or REP*.
1079 */
1080 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
1081 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1082
1083 /*
1084 * Get data size.
1085 */
1086 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
1087 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1088 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1089
1090 /*
1091 * Perform read.
1092 */
1093 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
1094 if (rc == VINF_SUCCESS)
1095 {
1096 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1097 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
1098 | (pRegFrame->rsi & ~fAddrMask);
1099 }
1100
1101 /*
1102 * Work statistics and return.
1103 */
1104 if (rc == VINF_SUCCESS)
1105 iomMMIOStatLength(pVM, cb);
1106 return rc;
1107}
1108
1109
1110/**
1111 * CMP [MMIO], reg|imm
1112 * CMP reg|imm, [MMIO]
1113 *
1114 * Restricted implementation.
1115 *
1116 *
1117 * @returns VBox status code.
1118 *
1119 * @param pVM The virtual machine.
1120 * @param pRegFrame Trap register frame.
1121 * @param GCPhysFault The GC physical address corresponding to pvFault.
1122 * @param pCpu Disassembler CPU state.
1123 * @param pRange Pointer MMIO range.
1124 */
1125static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1126{
1127 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1128
1129 /*
1130 * Get the operands.
1131 */
1132 unsigned cb = 0;
1133 uint64_t uData1 = 0;
1134 uint64_t uData2 = 0;
1135 int rc;
1136 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1137 /* cmp reg, [MMIO]. */
1138 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1139 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1140 /* cmp [MMIO], reg|imm. */
1141 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1142 else
1143 {
1144 AssertMsgFailed(("Disassember CMP problem..\n"));
1145 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1146 }
1147
1148 if (rc == VINF_SUCCESS)
1149 {
1150#if HC_ARCH_BITS == 32
1151 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1152 if (cb > 4)
1153 return VINF_IOM_R3_MMIO_READ_WRITE;
1154#endif
1155 /* Emulate CMP and update guest flags. */
1156 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
1157 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1158 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1159 iomMMIOStatLength(pVM, cb);
1160 }
1161
1162 return rc;
1163}
1164
1165
1166/**
1167 * AND [MMIO], reg|imm
1168 * AND reg, [MMIO]
1169 * OR [MMIO], reg|imm
1170 * OR reg, [MMIO]
1171 *
1172 * Restricted implementation.
1173 *
1174 *
1175 * @returns VBox status code.
1176 *
1177 * @param pVM The virtual machine.
1178 * @param pRegFrame Trap register frame.
1179 * @param GCPhysFault The GC physical address corresponding to pvFault.
1180 * @param pCpu Disassembler CPU state.
1181 * @param pRange Pointer MMIO range.
1182 * @param pfnEmulate Instruction emulation function.
1183 */
1184static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
1185{
1186 unsigned cb = 0;
1187 uint64_t uData1 = 0;
1188 uint64_t uData2 = 0;
1189 bool fAndWrite;
1190 int rc;
1191
1192#ifdef LOG_ENABLED
1193 const char *pszInstr;
1194
1195 if (pCpu->pCurInstr->uOpcode == OP_XOR)
1196 pszInstr = "Xor";
1197 else if (pCpu->pCurInstr->uOpcode == OP_OR)
1198 pszInstr = "Or";
1199 else if (pCpu->pCurInstr->uOpcode == OP_AND)
1200 pszInstr = "And";
1201 else
1202 pszInstr = "OrXorAnd??";
1203#endif
1204
1205 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1206 {
1207#if HC_ARCH_BITS == 32
1208 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1209 if (cb > 4)
1210 return VINF_IOM_R3_MMIO_READ_WRITE;
1211#endif
1212 /* and reg, [MMIO]. */
1213 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1214 fAndWrite = false;
1215 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1216 }
1217 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1218 {
1219#if HC_ARCH_BITS == 32
1220 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1221 if (cb > 4)
1222 return VINF_IOM_R3_MMIO_READ_WRITE;
1223#endif
1224 /* and [MMIO], reg|imm. */
1225 fAndWrite = true;
1226 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
1227 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
1228 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1229 else
1230 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1231 }
1232 else
1233 {
1234 AssertMsgFailed(("Disassember AND problem..\n"));
1235 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1236 }
1237
1238 if (rc == VINF_SUCCESS)
1239 {
1240 /* Emulate AND and update guest flags. */
1241 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
1242
1243 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
1244
1245 if (fAndWrite)
1246 /* Store result to MMIO. */
1247 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
1248 else
1249 {
1250 /* Store result to register. */
1251 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
1252 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1253 }
1254 if (rc == VINF_SUCCESS)
1255 {
1256 /* Update guest's eflags and finish. */
1257 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1258 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1259 iomMMIOStatLength(pVM, cb);
1260 }
1261 }
1262
1263 return rc;
1264}
1265
1266
1267/**
1268 * TEST [MMIO], reg|imm
1269 * TEST reg, [MMIO]
1270 *
1271 * Restricted implementation.
1272 *
1273 *
1274 * @returns VBox status code.
1275 *
1276 * @param pVM The virtual machine.
1277 * @param pRegFrame Trap register frame.
1278 * @param GCPhysFault The GC physical address corresponding to pvFault.
1279 * @param pCpu Disassembler CPU state.
1280 * @param pRange Pointer MMIO range.
1281 */
1282static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1283{
1284 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1285
1286 unsigned cb = 0;
1287 uint64_t uData1 = 0;
1288 uint64_t uData2 = 0;
1289 int rc;
1290
1291 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1292 {
1293 /* and test, [MMIO]. */
1294 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1295 }
1296 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1297 {
1298 /* test [MMIO], reg|imm. */
1299 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1300 }
1301 else
1302 {
1303 AssertMsgFailed(("Disassember TEST problem..\n"));
1304 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1305 }
1306
1307 if (rc == VINF_SUCCESS)
1308 {
1309#if HC_ARCH_BITS == 32
1310 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1311 if (cb > 4)
1312 return VINF_IOM_R3_MMIO_READ_WRITE;
1313#endif
1314
1315 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
1316 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
1317 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1318 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1319 iomMMIOStatLength(pVM, cb);
1320 }
1321
1322 return rc;
1323}
1324
1325
1326/**
1327 * BT [MMIO], reg|imm
1328 *
1329 * Restricted implementation.
1330 *
1331 *
1332 * @returns VBox status code.
1333 *
1334 * @param pVM The virtual machine.
1335 * @param pRegFrame Trap register frame.
1336 * @param GCPhysFault The GC physical address corresponding to pvFault.
1337 * @param pCpu Disassembler CPU state.
1338 * @param pRange Pointer MMIO range.
1339 */
1340static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1341{
1342 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1343
1344 uint64_t uBit = 0;
1345 uint64_t uData = 0;
1346 unsigned cbIgnored;
1347
1348 if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
1349 {
1350 AssertMsgFailed(("Disassember BT problem..\n"));
1351 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1352 }
1353 /* The size of the memory operand only matters here. */
1354 unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
1355
1356 /* bt [MMIO], reg|imm. */
1357 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
1358 if (rc == VINF_SUCCESS)
1359 {
1360 /* Find the bit inside the faulting address */
1361 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
1362 iomMMIOStatLength(pVM, cbData);
1363 }
1364
1365 return rc;
1366}
1367
1368/**
1369 * XCHG [MMIO], reg
1370 * XCHG reg, [MMIO]
1371 *
1372 * Restricted implementation.
1373 *
1374 *
1375 * @returns VBox status code.
1376 *
1377 * @param pVM The virtual machine.
1378 * @param pRegFrame Trap register frame.
1379 * @param GCPhysFault The GC physical address corresponding to pvFault.
1380 * @param pCpu Disassembler CPU state.
1381 * @param pRange Pointer MMIO range.
1382 */
1383static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1384{
1385 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1386 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1387 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1388 return VINF_IOM_R3_MMIO_READ_WRITE;
1389
1390 int rc;
1391 unsigned cb = 0;
1392 uint64_t uData1 = 0;
1393 uint64_t uData2 = 0;
1394 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1395 {
1396 /* xchg reg, [MMIO]. */
1397 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1398 if (rc == VINF_SUCCESS)
1399 {
1400 /* Store result to MMIO. */
1401 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
1402
1403 if (rc == VINF_SUCCESS)
1404 {
1405 /* Store result to register. */
1406 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
1407 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1408 }
1409 else
1410 Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1411 }
1412 else
1413 Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1414 }
1415 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1416 {
1417 /* xchg [MMIO], reg. */
1418 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1419 if (rc == VINF_SUCCESS)
1420 {
1421 /* Store result to MMIO. */
1422 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
1423 if (rc == VINF_SUCCESS)
1424 {
1425 /* Store result to register. */
1426 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
1427 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1428 }
1429 else
1430 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
1431 }
1432 else
1433 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
1434 }
1435 else
1436 {
1437 AssertMsgFailed(("Disassember XCHG problem..\n"));
1438 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1439 }
1440 return rc;
1441}
1442
1443
1444/**
1445 * \#PF Handler callback for MMIO ranges.
1446 *
1447 * @returns VBox status code (appropriate for GC return).
1448 * @param pVM Pointer to the VM.
1449 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1450 * any error code (the EPT misconfig hack).
1451 * @param pCtxCore Trap register frame.
1452 * @param GCPhysFault The GC physical address corresponding to pvFault.
1453 * @param pvUser Pointer to the MMIO ring-3 range entry.
1454 */
1455static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
1456{
1457 /* Take the IOM lock before performing any MMIO. */
1458 int rc = IOM_LOCK(pVM);
1459#ifndef IN_RING3
1460 if (rc == VERR_SEM_BUSY)
1461 return VINF_IOM_R3_MMIO_READ_WRITE;
1462#endif
1463 AssertRC(rc);
1464
1465 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1466 Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
1467 GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1468
1469 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1470 Assert(pRange);
1471 Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
1472
1473#ifdef VBOX_WITH_STATISTICS
1474 /*
1475 * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
1476 */
1477 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
1478 if (!pStats)
1479 {
1480# ifdef IN_RING3
1481 IOM_UNLOCK(pVM);
1482 return VERR_NO_MEMORY;
1483# else
1484 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1485 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1486 IOM_UNLOCK(pVM);
1487 return VINF_IOM_R3_MMIO_READ_WRITE;
1488# endif
1489 }
1490#endif
1491
1492#ifndef IN_RING3
1493 /*
1494 * Should we defer the request right away? This isn't usually the case, so
1495 * do the simple test first and the try deal with uErrorCode being N/A.
1496 */
1497 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1498 || !pRange->CTX_SUFF(pfnReadCallback))
1499 && ( uErrorCode == UINT32_MAX
1500 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1501 : uErrorCode & X86_TRAP_PF_RW
1502 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1503 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1504 )
1505 )
1506 )
1507 {
1508 if (uErrorCode & X86_TRAP_PF_RW)
1509 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1510 else
1511 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1512
1513 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1514 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1515 IOM_UNLOCK(pVM);
1516 return VINF_IOM_R3_MMIO_READ_WRITE;
1517 }
1518#endif /* !IN_RING3 */
1519
1520 /*
1521 * Retain the range and do locking.
1522 */
1523 iomMmioRetainRange(pRange);
1524 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1525 IOM_UNLOCK(pVM);
1526 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1527 if (rc != VINF_SUCCESS)
1528 {
1529 iomMmioReleaseRange(pVM, pRange);
1530 return rc;
1531 }
1532
1533 /*
1534 * Disassemble the instruction and interpret it.
1535 */
1536 PVMCPU pVCpu = VMMGetCpu(pVM);
1537 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1538 unsigned cbOp;
1539 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1540 if (RT_FAILURE(rc))
1541 {
1542 iomMmioReleaseRange(pVM, pRange);
1543 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1544 return rc;
1545 }
1546 switch (pDis->pCurInstr->uOpcode)
1547 {
1548 case OP_MOV:
1549 case OP_MOVZX:
1550 case OP_MOVSX:
1551 {
1552 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1553 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1554 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1555 ? uErrorCode & X86_TRAP_PF_RW
1556 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1557 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1558 else
1559 rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1560 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1561 break;
1562 }
1563
1564
1565#ifdef IOM_WITH_MOVS_SUPPORT
1566 case OP_MOVSB:
1567 case OP_MOVSWD:
1568 {
1569 if (uErrorCode == UINT32_MAX)
1570 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1571 else
1572 {
1573 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1574 PSTAMPROFILE pStat = NULL;
1575 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1576 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1577 }
1578 break;
1579 }
1580#endif
1581
1582 case OP_STOSB:
1583 case OP_STOSWD:
1584 Assert(uErrorCode & X86_TRAP_PF_RW);
1585 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1586 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1587 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1588 break;
1589
1590 case OP_LODSB:
1591 case OP_LODSWD:
1592 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1593 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1594 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1595 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1596 break;
1597
1598 case OP_CMP:
1599 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1600 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1601 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1602 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1603 break;
1604
1605 case OP_AND:
1606 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1607 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1608 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1609 break;
1610
1611 case OP_OR:
1612 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1613 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1614 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1615 break;
1616
1617 case OP_XOR:
1618 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1619 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1620 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1621 break;
1622
1623 case OP_TEST:
1624 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1625 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1626 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1627 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1628 break;
1629
1630 case OP_BT:
1631 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1632 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1633 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1634 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1635 break;
1636
1637 case OP_XCHG:
1638 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1639 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1640 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1641 break;
1642
1643
1644 /*
1645 * The instruction isn't supported. Hand it on to ring-3.
1646 */
1647 default:
1648 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1649 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1650 break;
1651 }
1652
1653 /*
1654 * On success advance EIP.
1655 */
1656 if (rc == VINF_SUCCESS)
1657 pCtxCore->rip += cbOp;
1658 else
1659 {
1660 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1661#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1662 switch (rc)
1663 {
1664 case VINF_IOM_R3_MMIO_READ:
1665 case VINF_IOM_R3_MMIO_READ_WRITE:
1666 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1667 break;
1668 case VINF_IOM_R3_MMIO_WRITE:
1669 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1670 break;
1671 }
1672#endif
1673 }
1674
1675 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1676 iomMmioReleaseRange(pVM, pRange);
1677 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1678 return rc;
1679}
1680
1681/**
1682 * \#PF Handler callback for MMIO ranges.
1683 *
1684 * @returns VBox status code (appropriate for GC return).
1685 * @param pVM Pointer to the VM.
1686 * @param uErrorCode CPU Error code.
1687 * @param pCtxCore Trap register frame.
1688 * @param pvFault The fault address (cr2).
1689 * @param GCPhysFault The GC physical address corresponding to pvFault.
1690 * @param pvUser Pointer to the MMIO ring-3 range entry.
1691 */
1692VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1693{
1694 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1695 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1696 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1697 return VBOXSTRICTRC_VAL(rcStrict);
1698}
1699
1700/**
1701 * Physical access handler for MMIO ranges.
1702 *
1703 * @returns VBox status code (appropriate for GC return).
1704 * @param pVM Pointer to the VM.
1705 * @param uErrorCode CPU Error code.
1706 * @param pCtxCore Trap register frame.
1707 * @param GCPhysFault The GC physical address.
1708 */
1709VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1710{
1711 int rc2 = IOM_LOCK(pVM); NOREF(rc2);
1712#ifndef IN_RING3
1713 if (rc2 == VERR_SEM_BUSY)
1714 return VINF_IOM_R3_MMIO_READ_WRITE;
1715#endif
1716 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMmioGetRange(pVM, GCPhysFault));
1717 IOM_UNLOCK(pVM);
1718 return VBOXSTRICTRC_VAL(rcStrict);
1719}
1720
1721
1722#ifdef IN_RING3
1723/**
1724 * \#PF Handler callback for MMIO ranges.
1725 *
1726 * @returns VINF_SUCCESS if the handler have carried out the operation.
1727 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1728 * @param pVM Pointer to the VM.
1729 * @param GCPhys The physical address the guest is writing to.
1730 * @param pvPhys The HC mapping of that address.
1731 * @param pvBuf What the guest is reading/writing.
1732 * @param cbBuf How much it's reading/writing.
1733 * @param enmAccessType The access type.
1734 * @param pvUser Pointer to the MMIO range entry.
1735 */
1736DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
1737 PGMACCESSTYPE enmAccessType, void *pvUser)
1738{
1739 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1740 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1741
1742 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1743 AssertPtr(pRange);
1744 NOREF(pvPhys);
1745
1746 /*
1747 * Validate the range.
1748 */
1749 int rc = IOM_LOCK(pVM);
1750 AssertRC(rc);
1751 Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
1752
1753 /*
1754 * Perform locking.
1755 */
1756 iomMmioRetainRange(pRange);
1757 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1758 IOM_UNLOCK(pVM);
1759 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1760 if (rc != VINF_SUCCESS)
1761 {
1762 iomMmioReleaseRange(pVM, pRange);
1763 return rc;
1764 }
1765
1766 /*
1767 * Perform the access.
1768 */
1769 if (enmAccessType == PGMACCESSTYPE_READ)
1770 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1771 else
1772 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1773
1774 AssertRC(rc);
1775 iomMmioReleaseRange(pVM, pRange);
1776 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1777 return rc;
1778}
1779#endif /* IN_RING3 */
1780
1781
1782/**
1783 * Reads a MMIO register.
1784 *
1785 * @returns VBox status code.
1786 *
1787 * @param pVM Pointer to the VM.
1788 * @param GCPhys The physical address to read.
1789 * @param pu32Value Where to store the value read.
1790 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1791 */
1792VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1793{
1794 /* Take the IOM lock before performing any MMIO. */
1795 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1796#ifndef IN_RING3
1797 if (rc == VERR_SEM_BUSY)
1798 return VINF_IOM_R3_MMIO_WRITE;
1799#endif
1800 AssertRC(VBOXSTRICTRC_VAL(rc));
1801#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1802 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1803#endif
1804
1805 /*
1806 * Lookup the current context range node and statistics.
1807 */
1808 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1809 if (!pRange)
1810 {
1811 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1812 IOM_UNLOCK(pVM);
1813 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1814 }
1815#ifdef VBOX_WITH_STATISTICS
1816 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1817 if (!pStats)
1818 {
1819 IOM_UNLOCK(pVM);
1820# ifdef IN_RING3
1821 return VERR_NO_MEMORY;
1822# else
1823 return VINF_IOM_R3_MMIO_READ;
1824# endif
1825 }
1826 STAM_COUNTER_INC(&pStats->Accesses);
1827#endif /* VBOX_WITH_STATISTICS */
1828
1829 if (pRange->CTX_SUFF(pfnReadCallback))
1830 {
1831 /*
1832 * Perform locking.
1833 */
1834 iomMmioRetainRange(pRange);
1835 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1836 IOM_UNLOCK(pVM);
1837 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
1838 if (rc != VINF_SUCCESS)
1839 {
1840 iomMmioReleaseRange(pVM, pRange);
1841 return rc;
1842 }
1843
1844 /*
1845 * Perform the read and deal with the result.
1846 */
1847 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1848 if ( (cbValue == 4 && !(GCPhys & 3))
1849 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
1850 || (cbValue == 8 && !(GCPhys & 7)) )
1851 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
1852 pu32Value, (unsigned)cbValue);
1853 else
1854 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
1855 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1856 switch (VBOXSTRICTRC_VAL(rc))
1857 {
1858 case VINF_SUCCESS:
1859 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1860 iomMmioReleaseRange(pVM, pRange);
1861 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1862 return rc;
1863#ifndef IN_RING3
1864 case VINF_IOM_R3_MMIO_READ:
1865 case VINF_IOM_R3_MMIO_READ_WRITE:
1866 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1867#endif
1868 default:
1869 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1870 iomMmioReleaseRange(pVM, pRange);
1871 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1872 return rc;
1873
1874 case VINF_IOM_MMIO_UNUSED_00:
1875 iomMMIODoRead00s(pu32Value, cbValue);
1876 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1877 iomMmioReleaseRange(pVM, pRange);
1878 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1879 return VINF_SUCCESS;
1880
1881 case VINF_IOM_MMIO_UNUSED_FF:
1882 iomMMIODoReadFFs(pu32Value, cbValue);
1883 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1884 iomMmioReleaseRange(pVM, pRange);
1885 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1886 return VINF_SUCCESS;
1887 }
1888 /* not reached */
1889 }
1890#ifndef IN_RING3
1891 if (pRange->pfnReadCallbackR3)
1892 {
1893 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1894 IOM_UNLOCK(pVM);
1895 return VINF_IOM_R3_MMIO_READ;
1896 }
1897#endif
1898
1899 /*
1900 * Unassigned memory - this is actually not supposed t happen...
1901 */
1902 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1903 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1904 iomMMIODoReadFFs(pu32Value, cbValue);
1905 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1906 IOM_UNLOCK(pVM);
1907 return VINF_SUCCESS;
1908}
1909
1910
1911/**
1912 * Writes to a MMIO register.
1913 *
1914 * @returns VBox status code.
1915 *
1916 * @param pVM Pointer to the VM.
1917 * @param GCPhys The physical address to write to.
1918 * @param u32Value The value to write.
1919 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1920 */
1921VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1922{
1923 /* Take the IOM lock before performing any MMIO. */
1924 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1925#ifndef IN_RING3
1926 if (rc == VERR_SEM_BUSY)
1927 return VINF_IOM_R3_MMIO_WRITE;
1928#endif
1929 AssertRC(VBOXSTRICTRC_VAL(rc));
1930#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1931 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
1932#endif
1933
1934 /*
1935 * Lookup the current context range node.
1936 */
1937 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1938 if (!pRange)
1939 {
1940 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1941 IOM_UNLOCK(pVM);
1942 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1943 }
1944#ifdef VBOX_WITH_STATISTICS
1945 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1946 if (!pStats)
1947 {
1948 IOM_UNLOCK(pVM);
1949# ifdef IN_RING3
1950 return VERR_NO_MEMORY;
1951# else
1952 return VINF_IOM_R3_MMIO_WRITE;
1953# endif
1954 }
1955 STAM_COUNTER_INC(&pStats->Accesses);
1956#endif /* VBOX_WITH_STATISTICS */
1957
1958 if (pRange->CTX_SUFF(pfnWriteCallback))
1959 {
1960 /*
1961 * Perform locking.
1962 */
1963 iomMmioRetainRange(pRange);
1964 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1965 IOM_UNLOCK(pVM);
1966 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
1967 if (rc != VINF_SUCCESS)
1968 {
1969 iomMmioReleaseRange(pVM, pRange);
1970 return rc;
1971 }
1972
1973 /*
1974 * Perform the write.
1975 */
1976 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1977 if ( (cbValue == 4 && !(GCPhys & 3))
1978 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
1979 || (cbValue == 8 && !(GCPhys & 7)) )
1980 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1981 GCPhys, &u32Value, (unsigned)cbValue);
1982 else
1983 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
1984 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1985#ifndef IN_RING3
1986 if ( rc == VINF_IOM_R3_MMIO_WRITE
1987 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
1988 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1989#endif
1990 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1991 iomMmioReleaseRange(pVM, pRange);
1992 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1993 return rc;
1994 }
1995#ifndef IN_RING3
1996 if (pRange->pfnWriteCallbackR3)
1997 {
1998 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1999 IOM_UNLOCK(pVM);
2000 return VINF_IOM_R3_MMIO_WRITE;
2001 }
2002#endif
2003
2004 /*
2005 * No write handler, nothing to do.
2006 */
2007 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2008 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2009 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2010 IOM_UNLOCK(pVM);
2011 return VINF_SUCCESS;
2012}
2013
2014
/**
 * [REP*] INSB/INSW/INSD
 * ES:EDI,DX[,ECX]
 *
 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
 *
 * @returns Strict VBox status code. Informational status codes other than the one documented
 *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
 * @retval  VINF_SUCCESS                Success.
 * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
 *                                      status code must be passed on to EM.
 * @retval  VINF_IOM_R3_IOPORT_READ     Defer the read to ring-3. (R0/GC only)
 * @retval  VINF_EM_RAW_EMULATE_INSTR   Defer the read to the REM.
 * @retval  VINF_EM_RAW_GUEST_TRAP      The exception was left pending. (TRPMRaiseXcptErr)
 * @retval  VINF_TRPM_XCPT_DISPATCHED   The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
 * @retval  VINF_EM_RESCHEDULE_REM      The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   uPort       IO Port
 * @param   uPrefix     IO instruction prefix
 * @param   enmAddrMode The address mode.
 * @param   cbTransfer  Size of transfer unit (1, 2 or 4 bytes).
 */
VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
                                        DISCPUMODE enmAddrMode, uint32_t cbTransfer)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);

    /*
     * We do not support REPNE or decrementing destination
     * pointer. Segment prefixes are deliberately ignored, as per the instruction specification:
     * INS always stores through ES:(E/R)DI and cannot be overridden.
     */
    if (   (uPrefix & DISPREFIX_REPNE)
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        /* Huge 64-bit REP counts are punted to the recompiler rather than
           looped here (check not needed in RC). */
        if (    CPUMIsGuestIn64BitCode(pVCpu)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->rcx & fAddrMask;    /* count is masked to the current address size */
        if (!cTransfers)
            return VINF_SUCCESS;                    /* REP with a zero count is a no-op */
    }

    /* Convert destination address es:edi to a flat address. */
    RTGCPTR GCPtrDst;
    int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrDst);
    if (RT_FAILURE(rc2))
    {
        Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we can't recover from traps inside this instruction, as the
       port read cannot be repeated (reading an I/O port may have side effects).  A write that
       would fault must therefore be detected up front and the whole instruction deferred. */
    uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
                          X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
    if (rc2 != VINF_SUCCESS)
    {
        Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /* If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers. */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        /* Advance RDI by what the device consumed; bits above the current
           address-size mask are preserved unchanged. */
        pRegFrame->rdi  = ((pRegFrame->rdi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
                        | (pRegFrame->rdi & ~fAddrMask);
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);    /* raw-mode: cover the guest RAM writes below */
#endif
    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value;
        rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
        Assert(rc2 == VINF_SUCCESS); NOREF(rc2);    /* cannot fail: verified by PGMVerifyAccess above */
        GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
        pRegFrame->rdi  = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
                        | (pRegFrame->rdi & ~fAddrMask);
        cTransfers--;
    }
#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update rcx on exit (remaining count), preserving bits above the address-size mask. */
    if (uPrefix & DISPREFIX_REP)
        pRegFrame->rcx = (cTransfers & fAddrMask)
                       | (pRegFrame->rcx & ~fAddrMask);

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
2133
2134
2135/**
2136 * [REP*] INSB/INSW/INSD
2137 * ES:EDI,DX[,ECX]
2138 *
2139 * @returns Strict VBox status code. Informational status codes other than the one documented
2140 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2141 * @retval VINF_SUCCESS Success.
2142 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2143 * status code must be passed on to EM.
2144 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2145 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2146 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2147 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2148 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2149 *
2150 * @param pVM The virtual machine.
2151 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2152 * @param pCpu Disassembler CPU state.
2153 */
2154VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2155{
2156 /*
2157 * Get port number directly from the register (no need to bother the
2158 * disassembler). And get the I/O register size from the opcode / prefix.
2159 */
2160 RTIOPORT Port = pRegFrame->edx & 0xffff;
2161 unsigned cb = 0;
2162 if (pCpu->pCurInstr->uOpcode == OP_INSB)
2163 cb = 1;
2164 else
2165 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
2166
2167 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2168 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2169 {
2170 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2171 return rcStrict;
2172 }
2173
2174 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2175}
2176
2177
/**
 * [REP*] OUTSB/OUTSW/OUTSD
 * DS:ESI,DX[,ECX]
 *
 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
 *
 * @returns Strict VBox status code. Informational status codes other than the one documented
 *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
 * @retval  VINF_SUCCESS                Success.
 * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
 *                                      status code must be passed on to EM.
 * @retval  VINF_IOM_R3_IOPORT_WRITE    Defer the write to ring-3. (R0/GC only)
 * @retval  VINF_EM_RAW_GUEST_TRAP      The exception was left pending. (TRPMRaiseXcptErr)
 * @retval  VINF_TRPM_XCPT_DISPATCHED   The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
 * @retval  VINF_EM_RESCHEDULE_REM      The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   uPort       IO Port
 * @param   uPrefix     IO instruction prefix
 * @param   enmAddrMode The address mode.
 * @param   cbTransfer  Size of transfer unit (1, 2 or 4 bytes).
 */
VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
                                         DISCPUMODE enmAddrMode, uint32_t cbTransfer)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);

    /*
     * We do not support segment prefixes, REPNE or
     * decrementing source pointer.  (Unlike INS, the OUTS source segment CAN
     * be overridden, which is why DISPREFIX_SEG is rejected here.)
     */
    if (   (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        /* Huge 64-bit REP counts are punted to the recompiler rather than
           looped here (check not needed in RC). */
        if (    CPUMIsGuestIn64BitCode(pVCpu)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->rcx & fAddrMask;    /* count is masked to the current address size */
        if (!cTransfers)
            return VINF_SUCCESS;                    /* REP with a zero count is a no-op */
    }

    /* Convert source address ds:esi to a flat address. */
    RTGCPTR GCPtrSrc;
    int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrSrc);
    if (RT_FAILURE(rc2))
    {
        Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we currently can't recover properly from traps inside this
       instruction, so a source page that would fault means deferring the whole instruction.
       Only read access is required (X86_PTE_US alone when the guest is in ring-3). */
    uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
                          (cpl == 3) ? X86_PTE_US : 0);
    if (rc2 != VINF_SUCCESS)
    {
        Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /*
         * If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers.
         */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        /* Advance RSI by what the device consumed; bits above the current
           address-size mask are preserved unchanged. */
        pRegFrame->rsi  = ((pRegFrame->rsi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
                        | (pRegFrame->rsi & ~fAddrMask);
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);    /* raw-mode: cover the guest RAM reads below */
#endif

    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value = 0;
        rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
        if (rcStrict != VINF_SUCCESS)
            break;
        rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
        pRegFrame->rsi  = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
                        | (pRegFrame->rsi & ~fAddrMask);
        cTransfers--;
    }

#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update rcx on exit (remaining count), preserving bits above the address-size mask. */
    if (uPrefix & DISPREFIX_REP)
        pRegFrame->rcx = (cTransfers & fAddrMask)
                       | (pRegFrame->rcx & ~fAddrMask);

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
2300
2301
2302/**
2303 * [REP*] OUTSB/OUTSW/OUTSD
2304 * DS:ESI,DX[,ECX]
2305 *
2306 * @returns Strict VBox status code. Informational status codes other than the one documented
2307 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2308 * @retval VINF_SUCCESS Success.
2309 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2310 * status code must be passed on to EM.
2311 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2312 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
2313 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2314 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2315 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2316 *
2317 * @param pVM The virtual machine.
2318 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2319 * @param pCpu Disassembler CPU state.
2320 */
2321VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2322{
2323 /*
2324 * Get port number from the first parameter.
2325 * And get the I/O register size from the opcode / prefix.
2326 */
2327 uint64_t Port = 0;
2328 unsigned cb = 0;
2329 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &Port, &cb);
2330 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
2331 if (pCpu->pCurInstr->uOpcode == OP_OUTSB)
2332 cb = 1;
2333 else
2334 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
2335
2336 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2337 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2338 {
2339 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2340 return rcStrict;
2341 }
2342
2343 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2344}
2345
2346#ifndef IN_RC
2347
2348/**
2349 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2350 *
2351 * (This is a special optimization used by the VGA device.)
2352 *
2353 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2354 * remapping is made,.
2355 *
2356 * @param pVM The virtual machine.
2357 * @param GCPhys The address of the MMIO page to be changed.
2358 * @param GCPhysRemapped The address of the MMIO2 page.
2359 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2360 * for the time being.
2361 */
2362VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2363{
2364 /* Currently only called from the VGA device during MMIO. */
2365 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2366 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2367 PVMCPU pVCpu = VMMGetCpu(pVM);
2368
2369 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2370 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2371 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2372 && !HMIsNestedPagingActive(pVM)))
2373 return VINF_SUCCESS; /* ignore */
2374
2375 int rc = IOM_LOCK(pVM);
2376 if (RT_FAILURE(rc))
2377 return VINF_SUCCESS; /* better luck the next time around */
2378
2379 /*
2380 * Lookup the context range node the page belongs to.
2381 */
2382 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
2383 AssertMsgReturn(pRange,
2384 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2385
2386 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2387 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2388
2389 /*
2390 * Do the aliasing; page align the addresses since PGM is picky.
2391 */
2392 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2393 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2394
2395 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2396
2397 IOM_UNLOCK(pVM);
2398 AssertRCReturn(rc, rc);
2399
2400 /*
2401 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2402 * can simply prefetch it.
2403 *
2404 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2405 */
2406#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2407# ifdef VBOX_STRICT
2408 uint64_t fFlags;
2409 RTHCPHYS HCPhys;
2410 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2411 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2412# endif
2413#endif
2414 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2415 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2416 return VINF_SUCCESS;
2417}
2418
2419
2420/**
2421 * Mapping a HC page in place of an MMIO page for direct access.
2422 *
2423 * (This is a special optimization used by the APIC in the VT-x case.)
2424 *
2425 * @returns VBox status code.
2426 *
2427 * @param pVM Pointer to the VM.
2428 * @param pVCpu Pointer to the VMCPU.
2429 * @param GCPhys The address of the MMIO page to be changed.
2430 * @param HCPhys The address of the host physical page.
2431 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2432 * for the time being.
2433 */
2434VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2435{
2436 /* Currently only called from VT-x code during a page fault. */
2437 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2438
2439 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2440 Assert(HMIsEnabled(pVM));
2441
2442 /*
2443 * Lookup the context range node the page belongs to.
2444 */
2445#ifdef VBOX_STRICT
2446 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2447 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2448 AssertMsgReturn(pRange,
2449 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2450 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2451 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2452#endif
2453
2454 /*
2455 * Do the aliasing; page align the addresses since PGM is picky.
2456 */
2457 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2458 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2459
2460 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2461 AssertRCReturn(rc, rc);
2462
2463 /*
2464 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2465 * can simply prefetch it.
2466 *
2467 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2468 */
2469 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2470 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2471 return VINF_SUCCESS;
2472}
2473
2474
2475/**
2476 * Reset a previously modified MMIO region; restore the access flags.
2477 *
2478 * @returns VBox status code.
2479 *
2480 * @param pVM The virtual machine.
2481 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2482 */
2483VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2484{
2485 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2486
2487 PVMCPU pVCpu = VMMGetCpu(pVM);
2488
2489 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2490 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2491 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2492 && !HMIsNestedPagingActive(pVM)))
2493 return VINF_SUCCESS; /* ignore */
2494
2495 /*
2496 * Lookup the context range node the page belongs to.
2497 */
2498#ifdef VBOX_STRICT
2499 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2500 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2501 AssertMsgReturn(pRange,
2502 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2503 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2504 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2505#endif
2506
2507 /*
2508 * Call PGM to do the job work.
2509 *
2510 * After the call, all the pages should be non-present... unless there is
2511 * a page pool flush pending (unlikely).
2512 */
2513 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2514 AssertRC(rc);
2515
2516#ifdef VBOX_STRICT
2517 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2518 {
2519 uint32_t cb = pRange->cb;
2520 GCPhys = pRange->GCPhys;
2521 while (cb)
2522 {
2523 uint64_t fFlags;
2524 RTHCPHYS HCPhys;
2525 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2526 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2527 cb -= PAGE_SIZE;
2528 GCPhys += PAGE_SIZE;
2529 }
2530 }
2531#endif
2532 return rc;
2533}
2534
2535#endif /* !IN_RC */
2536
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette