VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 18279

Last change on this file since 18279 was 18234, checked in by vboxsync, 16 years ago

IOMMMIOModifyPage -> IOMMMIOMapMMIO2Page. (missed one)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 64.0 KB
1/* $Id: IOMAllMMIO.cpp 18234 2009-03-25 01:29:51Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_IOM
27#include <VBox/iom.h>
28#include <VBox/cpum.h>
29#include <VBox/pgm.h>
30#include <VBox/selm.h>
31#include <VBox/mm.h>
32#include <VBox/em.h>
33#include <VBox/pgm.h>
34#include <VBox/trpm.h>
35#include "IOMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/hwaccm.h>
38
39#include <VBox/dis.h>
40#include <VBox/disopcode.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48
49/*******************************************************************************
50* Global Variables *
51*******************************************************************************/
52
53/**
54 * Array for fast recoding of the operand size (1/2/4/8 bytes) to a bit shift value.
55 */
56static const unsigned g_aSize2Shift[] =
57{
58 ~0, /* 0 - invalid */
59 0, /* *1 == 2^0 */
60 1, /* *2 == 2^1 */
61 ~0, /* 3 - invalid */
62 2, /* *4 == 2^2 */
63 ~0, /* 5 - invalid */
64 ~0, /* 6 - invalid */
65 ~0, /* 7 - invalid */
66 3 /* *8 == 2^3 */
67};
68
69/**
70 * Macro for fast recoding of the operand size (1/2/4/8 bytes) to a bit shift value.
71 */
72#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
73
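/*
 * Usage sketch (for illustration): SIZE_2_SHIFT(1) == 0, SIZE_2_SHIFT(2) == 1,
 * SIZE_2_SHIFT(4) == 2 and SIZE_2_SHIFT(8) == 3, so a transfer count becomes a
 * byte count with a plain shift, e.g.
 *
 *     cbTotal = cTransfers << SIZE_2_SHIFT(cb);   // 8 dword transfers -> 8 << 2 = 32 bytes
 *
 * which is exactly how cMovsMaxBytes / cStosMaxBytes are updated further down.
 */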
74
75/**
76 * Wrapper which does the write and updates range statistics when such are enabled.
77 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
78 */
79DECLINLINE(int) iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
80{
81#ifdef VBOX_WITH_STATISTICS
82 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
83 Assert(pStats);
84#endif
85
86 int rc;
87 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
88 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /* @todo fix const!! */
89 else
90 rc = VINF_SUCCESS;
91 if (rc != VINF_IOM_HC_MMIO_WRITE)
92 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
93 return rc;
94}
95
96
97/**
98 * Wrapper which does the read and updates range statistics when such are enabled.
99 */
100DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
101{
102#ifdef VBOX_WITH_STATISTICS
103 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
104 Assert(pStats);
105#endif
106
107 int rc;
108 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
109 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
110 else
111 rc = VINF_IOM_MMIO_UNUSED_FF;
112 if (rc != VINF_SUCCESS)
113 {
114 switch (rc)
115 {
116 case VINF_IOM_MMIO_UNUSED_FF:
117 switch (cbValue)
118 {
119 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
120 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
121 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
122 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
123 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
124 }
125 rc = VINF_SUCCESS;
126 break;
127
128 case VINF_IOM_MMIO_UNUSED_00:
129 switch (cbValue)
130 {
131 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
132 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
133 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
134 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
135 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
136 }
137 rc = VINF_SUCCESS;
138 break;
139 }
140 if (rc != VINF_IOM_HC_MMIO_READ)
141 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
142 }
143 return rc;
144}
145
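/*
 * For illustration only: a minimal, hypothetical device read callback with the
 * shape iomMMIODoRead invokes above (pfnReadCallback). The device state and
 * register layout are made up; the point is the VINF_IOM_MMIO_UNUSED_FF
 * convention, which lets iomMMIODoRead substitute 0xff..ff for unimplemented
 * registers (assumes cb == 4 for brevity).
 *
 *     static DECLCALLBACK(int) fooMMIORead(PPDMDEVINS pDevIns, void *pvUser,
 *                                          RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
 *     {
 *         FOOSTATE *pThis = (FOOSTATE *)pvUser;    // hypothetical device state
 *         switch (GCPhysAddr & 0xff)               // register offset within the range
 *         {
 *             case 0x00:                           // STATUS register
 *                 *(uint32_t *)pv = pThis->uStatus;
 *                 return VINF_SUCCESS;
 *             default:                             // unimplemented register
 *                 return VINF_IOM_MMIO_UNUSED_FF;  // caller fills in 0xff..ff
 *         }
 *     }
 */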
146
147/**
148 * Internal - statistics only.
149 */
150DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
151{
152#ifdef VBOX_WITH_STATISTICS
153 switch (cb)
154 {
155 case 1:
156 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
157 break;
158 case 2:
159 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
160 break;
161 case 4:
162 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
163 break;
164 case 8:
165 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
166 break;
167 default:
168 /* No way. */
169 AssertMsgFailed(("Invalid data length %d\n", cb));
170 break;
171 }
172#else
173 NOREF(pVM); NOREF(cb);
174#endif
175}
176
177
178/**
179 * MOV reg, mem (read)
180 * MOVZX reg, mem (read)
181 * MOVSX reg, mem (read)
182 *
183 * @returns VBox status code.
184 *
185 * @param pVM The virtual machine.
186 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
187 * @param pCpu Disassembler CPU state.
188 * @param pRange Pointer to the MMIO range.
189 * @param GCPhysFault The GC physical address corresponding to pvFault.
190 */
191static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
192{
193 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
194
195 /*
196 * Get the data size from parameter 2,
197 * and call the handler function to get the data.
198 */
199 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
200 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
201
202 uint64_t u64Data = 0;
203 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
204 if (rc == VINF_SUCCESS)
205 {
206 /*
207 * Do sign extension for MOVSX.
208 */
209 /** @todo checkup MOVSX implementation! */
210 if (pCpu->pCurInstr->opcode == OP_MOVSX)
211 {
212 if (cb == 1)
213 {
214 /* DWORD <- BYTE */
215 int64_t iData = (int8_t)u64Data;
216 u64Data = (uint64_t)iData;
217 }
218 else
219 {
220 /* DWORD <- WORD */
221 int64_t iData = (int16_t)u64Data;
222 u64Data = (uint64_t)iData;
223 }
224 }
225
226 /*
227 * Store the result to register (parameter 1).
228 */
229 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
230 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
231 }
232
233 if (rc == VINF_SUCCESS)
234 iomMMIOStatLength(pVM, cb);
235 return rc;
236}
237
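/*
 * Worked example for the MOVSX path above: "movsx eax, byte [MMIO]" with the
 * device returning 0x80 gives cb = 1 and u64Data = 0x80; the (int8_t) cast
 * sign-extends that to 0xFFFFFFFFFFFFFF80 before iomSaveDataToReg stores the
 * low 32 bits (0xFFFFFF80) into EAX.
 */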
238
239/**
240 * MOV mem, reg|imm (write)
241 *
242 * @returns VBox status code.
243 *
244 * @param pVM The virtual machine.
245 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
246 * @param pCpu Disassembler CPU state.
247 * @param pRange Pointer to the MMIO range.
248 * @param GCPhysFault The GC physical address corresponding to pvFault.
249 */
250static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
251{
252 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
253
254 /*
255 * Get data to write from second parameter,
256 * and call the callback to write it.
257 */
258 unsigned cb = 0;
259 uint64_t u64Data = 0;
260 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
261 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
262
263 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
264 if (rc == VINF_SUCCESS)
265 iomMMIOStatLength(pVM, cb);
266 return rc;
267}
268
269
270/** Wrapper for reading virtual memory. */
271DECLINLINE(int) iomRamRead(PVM pVM, void *pDest, RTGCPTR GCSrc, uint32_t cb)
272{
273 /* Note: This will fail in R0 or RC if it hits an access handler. That
274 isn't a problem though since the operation can be restarted in REM. */
275#ifdef IN_RC
276 return MMGCRamReadNoTrapHandler(pDest, (void *)GCSrc, cb);
277#else
278 return PGMPhysReadGCPtr(pVM, pDest, GCSrc, cb);
279#endif
280}
281
282
283/** Wrapper for writing virtual memory. */
284DECLINLINE(int) iomRamWrite(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
285{
286 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
287 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
288 * as well since we're not behind the pgm lock and handler may change between calls.
289 * MMGCRamWriteNoTrapHandler may also trap if the page isn't shadowed, or was kicked
290 * out from both the shadow pt (SMP or our changes) and TLB.
291 *
292 * Currently MMGCRamWriteNoTrapHandler may also fail when it hits a write access handler.
293 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr OTOH may mess up the state
294 * of some shadowed structure in R0. */
295#ifdef IN_RC
296 NOREF(pCtxCore);
297 return MMGCRamWriteNoTrapHandler((void *)GCPtrDst, pvSrc, cb);
298#elif IN_RING0
299# ifdef VBOX_WITH_NEW_PHYS_CODE /* PGMPhysWriteGCPtr will fail, make sure we ignore handlers here. */
300 return PGMPhysInterpretedWriteNoHandlers(pVM, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
301# else
302 NOREF(pCtxCore);
303 return PGMPhysWriteGCPtr(pVM, GCPtrDst, pvSrc, cb);
304# endif
305#else
306 NOREF(pCtxCore);
307 return PGMPhysWriteGCPtr(pVM, GCPtrDst, pvSrc, cb);
308#endif
309}
310
311
312#ifdef IOM_WITH_MOVS_SUPPORT
313/**
314 * [REP] MOVSB
315 * [REP] MOVSW
316 * [REP] MOVSD
317 *
318 * Restricted implementation.
319 *
320 *
321 * @returns VBox status code.
322 *
323 * @param pVM The virtual machine.
324 * @param uErrorCode CPU Error code.
325 * @param pRegFrame Trap register frame.
326 * @param GCPhysFault The GC physical address corresponding to pvFault.
327 * @param pCpu Disassembler CPU state.
328 * @param pRange Pointer to the MMIO range.
329 * @param ppStat Which sub-sample to attribute this call to.
330 */
331static int iomInterpretMOVS(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
332{
333 /*
334 * We do not support segment prefixes or REPNE.
335 */
336 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
337 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
338
339
340 /*
341 * Get bytes/words/dwords/qword count to copy.
342 */
343 uint32_t cTransfers = 1;
344 if (pCpu->prefix & PREFIX_REP)
345 {
346#ifndef IN_RC
347 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
348 && pRegFrame->rcx >= _4G)
349 return VINF_EM_RAW_EMULATE_INSTR;
350#endif
351
352 cTransfers = pRegFrame->ecx;
353 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
354 cTransfers &= 0xffff;
355
356 if (!cTransfers)
357 return VINF_SUCCESS;
358 }
359
360 /* Get the current privilege level. */
361 uint32_t cpl = CPUMGetGuestCPL(pVM, pRegFrame);
362
363 /*
364 * Get data size.
365 */
366 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
367 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
368 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
369
370#ifdef VBOX_WITH_STATISTICS
371 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
372 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
373#endif
374
375/** @todo re-evaluate on page boundaries. */
376
377 RTGCPHYS Phys = GCPhysFault;
378 int rc;
379 if (uErrorCode & X86_TRAP_PF_RW)
380 {
381 /*
382 * Write operation: [Mem] -> [MMIO]
383 * ds:esi (Virt Src) -> es:edi (Phys Dst)
384 */
385 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
386
387 /* Check callback. */
388 if (!pRange->CTX_SUFF(pfnWriteCallback))
389 return VINF_IOM_HC_MMIO_WRITE;
390
391 /* Convert source address ds:esi. */
392 RTGCUINTPTR pu8Virt;
393 rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
394 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
395 (PRTGCPTR)&pu8Virt);
396 if (RT_SUCCESS(rc))
397 {
398
399 /* Access verification first; we currently can't recover properly from traps inside this instruction */
400 rc = PGMVerifyAccess(pVM, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
401 if (rc != VINF_SUCCESS)
402 {
403 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
404 return VINF_EM_RAW_EMULATE_INSTR;
405 }
406
407#ifdef IN_RC
408 MMGCRamRegisterTrapHandler(pVM);
409#endif
410
411 /* copy loop. */
412 while (cTransfers)
413 {
414 uint32_t u32Data = 0;
415 rc = iomRamRead(pVM, &u32Data, (RTGCPTR)pu8Virt, cb);
416 if (rc != VINF_SUCCESS)
417 break;
418 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
419 if (rc != VINF_SUCCESS)
420 break;
421
422 pu8Virt += offIncrement;
423 Phys += offIncrement;
424 pRegFrame->rsi += offIncrement;
425 pRegFrame->rdi += offIncrement;
426 cTransfers--;
427 }
428#ifdef IN_RC
429 MMGCRamDeregisterTrapHandler(pVM);
430#endif
431 /* Update ecx. */
432 if (pCpu->prefix & PREFIX_REP)
433 pRegFrame->ecx = cTransfers;
434 }
435 else
436 rc = VINF_IOM_HC_MMIO_READ_WRITE;
437 }
438 else
439 {
440 /*
441 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
442 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
443 */
444 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
445
446 /* Check callback. */
447 if (!pRange->CTX_SUFF(pfnReadCallback))
448 return VINF_IOM_HC_MMIO_READ;
449
450 /* Convert destination address. */
451 RTGCUINTPTR pu8Virt;
452 rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
453 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
454 (RTGCPTR *)&pu8Virt);
455 if (RT_FAILURE(rc))
456 return VINF_IOM_HC_MMIO_READ;
457
458 /* Check if destination address is MMIO. */
459 PIOMMMIORANGE pMMIODst;
460 RTGCPHYS PhysDst;
461 rc = PGMGstGetPage(pVM, (RTGCPTR)pu8Virt, NULL, &PhysDst);
462 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
463 if ( RT_SUCCESS(rc)
464 && (pMMIODst = iomMMIOGetRange(&pVM->iom.s, PhysDst)))
465 {
466 /*
467 * Extra: [MMIO] -> [MMIO]
468 */
469 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
470 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
471 return VINF_IOM_HC_MMIO_READ_WRITE;
472
473 /* copy loop. */
474 while (cTransfers)
475 {
476 uint32_t u32Data;
477 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
478 if (rc != VINF_SUCCESS)
479 break;
480 rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
481 if (rc != VINF_SUCCESS)
482 break;
483
484 Phys += offIncrement;
485 PhysDst += offIncrement;
486 pRegFrame->rsi += offIncrement;
487 pRegFrame->rdi += offIncrement;
488 cTransfers--;
489 }
490 }
491 else
492 {
493 /*
494 * Normal: [MMIO] -> [Mem]
495 */
496 /* Access verification first; we currently can't recover properly from traps inside this instruction */
497 rc = PGMVerifyAccess(pVM, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
498 if (rc != VINF_SUCCESS)
499 {
500 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
501 return VINF_EM_RAW_EMULATE_INSTR;
502 }
503
504 /* copy loop. */
505#ifdef IN_RC
506 MMGCRamRegisterTrapHandler(pVM);
507#endif
508 while (cTransfers)
509 {
510 uint32_t u32Data;
511 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
512 if (rc != VINF_SUCCESS)
513 break;
514 rc = iomRamWrite(pVM, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
515 if (rc != VINF_SUCCESS)
516 {
517 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
518 break;
519 }
520
521 pu8Virt += offIncrement;
522 Phys += offIncrement;
523 pRegFrame->rsi += offIncrement;
524 pRegFrame->rdi += offIncrement;
525 cTransfers--;
526 }
527#ifdef IN_RC
528 MMGCRamDeregisterTrapHandler(pVM);
529#endif
530 }
531
532 /* Update ecx on exit. */
533 if (pCpu->prefix & PREFIX_REP)
534 pRegFrame->ecx = cTransfers;
535 }
536
537 /* work statistics. */
538 if (rc == VINF_SUCCESS)
539 iomMMIOStatLength(pVM, cb);
540 NOREF(ppStat);
541 return rc;
542}
543#endif /* IOM_WITH_MOVS_SUPPORT */
544
545
546/**
547 * [REP] STOSB
548 * [REP] STOSW
549 * [REP] STOSD
550 *
551 * Restricted implementation.
552 *
553 *
554 * @returns VBox status code.
555 *
556 * @param pVM The virtual machine.
557 * @param pRegFrame Trap register frame.
558 * @param GCPhysFault The GC physical address corresponding to pvFault.
559 * @param pCpu Disassembler CPU state.
560 * @param pRange Pointer to the MMIO range.
561 */
562static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
563{
564 /*
565 * We do not support segment prefixes or REPNE.
566 */
567 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
568 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
569
570 /*
571 * Get bytes/words/dwords count to copy.
572 */
573 uint32_t cTransfers = 1;
574 if (pCpu->prefix & PREFIX_REP)
575 {
576#ifndef IN_RC
577 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
578 && pRegFrame->rcx >= _4G)
579 return VINF_EM_RAW_EMULATE_INSTR;
580#endif
581
582 cTransfers = pRegFrame->ecx;
583 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
584 cTransfers &= 0xffff;
585
586 if (!cTransfers)
587 return VINF_SUCCESS;
588 }
589
590/** @todo r=bird: bounds checks! */
591
592 /*
593 * Get data size.
594 */
595 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
596 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
597 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
598
599#ifdef VBOX_WITH_STATISTICS
600 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
601 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
602#endif
603
604
605 RTGCPHYS Phys = GCPhysFault;
606 uint32_t u32Data = pRegFrame->eax;
607 int rc;
608 if (pRange->CTX_SUFF(pfnFillCallback))
609 {
610 /*
611 * Use the fill callback.
612 */
613 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
614 if (offIncrement > 0)
615 {
616 /* addr++ variant. */
617 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys, u32Data, cb, cTransfers);
618 if (rc == VINF_SUCCESS)
619 {
620 /* Update registers. */
621 pRegFrame->rdi += cTransfers << SIZE_2_SHIFT(cb);
622 if (pCpu->prefix & PREFIX_REP)
623 pRegFrame->ecx = 0;
624 }
625 }
626 else
627 {
628 /* addr-- variant. */
629 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), (Phys - (cTransfers - 1)) << SIZE_2_SHIFT(cb), u32Data, cb, cTransfers);
630 if (rc == VINF_SUCCESS)
631 {
632 /* Update registers. */
633 pRegFrame->rdi -= cTransfers << SIZE_2_SHIFT(cb);
634 if (pCpu->prefix & PREFIX_REP)
635 pRegFrame->ecx = 0;
636 }
637 }
638 }
639 else
640 {
641 /*
642 * Use the write callback.
643 */
644 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
645
646 /* fill loop. */
647 do
648 {
649 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
650 if (rc != VINF_SUCCESS)
651 break;
652
653 Phys += offIncrement;
654 pRegFrame->rdi += offIncrement;
655 cTransfers--;
656 } while (cTransfers);
657
658 /* Update ecx on exit. */
659 if (pCpu->prefix & PREFIX_REP)
660 pRegFrame->ecx = cTransfers;
661 }
662
663 /*
664 * Work statistics and return.
665 */
666 if (rc == VINF_SUCCESS)
667 iomMMIOStatLength(pVM, cb);
668 return rc;
669}
670
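/*
 * Worked example for the fill-callback path above: "rep stosd" with EAX=0,
 * ECX=4, DF=0 on a range that provides pfnFillCallback results in a single
 * callback invocation with u32Data=0, cb=4 and cTransfers=4; on VINF_SUCCESS
 * the code advances RDI by 4 << SIZE_2_SHIFT(4) = 16 bytes and clears ECX.
 */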
671
672/**
673 * [REP] LODSB
674 * [REP] LODSW
675 * [REP] LODSD
676 *
677 * Restricted implementation.
678 *
679 *
680 * @returns VBox status code.
681 *
682 * @param pVM The virtual machine.
683 * @param pRegFrame Trap register frame.
684 * @param GCPhysFault The GC physical address corresponding to pvFault.
685 * @param pCpu Disassembler CPU state.
686 * @param pRange Pointer to the MMIO range.
687 */
688static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
689{
690 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
691
692 /*
693 * We do not support segment prefixes or REP*.
694 */
695 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
696 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
697
698 /*
699 * Get data size.
700 */
701 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
702 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
703 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
704
705 /*
706 * Perform read.
707 */
708 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
709 if (rc == VINF_SUCCESS)
710 pRegFrame->rsi += offIncrement;
711
712 /*
713 * Work statistics and return.
714 */
715 if (rc == VINF_SUCCESS)
716 iomMMIOStatLength(pVM, cb);
717 return rc;
718}
719
720
721/**
722 * CMP [MMIO], reg|imm
723 * CMP reg|imm, [MMIO]
724 *
725 * Restricted implementation.
726 *
727 *
728 * @returns VBox status code.
729 *
730 * @param pVM The virtual machine.
731 * @param pRegFrame Trap register frame.
732 * @param GCPhysFault The GC physical address corresponding to pvFault.
733 * @param pCpu Disassembler CPU state.
734 * @param pRange Pointer to the MMIO range.
735 */
736static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
737{
738 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
739
740 /*
741 * Get the operands.
742 */
743 unsigned cb = 0;
744 uint64_t uData1 = 0;
745 uint64_t uData2 = 0;
746 int rc;
747 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
748 /* cmp reg, [MMIO]. */
749 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
750 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
751 /* cmp [MMIO], reg|imm. */
752 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
753 else
754 {
755 AssertMsgFailed(("Disassembler CMP problem..\n"));
756 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
757 }
758
759 if (rc == VINF_SUCCESS)
760 {
761 /* Emulate CMP and update guest flags. */
762 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
763 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
764 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
765 iomMMIOStatLength(pVM, cb);
766 }
767
768 return rc;
769}
770
771
772/**
773 * AND [MMIO], reg|imm
774 * AND reg, [MMIO]
775 * OR [MMIO], reg|imm
776 * OR reg, [MMIO]
777 *
778 * Restricted implementation.
779 *
780 *
781 * @returns VBox status code.
782 *
783 * @param pVM The virtual machine.
784 * @param pRegFrame Trap register frame.
785 * @param GCPhysFault The GC physical address corresponding to pvFault.
786 * @param pCpu Disassembler CPU state.
787 * @param pRange Pointer to the MMIO range.
788 * @param pfnEmulate Instruction emulation function.
789 */
790static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
791{
792 unsigned cb = 0;
793 uint64_t uData1 = 0;
794 uint64_t uData2 = 0;
795 bool fAndWrite;
796 int rc;
797
798#ifdef LOG_ENABLED
799 const char *pszInstr;
800
801 if (pCpu->pCurInstr->opcode == OP_XOR)
802 pszInstr = "Xor";
803 else if (pCpu->pCurInstr->opcode == OP_OR)
804 pszInstr = "Or";
805 else if (pCpu->pCurInstr->opcode == OP_AND)
806 pszInstr = "And";
807 else
808 pszInstr = "OrXorAnd??";
809#endif
810
811 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
812 {
813 /* and reg, [MMIO]. */
814 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
815 fAndWrite = false;
816 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
817 }
818 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
819 {
820 /* and [MMIO], reg|imm. */
821 fAndWrite = true;
822 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
823 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
824 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
825 else
826 rc = VINF_IOM_HC_MMIO_READ_WRITE;
827 }
828 else
829 {
830 AssertMsgFailed(("Disassembler AND problem..\n"));
831 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
832 }
833
834 if (rc == VINF_SUCCESS)
835 {
836 /* Emulate AND and update guest flags. */
837 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
838
839 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
840
841 if (fAndWrite)
842 /* Store result to MMIO. */
843 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
844 else
845 {
846 /* Store result to register. */
847 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
848 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
849 }
850 if (rc == VINF_SUCCESS)
851 {
852 /* Update guest's eflags and finish. */
853 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
854 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
855 iomMMIOStatLength(pVM, cb);
856 }
857 }
858
859 return rc;
860}
861
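/*
 * Worked example for the read-modify-write path above: "or dword [MMIO], 1"
 * takes the second branch (fAndWrite = true), reads the current dword into
 * uData1, lets pfnEmulate (EMEmulateOr) compute uData1 |= 1 plus the resulting
 * arithmetic flags, writes uData1 back via iomMMIODoWrite and finally merges
 * the flag bits into the guest EFLAGS.
 */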
862
863/**
864 * TEST [MMIO], reg|imm
865 * TEST reg, [MMIO]
866 *
867 * Restricted implementation.
868 *
869 *
870 * @returns VBox status code.
871 *
872 * @param pVM The virtual machine.
873 * @param pRegFrame Trap register frame.
874 * @param GCPhysFault The GC physical address corresponding to pvFault.
875 * @param pCpu Disassembler CPU state.
876 * @param pRange Pointer to the MMIO range.
877 */
878static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
879{
880 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
881
882 unsigned cb = 0;
883 uint64_t uData1 = 0;
884 uint64_t uData2 = 0;
885 int rc;
886
887 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
888 {
889 /* test reg, [MMIO]. */
890 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
891 }
892 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
893 {
894 /* test [MMIO], reg|imm. */
895 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
896 }
897 else
898 {
899 AssertMsgFailed(("Disassembler TEST problem..\n"));
900 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
901 }
902
903 if (rc == VINF_SUCCESS)
904 {
905 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
906 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
907 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
908 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
909 iomMMIOStatLength(pVM, cb);
910 }
911
912 return rc;
913}
914
915
916/**
917 * BT [MMIO], reg|imm
918 *
919 * Restricted implementation.
920 *
921 *
922 * @returns VBox status code.
923 *
924 * @param pVM The virtual machine.
925 * @param pRegFrame Trap register frame.
926 * @param GCPhysFault The GC physical address corresponding to pvFault.
927 * @param pCpu Disassembler CPU state.
928 * @param pRange Pointer to the MMIO range.
929 */
930static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
931{
932 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
933
934 uint64_t uBit = 0;
935 uint64_t uData1 = 0;
936 unsigned cb = 0;
937 int rc;
938
939 if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cb))
940 {
941 /* bt [MMIO], reg|imm. */
942 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
943 }
944 else
945 {
946 AssertMsgFailed(("Disassembler BT problem..\n"));
947 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
948 }
949
950 if (rc == VINF_SUCCESS)
951 {
952 /* The size of the memory operand only matters here. */
953 cb = DISGetParamSize(pCpu, &pCpu->param1);
954
955 /* Find the bit inside the faulting address */
956 uBit &= (cb*8 - 1);
957
958 pRegFrame->eflags.Bits.u1CF = (uData1 >> uBit);
959 iomMMIOStatLength(pVM, cb);
960 }
961
962 return rc;
963}
964
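/*
 * Worked example for BT above: "bt dword [MMIO], 35" yields uBit = 35 from
 * param2; the memory operand is 4 bytes, so uBit &= 31 leaves bit 3, and CF is
 * loaded from bit 3 of the dword read from the MMIO register.
 */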
965/**
966 * XCHG [MMIO], reg
967 * XCHG reg, [MMIO]
968 *
969 * Restricted implementation.
970 *
971 *
972 * @returns VBox status code.
973 *
974 * @param pVM The virtual machine.
975 * @param pRegFrame Trap register frame.
976 * @param GCPhysFault The GC physical address corresponding to pvFault.
977 * @param pCpu Disassembler CPU state.
978 * @param pRange Pointer to the MMIO range.
979 */
980static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
981{
982 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
983 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
984 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
985 return VINF_IOM_HC_MMIO_READ_WRITE;
986
987 int rc;
988 unsigned cb = 0;
989 uint64_t uData1 = 0;
990 uint64_t uData2 = 0;
991 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
992 {
993 /* xchg reg, [MMIO]. */
994 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
995 if (rc == VINF_SUCCESS)
996 {
997 /* Store result to MMIO. */
998 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
999
1000 if (rc == VINF_SUCCESS)
1001 {
1002 /* Store result to register. */
1003 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
1004 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1005 }
1006 else
1007 Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1008 }
1009 else
1010 Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1011 }
1012 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
1013 {
1014 /* xchg [MMIO], reg. */
1015 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1016 if (rc == VINF_SUCCESS)
1017 {
1018 /* Store result to MMIO. */
1019 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
1020 if (rc == VINF_SUCCESS)
1021 {
1022 /* Store result to register. */
1023 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
1024 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1025 }
1026 else
1027 Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1028 }
1029 else
1030 Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1031 }
1032 else
1033 {
1034 AssertMsgFailed(("Disassembler XCHG problem..\n"));
1035 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1036 }
1037 return rc;
1038}
1039
1040
1041/**
1042 * \#PF Handler callback for MMIO ranges.
1043 *
1044 * @returns VBox status code (appropriate for GC return).
1045 * @param pVM VM Handle.
1046 * @param uErrorCode CPU Error code.
1047 * @param pCtxCore Trap register frame.
1048 * @param pvFault The fault address (cr2).
1049 * @param GCPhysFault The GC physical address corresponding to pvFault.
1050 * @param pvUser Pointer to the MMIO ring-3 range entry.
1051 */
1052VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1053{
1054 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1055 Log(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1056 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1057
1058 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1059 Assert(pRange);
1060 Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1061
1062#ifdef VBOX_WITH_STATISTICS
1063 /*
1064 * Locate the statistics; if the range is larger than PAGE_SIZE we'll use the first byte for everything.
1065 */
1066 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
1067 if (!pStats)
1068 {
1069# ifdef IN_RING3
1070 return VERR_NO_MEMORY;
1071# else
1072 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1073 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1074 return uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1075# endif
1076 }
1077#endif
1078
1079#ifndef IN_RING3
1080 /*
1081 * Should we defer the request right away?
1082 */
1083 if (uErrorCode & X86_TRAP_PF_RW
1084 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1085 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1086 {
1087# ifdef VBOX_WITH_STATISTICS
1088 if (uErrorCode & X86_TRAP_PF_RW)
1089 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1090 else
1091 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1092# endif
1093
1094 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1095 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1096 return uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1097 }
1098#endif /* !IN_RING3 */
1099
1100 /*
1101 * Disassemble the instruction and interpret it.
1102 */
1103 DISCPUSTATE Cpu;
1104 unsigned cbOp;
1105 int rc = EMInterpretDisasOne(pVM, pCtxCore, &Cpu, &cbOp);
1106 AssertRCReturn(rc, rc);
1107 switch (Cpu.pCurInstr->opcode)
1108 {
1109 case OP_MOV:
1110 case OP_MOVZX:
1111 case OP_MOVSX:
1112 {
1113 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1114 if (uErrorCode & X86_TRAP_PF_RW)
1115 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, &Cpu, pRange, GCPhysFault);
1116 else
1117 rc = iomInterpretMOVxXRead(pVM, pCtxCore, &Cpu, pRange, GCPhysFault);
1118 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1119 break;
1120 }
1121
1122
1123#ifdef IOM_WITH_MOVS_SUPPORT
1124 case OP_MOVSB:
1125 case OP_MOVSWD:
1126 {
1127 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1128 PSTAMPROFILE pStat = NULL;
1129 rc = iomInterpretMOVS(pVM, uErrorCode, pCtxCore, GCPhysFault, &Cpu, pRange, &pStat);
1130 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1131 break;
1132 }
1133#endif
1134
1135 case OP_STOSB:
1136 case OP_STOSWD:
1137 Assert(uErrorCode & X86_TRAP_PF_RW);
1138 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1139 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1140 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1141 break;
1142
1143 case OP_LODSB:
1144 case OP_LODSWD:
1145 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1146 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1147 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1148 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1149 break;
1150
1151 case OP_CMP:
1152 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1153 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1154 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1155 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1156 break;
1157
1158 case OP_AND:
1159 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1160 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateAnd);
1161 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1162 break;
1163
1164 case OP_OR:
1165 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1166 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateOr);
1167 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1168 break;
1169
1170 case OP_XOR:
1171 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1172 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateXor);
1173 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1174 break;
1175
1176 case OP_TEST:
1177 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1178 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1179 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1180 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1181 break;
1182
1183 case OP_BT:
1184 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1185 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1186 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1187 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1188 break;
1189
1190 case OP_XCHG:
1191 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1192 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1193 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1194 break;
1195
1196
1197 /*
1198 * The instruction isn't supported. Hand it on to ring-3.
1199 */
1200 default:
1201 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1202 rc = (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1203 break;
1204 }
1205
1206 /*
1207 * On success advance EIP.
1208 */
1209 if (rc == VINF_SUCCESS)
1210 pCtxCore->rip += cbOp;
1211 else
1212 {
1213 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1214#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1215 switch (rc)
1216 {
1217 case VINF_IOM_HC_MMIO_READ:
1218 case VINF_IOM_HC_MMIO_READ_WRITE:
1219 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1220 break;
1221 case VINF_IOM_HC_MMIO_WRITE:
1222 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1223 break;
1224 }
1225#endif
1226 }
1227
1228 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1229 return rc;
1230}
1231
1232
1233#ifdef IN_RING3
1234/**
1235 * \#PF Handler callback for MMIO ranges.
1236 *
1237 * @returns VINF_SUCCESS if the handler has carried out the operation.
1238 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1239 * @param pVM VM Handle.
1240 * @param GCPhysFault The physical address the guest is reading from or writing to.
1241 * @param pvPhys The HC mapping of that address.
1242 * @param pvBuf What the guest is reading/writing.
1243 * @param cbBuf How much it's reading/writing.
1244 * @param enmAccessType The access type.
1245 * @param pvUser Pointer to the MMIO range entry.
1246 */
1247DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1248{
1249 int rc;
1250 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1251 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1252
1253 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1254
1255 Assert(pRange);
1256 Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1257
1258 if (enmAccessType == PGMACCESSTYPE_READ)
1259 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1260 else
1261 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1262
1263 AssertRC(rc);
1264 return rc;
1265}
1266#endif /* IN_RING3 */
1267
1268
1269/**
1270 * Reads a MMIO register.
1271 *
1272 * @returns VBox status code.
1273 *
1274 * @param pVM VM handle.
1275 * @param GCPhys The physical address to read.
1276 * @param pu32Value Where to store the value read.
1277 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1278 */
1279VMMDECL(int) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1280{
1281 /*
1282 * Lookup the current context range node and statistics.
1283 */
1284 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1285 AssertMsgReturn(pRange,
1286 ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue),
1287 VERR_INTERNAL_ERROR);
1288#ifdef VBOX_WITH_STATISTICS
1289 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1290 if (!pStats)
1291# ifdef IN_RING3
1292 return VERR_NO_MEMORY;
1293# else
1294 return VINF_IOM_HC_MMIO_READ;
1295# endif
1296#endif /* VBOX_WITH_STATISTICS */
1297 if (pRange->CTX_SUFF(pfnReadCallback))
1298 {
1299 /*
1300 * Perform the read and deal with the result.
1301 */
1302#ifdef VBOX_WITH_STATISTICS
1303 STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1304#endif
1305 int rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, (unsigned)cbValue);
1306#ifdef VBOX_WITH_STATISTICS
1307 STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1308 if (rc != VINF_IOM_HC_MMIO_READ)
1309 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1310#endif
1311 switch (rc)
1312 {
1313 case VINF_SUCCESS:
1314 default:
1315 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1316 return rc;
1317
1318 case VINF_IOM_MMIO_UNUSED_00:
1319 switch (cbValue)
1320 {
1321 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1322 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1323 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1324 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1325 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1326 }
1327 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1328 return VINF_SUCCESS;
1329
1330 case VINF_IOM_MMIO_UNUSED_FF:
1331 switch (cbValue)
1332 {
1333 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1334 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1335 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1336 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1337 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1338 }
1339 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1340 return VINF_SUCCESS;
1341 }
1342 }
1343#ifndef IN_RING3
1344 if (pRange->pfnReadCallbackR3)
1345 {
1346 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1347 return VINF_IOM_HC_MMIO_READ;
1348 }
1349#endif
1350
1351 /*
1352 * Lookup the ring-3 range.
1353 */
1354#ifdef VBOX_WITH_STATISTICS
1355 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1356#endif
1357 /* Unassigned memory; this is actually not supposed to happen. */
1358 switch (cbValue)
1359 {
1360 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1361 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1362 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1363 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1364 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1365 }
1366 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1367 return VINF_SUCCESS;
1368}
1369
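/*
 * For illustration, a hypothetical caller of IOMMMIORead (GCPhysReg is made
 * up); in R0/RC the VINF_IOM_HC_MMIO_READ status means "retry in ring-3":
 *
 *     uint32_t u32 = 0;
 *     int rc = IOMMMIORead(pVM, GCPhysReg, &u32, sizeof(u32));
 *     if (rc == VINF_IOM_HC_MMIO_READ)
 *         return rc;              // R0/RC only: let ring-3 redo the read
 *     AssertRCReturn(rc, rc);
 *     // u32 now holds the register value (0xffffffff for unassigned memory)
 */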
1370
1371/**
1372 * Writes to a MMIO register.
1373 *
1374 * @returns VBox status code.
1375 *
1376 * @param pVM VM handle.
1377 * @param GCPhys The physical address to write to.
1378 * @param u32Value The value to write.
1379 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
1380 */
1381VMMDECL(int) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1382{
1383 /*
1384 * Lookup the current context range node.
1385 */
1386 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1387 AssertMsgReturn(pRange,
1388 ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue),
1389 VERR_INTERNAL_ERROR);
1390#ifdef VBOX_WITH_STATISTICS
1391 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1392 if (!pStats)
1393# ifdef IN_RING3
1394 return VERR_NO_MEMORY;
1395# else
1396 return VINF_IOM_HC_MMIO_WRITE;
1397# endif
1398#endif /* VBOX_WITH_STATISTICS */
1399
1400 /*
1401 * Perform the write if there's a write handler. R0/GC may have
1402 * to defer it to ring-3.
1403 */
1404 if (pRange->CTX_SUFF(pfnWriteCallback))
1405 {
1406#ifdef VBOX_WITH_STATISTICS
1407 STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1408#endif
1409 int rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, &u32Value, (unsigned)cbValue);
1410#ifdef VBOX_WITH_STATISTICS
1411 STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1412 if (rc != VINF_IOM_HC_MMIO_WRITE)
1413 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
1414#endif
1415 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
1416 return rc;
1417 }
1418#ifndef IN_RING3
1419 if (pRange->pfnWriteCallbackR3)
1420 {
1421 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1422 return VINF_IOM_HC_MMIO_WRITE;
1423 }
1424#endif
1425
1426 /*
1427 * No write handler, nothing to do.
1428 */
1429#ifdef VBOX_WITH_STATISTICS
1430 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
1431#endif
1432 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
1433 return VINF_SUCCESS;
1434}
1435
1436
1437/**
1438 * [REP*] INSB/INSW/INSD
1439 * ES:EDI,DX[,ECX]
1440 *
1441 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1442 *
1443 * @returns Strict VBox status code. Informational status codes other than the one documented
1444 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1445 * @retval VINF_SUCCESS Success.
1446 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1447 * status code must be passed on to EM.
1448 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1449 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1450 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1451 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1452 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1453 *
1454 * @param pVM The virtual machine.
1455 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1456 * @param uPort IO Port
1457 * @param uPrefix IO instruction prefix
1458 * @param cbTransfer Size of transfer unit
1459 */
1460VMMDECL(int) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1461{
1462#ifdef VBOX_WITH_STATISTICS
1463 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
1464#endif
1465
1466 /*
1467 * We do not support REPNE or decrementing destination
1468 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
1469 */
1470 if ( (uPrefix & PREFIX_REPNE)
1471 || pRegFrame->eflags.Bits.u1DF)
1472 return VINF_EM_RAW_EMULATE_INSTR;
1473
1474 /*
1475 * Get bytes/words/dwords count to transfer.
1476 */
1477 RTGCUINTREG cTransfers = 1;
1478 if (uPrefix & PREFIX_REP)
1479 {
1480#ifndef IN_RC
1481 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
1482 && pRegFrame->rcx >= _4G)
1483 return VINF_EM_RAW_EMULATE_INSTR;
1484#endif
1485 cTransfers = pRegFrame->ecx;
1486
1487 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1488 cTransfers &= 0xffff;
1489
1490 if (!cTransfers)
1491 return VINF_SUCCESS;
1492 }
1493
1494 /* Convert destination address es:edi. */
1495 RTGCPTR GCPtrDst;
1496 int rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1497 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1498 &GCPtrDst);
1499 if (RT_FAILURE(rc))
1500 {
1501 Log(("INS destination address conversion failed -> fallback, rc=%d\n", rc));
1502 return VINF_EM_RAW_EMULATE_INSTR;
1503 }
1504
1505 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
1506 uint32_t cpl = CPUMGetGuestCPL(pVM, pRegFrame);
1507
1508 rc = PGMVerifyAccess(pVM, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
1509 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1510 if (rc != VINF_SUCCESS)
1511 {
1512 Log(("INS will generate a trap -> fallback, rc=%d\n", rc));
1513 return VINF_EM_RAW_EMULATE_INSTR;
1514 }
1515
1516 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1517 if (cTransfers > 1)
1518 {
1519 /* If the device supports string transfers, ask it to do as
1520 * much as it wants. The rest is done with single-word transfers. */
1521 const RTGCUINTREG cTransfersOrg = cTransfers;
1522 rc = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
1523 AssertRC(rc); Assert(cTransfers <= cTransfersOrg);
1524 pRegFrame->rdi += (cTransfersOrg - cTransfers) * cbTransfer;
1525 }
1526
1527#ifdef IN_RC
1528 MMGCRamRegisterTrapHandler(pVM);
1529#endif
1530
1531 while (cTransfers && rc == VINF_SUCCESS)
1532 {
1533 uint32_t u32Value;
1534 rc = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
1535 if (!IOM_SUCCESS(rc))
1536 break;
1537 int rc2 = iomRamWrite(pVM, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
1538 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1539 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
1540 pRegFrame->rdi += cbTransfer;
1541 cTransfers--;
1542 }
1543#ifdef IN_RC
1544 MMGCRamDeregisterTrapHandler(pVM);
1545#endif
1546
1547 /* Update ecx on exit. */
1548 if (uPrefix & PREFIX_REP)
1549 pRegFrame->ecx = cTransfers;
1550
1551 AssertMsg(rc == VINF_SUCCESS || rc == VINF_IOM_HC_IOPORT_READ || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) || RT_FAILURE(rc), ("%Rrc\n", rc));
1552 return rc;
1553}
1554
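/*
 * Worked example for the loop above: "rep insw" with ECX=256, cbTransfer=2 and
 * DF=0 first offers all 256 transfers to the device via IOMIOPortReadString;
 * whatever remains is read one word at a time with IOMIOPortRead and stored
 * through iomRamWrite, so a fully completed instruction leaves RDI advanced by
 * 512 bytes and ECX=0.
 */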
1555
1556/**
1557 * [REP*] INSB/INSW/INSD
1558 * ES:EDI,DX[,ECX]
1559 *
1560 * @returns Strict VBox status code. Informational status codes other than the one documented
1561 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1562 * @retval VINF_SUCCESS Success.
1563 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1564 * status code must be passed on to EM.
1565 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1566 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1567 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1568 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1569 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1570 *
1571 * @param pVM The virtual machine.
1572 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1573 * @param pCpu Disassembler CPU state.
1574 */
1575VMMDECL(int) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1576{
1577 /*
1578 * Get port number directly from the register (no need to bother the
1579 * disassembler). And get the I/O register size from the opcode / prefix.
1580 */
1581 RTIOPORT Port = pRegFrame->edx & 0xffff;
1582 unsigned cb = 0;
1583 if (pCpu->pCurInstr->opcode == OP_INSB)
1584 cb = 1;
1585 else
1586 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1587
1588 int rc = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1589 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1590 {
1591 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rc), ("%Rrc\n", rc));
1592 return rc;
1593 }
1594
1595 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1596}
1597
1598
1599/**
1600 * [REP*] OUTSB/OUTSW/OUTSD
1601 * DS:ESI,DX[,ECX]
1602 *
1603 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1604 *
1605 * @returns Strict VBox status code. Informational status codes other than the one documented
1606 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1607 * @retval VINF_SUCCESS Success.
1608 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1609 * status code must be passed on to EM.
1610 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1611 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1612 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1613 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1614 *
1615 * @param pVM The virtual machine.
1616 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1617 * @param uPort IO Port
1618 * @param uPrefix IO instruction prefix
1619 * @param cbTransfer Size of transfer unit
1620 */
1621VMMDECL(int) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1622{
1623#ifdef VBOX_WITH_STATISTICS
1624 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
1625#endif
1626
1627 /*
1628 * We do not support segment prefixes, REPNE or
1629 * decrementing source pointer.
1630 */
1631 if ( (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
1632 || pRegFrame->eflags.Bits.u1DF)
1633 return VINF_EM_RAW_EMULATE_INSTR;
1634
1635 /*
1636 * Get bytes/words/dwords count to transfer.
1637 */
1638 RTGCUINTREG cTransfers = 1;
1639 if (uPrefix & PREFIX_REP)
1640 {
1641#ifndef IN_RC
1642 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
1643 && pRegFrame->rcx >= _4G)
1644 return VINF_EM_RAW_EMULATE_INSTR;
1645#endif
1646 cTransfers = pRegFrame->ecx;
1647 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1648 cTransfers &= 0xffff;
1649
1650 if (!cTransfers)
1651 return VINF_SUCCESS;
1652 }
1653
1654 /* Convert source address ds:esi. */
1655 RTGCPTR GCPtrSrc;
1656 int rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
1657 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1658 &GCPtrSrc);
1659 if (RT_FAILURE(rc))
1660 {
1661 Log(("OUTS source address conversion failed -> fallback, rc=%Rrc\n", rc));
1662 return VINF_EM_RAW_EMULATE_INSTR;
1663 }
1664
1665 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1666 uint32_t cpl = CPUMGetGuestCPL(pVM, pRegFrame);
1667 rc = PGMVerifyAccess(pVM, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
1668 (cpl == 3) ? X86_PTE_US : 0);
1669 if (rc != VINF_SUCCESS)
1670 {
1671 Log(("OUTS will generate a trap -> fallback, rc=%Rrc\n", rc));
1672 return VINF_EM_RAW_EMULATE_INSTR;
1673 }
1674
1675 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1676 if (cTransfers > 1)
1677 {
1678 /*
1679 * If the device supports string transfers, ask it to do as
1680 * much as it wants. The rest is done with single-word transfers.
1681 */
1682 const RTGCUINTREG cTransfersOrg = cTransfers;
1683 rc = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
1684 AssertRC(rc); Assert(cTransfers <= cTransfersOrg);
1685 pRegFrame->rsi += (cTransfersOrg - cTransfers) * cbTransfer;
1686 }
1687
1688#ifdef IN_RC
1689 MMGCRamRegisterTrapHandler(pVM);
1690#endif
1691
1692 while (cTransfers && rc == VINF_SUCCESS)
1693 {
1694 uint32_t u32Value;
1695 rc = iomRamRead(pVM, &u32Value, GCPtrSrc, cbTransfer);
1696 if (rc != VINF_SUCCESS)
1697 break;
1698 rc = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
1699 if (!IOM_SUCCESS(rc))
1700 break;
1701 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
1702 pRegFrame->rsi += cbTransfer;
1703 cTransfers--;
1704 }
1705
1706#ifdef IN_RC
1707 MMGCRamDeregisterTrapHandler(pVM);
1708#endif
1709
1710 /* Update ecx on exit. */
1711 if (uPrefix & PREFIX_REP)
1712 pRegFrame->ecx = cTransfers;
1713
1714 AssertMsg(rc == VINF_SUCCESS || rc == VINF_IOM_HC_IOPORT_WRITE || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) || RT_FAILURE(rc), ("%Rrc\n", rc));
1715 return rc;
1716}
1717
1718
1719/**
1720 * [REP*] OUTSB/OUTSW/OUTSD
1721 * DS:ESI,DX[,ECX]
1722 *
1723 * @returns Strict VBox status code. Informational status codes other than the one documented
1724 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1725 * @retval VINF_SUCCESS Success.
1726 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1727 * status code must be passed on to EM.
1728 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1729 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1730 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1731 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1732 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1733 *
1734 * @param pVM The virtual machine.
1735 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1736 * @param pCpu Disassembler CPU state.
1737 */
1738VMMDECL(int) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1739{
1740 /*
1741 * Get port number from the first parameter.
1742 * And get the I/O register size from the opcode / prefix.
1743 */
1744 uint64_t Port = 0;
1745 unsigned cb = 0;
1746 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1747 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1748 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1749 cb = 1;
1750 else
1751 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1752
1753 int rc = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1754 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1755 {
1756 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rc), ("%Rrc\n", rc));
1757 return rc;
1758 }
1759
1760 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1761}
1762
1763
1764#ifndef IN_RC
1765/**
1766 * Mapping an MMIO2 page in place of an MMIO page for direct access.
1767 *
1768 * (This is a special optimization used by the VGA device.)
1769 *
1770 * @returns VBox status code.
1771 *
1772 * @param pVM The virtual machine.
1773 * @param GCPhys The address of the MMIO page to be changed.
1774 * @param GCPhysRemapped The address of the MMIO2 page.
1775 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1776 * for the time being.
1777 */
1778VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
1779{
1780 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
1781
1782 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1783
1784 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1785 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1786 || ( CPUMIsGuestInPagedProtectedMode(pVM)
1787 && !HWACCMIsNestedPagingActive(pVM)))
1788 return VINF_SUCCESS; /* ignore */
1789
1790 /*
1791 * Lookup the context range node the page belongs to.
1792 */
1793 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1794 AssertMsgReturn(pRange,
1795 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
1796 VERR_IOM_MMIO_RANGE_NOT_FOUND);
1797 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1798 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1799
1800 /*
1801 * Do the aliasing; page align the addresses since PGM is picky.
1802 */
1803 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1804 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1805
1806 int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
1807 AssertRCReturn(rc, rc);
1808
1809 /*
1810 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
1811 * can simply prefetch it.
1812 *
1813 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
1814 */
1815#ifndef VBOX_WITH_NEW_PHYS_CODE /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
1816# ifdef VBOX_STRICT
1817 uint64_t fFlags;
1818 RTHCPHYS HCPhys;
1819 rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1820 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1821# endif
1822#endif
1823 rc = PGMPrefetchPage(pVM, (RTGCPTR)GCPhys);
1824 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1825 return VINF_SUCCESS;
1826}
1827
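/*
 * For illustration, a hypothetical call as the VGA device (see the comment
 * above) might make it when the guest touches a framebuffer page; the GCPhys*
 * variables are made up:
 *
 *     rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMMIOPage, GCPhysMMIO2Page,
 *                              X86_PTE_RW | X86_PTE_P);
 */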
1828
1829/**
1830 * Reset a previously modified MMIO region; restore the access flags.
1831 *
1832 * @returns VBox status code.
1833 *
1834 * @param pVM The virtual machine.
1835 * @param GCPhys Physical address that's part of the MMIO region to be reset.
1836 */
1837VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
1838{
1839 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
1840
1841 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1842 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1843 || ( CPUMIsGuestInPagedProtectedMode(pVM)
1844 && !HWACCMIsNestedPagingActive(pVM)))
1845 return VINF_SUCCESS; /* ignore */
1846
1847 /*
1848 * Lookup the context range node the page belongs to.
1849 */
1850 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1851 AssertMsgReturn(pRange,
1852 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
1853 VERR_IOM_MMIO_RANGE_NOT_FOUND);
1854
1855 /*
1856 * Call PGM to do the job.
1857 *
1858 * After the call, all the pages should be non-present... unless there is
1859 * a page pool flush pending (unlikely).
1860 */
1861 int rc = PGMHandlerPhysicalReset(pVM, pRange->GCPhys);
1862 AssertRC(rc);
1863
1864#ifdef VBOX_STRICT
1865 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
1866 {
1867 uint32_t cb = pRange->cb;
1868 GCPhys = pRange->GCPhys;
1869 while (cb)
1870 {
1871 uint64_t fFlags;
1872 RTHCPHYS HCPhys;
1873 rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1874 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1875 cb -= PAGE_SIZE;
1876 GCPhys += PAGE_SIZE;
1877 }
1878 }
1879#endif
1880 return rc;
1881}
1882#endif /* !IN_RC */
1883