VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 13400

Last change on this file since 13400 was 13400, checked in by vboxsync, 16 years ago

Extra checks

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 60.7 KB
1/* $Id: IOMAllMMIO.cpp 13400 2008-10-20 15:37:53Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_IOM
27#include <VBox/iom.h>
28#include <VBox/cpum.h>
29#include <VBox/pgm.h>
30#include <VBox/selm.h>
31#include <VBox/mm.h>
32#include <VBox/em.h>
33#include <VBox/pgm.h>
34#include <VBox/trpm.h>
35#include "IOMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/hwaccm.h>
38
39#include <VBox/dis.h>
40#include <VBox/disopcode.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48
49/*******************************************************************************
50* Global Variables *
51*******************************************************************************/
52
53/**
54 * Array for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
55 */
56static const unsigned g_aSize2Shift[] =
57{
58 ~0, /* 0 - invalid */
59 0, /* *1 == 2^0 */
60 1, /* *2 == 2^1 */
61 ~0, /* 3 - invalid */
62 2, /* *4 == 2^2 */
63 ~0, /* 5 - invalid */
64 ~0, /* 6 - invalid */
65 ~0, /* 7 - invalid */
66 3 /* *8 == 2^3 */
67};
68
69/**
70 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
71 */
72#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
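/* A minimal usage sketch (illustrative values only): the shift table turns the
 * byte-count scaling used by the REP string statistics below into a shift, so
 * cTransfers << SIZE_2_SHIFT(cb) equals cTransfers * cb for the valid sizes.
 *
 *     uint32_t cTransfers = 16;                          // hypothetical REP count
 *     unsigned cb         = 4;                           // dword-sized accesses
 *     uint32_t cbTotal    = cTransfers << SIZE_2_SHIFT(cb);
 *     Assert(cbTotal == cTransfers * cb);                // 16 * 4 == 64 bytes
 */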
73
74
75/**
76 * Wrapper which does the write and updates range statistics when such are enabled.
77 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
78 */
79DECLINLINE(int) iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
80{
81#ifdef VBOX_WITH_STATISTICS
82 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
83 Assert(pStats);
84#endif
85
86 int rc;
87 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
88 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /* @todo fix const!! */
89 else
90 rc = VINF_SUCCESS;
91 if (rc != VINF_IOM_HC_MMIO_WRITE)
92 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
93 return rc;
94}
95
96
97/**
98 * Wrapper which does the read and updates range statistics when such are enabled.
99 */
100DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, void *pvData, unsigned cb)
101{
102#ifdef VBOX_WITH_STATISTICS
103 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
104 Assert(pStats);
105#endif
106
107 int rc;
108 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
109 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, pvData, cb);
110 else
111 {
112/** @todo r=bird: this is (probably) wrong, all bits should be set here I
113 * think. */
114 switch (cb)
115 {
116 case 1: *(uint8_t *)pvData = 0; break;
117 case 2: *(uint16_t *)pvData = 0; break;
118 case 4: *(uint32_t *)pvData = 0; break;
119 case 8: *(uint64_t *)pvData = 0; break;
120 default:
121 memset(pvData, 0, cb);
122 break;
123 }
124 rc = VINF_SUCCESS;
125 }
126 if (rc != VINF_IOM_HC_MMIO_READ)
127 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
128 return rc;
129}
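/* The @todo above suggests that reads from a range without a read callback
 * should probably return all bits set rather than zero. A minimal sketch of
 * what that alternative fallback could look like (illustration only, not the
 * behaviour of this revision):
 *
 *     switch (cb)
 *     {
 *         case 1: *(uint8_t  *)pvData = UINT8_MAX;  break;
 *         case 2: *(uint16_t *)pvData = UINT16_MAX; break;
 *         case 4: *(uint32_t *)pvData = UINT32_MAX; break;
 *         case 8: *(uint64_t *)pvData = UINT64_MAX; break;
 *         default: memset(pvData, 0xff, cb);        break;
 *     }
 */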
130
131
132/**
133 * Internal - statistics only.
134 */
135DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
136{
137#ifdef VBOX_WITH_STATISTICS
138 switch (cb)
139 {
140 case 1:
141 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
142 break;
143 case 2:
144 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
145 break;
146 case 4:
147 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
148 break;
149 case 8:
150 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
151 break;
152 default:
153 /* No way. */
154 AssertMsgFailed(("Invalid data length %d\n", cb));
155 break;
156 }
157#else
158 NOREF(pVM); NOREF(cb);
159#endif
160}
161
162
163/**
164 * MOV reg, mem (read)
165 * MOVZX reg, mem (read)
166 * MOVSX reg, mem (read)
167 *
168 * @returns VBox status code.
169 *
170 * @param pVM The virtual machine.
171 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
172 * @param pCpu Disassembler CPU state.
173 * @param pRange Pointer to the MMIO range.
174 * @param GCPhysFault The GC physical address corresponding to pvFault.
175 */
176static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
177{
178 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
179
180 /*
181 * Get the data size from parameter 2,
182 * and call the handler function to get the data.
183 */
184 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
185 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
186
187 uint64_t u64Data = 0;
188 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
189 if (rc == VINF_SUCCESS)
190 {
191 /*
192 * Do sign extension for MOVSX.
193 */
194 /** @todo checkup MOVSX implementation! */
195 if (pCpu->pCurInstr->opcode == OP_MOVSX)
196 {
197 if (cb == 1)
198 {
199 /* DWORD <- BYTE */
200 int64_t iData = (int8_t)u64Data;
201 u64Data = (uint64_t)iData;
202 }
203 else
204 {
205 /* DWORD <- WORD */
206 int64_t iData = (int16_t)u64Data;
207 u64Data = (uint64_t)iData;
208 }
209 }
210
211 /*
212 * Store the result to register (parameter 1).
213 */
214 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
215 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
216 }
217
218 if (rc == VINF_SUCCESS)
219 iomMMIOStatLength(pVM, cb);
220 return rc;
221}
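/* The MOVSX path above gets its sign extension from plain C casts. The same
 * technique in isolation, with hypothetical values:
 *
 *     uint64_t u64Data = 0x80;                  // byte read from MMIO, sign bit set
 *     int64_t  iData   = (int8_t)u64Data;       // sign-extends to -128
 *     u64Data          = (uint64_t)iData;       // 0xFFFFFFFFFFFFFF80
 */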
222
223
224/**
225 * MOV mem, reg|imm (write)
226 *
227 * @returns VBox status code.
228 *
229 * @param pVM The virtual machine.
230 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
231 * @param pCpu Disassembler CPU state.
232 * @param pRange Pointer to the MMIO range.
233 * @param GCPhysFault The GC physical address corresponding to pvFault.
234 */
235static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
236{
237 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
238
239 /*
240 * Get data to write from second parameter,
241 * and call the callback to write it.
242 */
243 unsigned cb = 0;
244 uint64_t u64Data = 0;
245 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
246 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
247
248 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
249 if (rc == VINF_SUCCESS)
250 iomMMIOStatLength(pVM, cb);
251 return rc;
252}
253
254
255/** Wrapper for reading virtual memory. */
256DECLINLINE(int) iomRamRead(PVM pVM, void *pDest, RTGCPTR GCSrc, uint32_t cb)
257{
258#ifdef IN_GC
259 return MMGCRamReadNoTrapHandler(pDest, (void *)GCSrc, cb);
260#else
261 return PGMPhysReadGCPtr(pVM, pDest, GCSrc, cb);
262#endif
263}
264
265
266/** Wrapper for writing virtual memory. */
267DECLINLINE(int) iomRamWrite(PVM pVM, RTGCPTR GCDest, void *pSrc, uint32_t cb)
268{
269#ifdef IN_GC
270 return MMGCRamWriteNoTrapHandler((void *)GCDest, pSrc, cb);
271#else
272 return PGMPhysWriteGCPtr(pVM, GCDest, pSrc, cb);
273#endif
274}
275
276
277#ifdef IOM_WITH_MOVS_SUPPORT
278/**
279 * [REP] MOVSB
280 * [REP] MOVSW
281 * [REP] MOVSD
282 *
283 * Restricted implementation.
284 *
285 *
286 * @returns VBox status code.
287 *
288 * @param pVM The virtual machine.
289 * @param uErrorCode CPU Error code.
290 * @param pRegFrame Trap register frame.
291 * @param GCPhysFault The GC physical address corresponding to pvFault.
292 * @param pCpu Disassembler CPU state.
293 * @param pRange Pointer to the MMIO range.
294 * @param ppStat Which sub-sample to attribute this call to.
295 */
296static int iomInterpretMOVS(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
297{
298 /*
299 * We do not support segment prefixes or REPNE.
300 */
301 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
302 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
303
304
305 /*
306 * Get bytes/words/dwords/qword count to copy.
307 */
308 uint32_t cTransfers = 1;
309 if (pCpu->prefix & PREFIX_REP)
310 {
311#ifndef IN_GC
312 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
313 && pRegFrame->rcx >= _4G)
314 return VINF_EM_RAW_EMULATE_INSTR;
315#endif
316
317 cTransfers = pRegFrame->ecx;
318 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
319 cTransfers &= 0xffff;
320
321 if (!cTransfers)
322 return VINF_SUCCESS;
323 }
324
325 /* Get the current privilege level. */
326 uint32_t cpl = CPUMGetGuestCPL(pVM, pRegFrame);
327
328 /*
329 * Get data size.
330 */
331 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
332 AssertMsg(cb > 0 && cb <= sizeof(uint32_t), ("cb=%d\n", cb));
333 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
334
335#ifdef VBOX_WITH_STATISTICS
336 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
337 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
338#endif
339
340/** @todo re-evaluate on page boundaries. */
341
342 RTGCPHYS Phys = GCPhysFault;
343 int rc;
344 if (uErrorCode & X86_TRAP_PF_RW)
345 {
346 /*
347 * Write operation: [Mem] -> [MMIO]
348 * ds:esi (Virt Src) -> es:edi (Phys Dst)
349 */
350 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
351
352 /* Check callback. */
353 if (!pRange->CTX_SUFF(pfnWriteCallback))
354 return VINF_IOM_HC_MMIO_WRITE;
355
356 /* Convert source address ds:esi. */
357 RTGCUINTPTR pu8Virt;
358 rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
359 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
360 (PRTGCPTR)&pu8Virt);
361 if (RT_SUCCESS(rc))
362 {
363
364 /* Access verification first; we currently can't recover properly from traps inside this instruction */
365 rc = PGMVerifyAccess(pVM, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
366 if (rc != VINF_SUCCESS)
367 {
368 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
369 return VINF_EM_RAW_EMULATE_INSTR;
370 }
371
372#ifdef IN_GC
373 MMGCRamRegisterTrapHandler(pVM);
374#endif
375
376 /* copy loop. */
377 while (cTransfers)
378 {
379 uint32_t u32Data = 0;
380 rc = iomRamRead(pVM, &u32Data, (RTGCPTR)pu8Virt, cb);
381 if (rc != VINF_SUCCESS)
382 break;
383 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
384 if (rc != VINF_SUCCESS)
385 break;
386
387 pu8Virt += offIncrement;
388 Phys += offIncrement;
389 pRegFrame->rsi += offIncrement;
390 pRegFrame->rdi += offIncrement;
391 cTransfers--;
392 }
393#ifdef IN_GC
394 MMGCRamDeregisterTrapHandler(pVM);
395#endif
396 /* Update ecx. */
397 if (pCpu->prefix & PREFIX_REP)
398 pRegFrame->ecx = cTransfers;
399 }
400 else
401 rc = VINF_IOM_HC_MMIO_READ_WRITE;
402 }
403 else
404 {
405 /*
406 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
407 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
408 */
409 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
410
411 /* Check callback. */
412 if (!pRange->CTX_SUFF(pfnReadCallback))
413 return VINF_IOM_HC_MMIO_READ;
414
415 /* Convert destination address. */
416 RTGCUINTPTR pu8Virt;
417 rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
418 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
419 (RTGCPTR *)&pu8Virt);
420 if (RT_FAILURE(rc))
421 return VINF_EM_RAW_GUEST_TRAP;
422
423 /* Check if destination address is MMIO. */
424 PIOMMMIORANGE pMMIODst;
425 RTGCPHYS PhysDst;
426 rc = PGMGstGetPage(pVM, (RTGCPTR)pu8Virt, NULL, &PhysDst);
427 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
428 if ( RT_SUCCESS(rc)
429 && (pMMIODst = iomMMIOGetRange(&pVM->iom.s, PhysDst)))
430 {
431 /*
432 * Extra: [MMIO] -> [MMIO]
433 */
434 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
435 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
436 return VINF_IOM_HC_MMIO_READ_WRITE;
437
438 /* copy loop. */
439 while (cTransfers)
440 {
441 uint32_t u32Data;
442 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
443 if (rc != VINF_SUCCESS)
444 break;
445 rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
446 if (rc != VINF_SUCCESS)
447 break;
448
449 Phys += offIncrement;
450 PhysDst += offIncrement;
451 pRegFrame->rsi += offIncrement;
452 pRegFrame->rdi += offIncrement;
453 cTransfers--;
454 }
455 }
456 else
457 {
458 /*
459 * Normal: [MMIO] -> [Mem]
460 */
461 /* Access verification first; we currently can't recover properly from traps inside this instruction */
462 rc = PGMVerifyAccess(pVM, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
463 if (rc != VINF_SUCCESS)
464 {
465 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
466 return VINF_EM_RAW_EMULATE_INSTR;
467 }
468
469 /* copy loop. */
470#ifdef IN_GC
471 MMGCRamRegisterTrapHandler(pVM);
472#endif
473 while (cTransfers)
474 {
475 uint32_t u32Data;
476 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
477 if (rc != VINF_SUCCESS)
478 break;
479 rc = iomRamWrite(pVM, (RTGCPTR)pu8Virt, &u32Data, cb);
480 if (rc != VINF_SUCCESS)
481 {
482 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
483 break;
484 }
485
486 pu8Virt += offIncrement;
487 Phys += offIncrement;
488 pRegFrame->rsi += offIncrement;
489 pRegFrame->rdi += offIncrement;
490 cTransfers--;
491 }
492#ifdef IN_GC
493 MMGCRamDeregisterTrapHandler(pVM);
494#endif
495 }
496
497 /* Update ecx on exit. */
498 if (pCpu->prefix & PREFIX_REP)
499 pRegFrame->ecx = cTransfers;
500 }
501
502 /* work statistics. */
503 if (rc == VINF_SUCCESS)
504 iomMMIOStatLength(pVM, cb);
505 NOREF(ppStat);
506 return rc;
507}
508#endif /* IOM_WITH_MOVS_SUPPORT */
509
510
511/**
512 * [REP] STOSB
513 * [REP] STOSW
514 * [REP] STOSD
515 *
516 * Restricted implementation.
517 *
518 *
519 * @returns VBox status code.
520 *
521 * @param pVM The virtual machine.
522 * @param pRegFrame Trap register frame.
523 * @param GCPhysFault The GC physical address corresponding to pvFault.
524 * @param pCpu Disassembler CPU state.
525 * @param pRange Pointer to the MMIO range.
526 */
527static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
528{
529 /*
530 * We do not support segment prefixes or REPNE.
531 */
532 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
533 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
534
535 /*
536 * Get bytes/words/dwords count to copy.
537 */
538 uint32_t cTransfers = 1;
539 if (pCpu->prefix & PREFIX_REP)
540 {
541#ifndef IN_GC
542 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
543 && pRegFrame->rcx >= _4G)
544 return VINF_EM_RAW_EMULATE_INSTR;
545#endif
546
547 cTransfers = pRegFrame->ecx;
548 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
549 cTransfers &= 0xffff;
550
551 if (!cTransfers)
552 return VINF_SUCCESS;
553 }
554
555/** @todo r=bird: bounds checks! */
556
557 /*
558 * Get data size.
559 */
560 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
561 AssertMsg(cb > 0 && cb <= sizeof(uint32_t), ("cb=%d\n", cb));
562 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
563
564#ifdef VBOX_WITH_STATISTICS
565 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
566 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
567#endif
568
569
570 RTGCPHYS Phys = GCPhysFault;
571 uint32_t u32Data = pRegFrame->eax;
572 int rc;
573 if (pRange->CTX_SUFF(pfnFillCallback))
574 {
575 /*
576 * Use the fill callback.
577 */
578 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
579 if (offIncrement > 0)
580 {
581 /* addr++ variant. */
582 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys, u32Data, cb, cTransfers);
583 if (rc == VINF_SUCCESS)
584 {
585 /* Update registers. */
586 pRegFrame->rdi += cTransfers << SIZE_2_SHIFT(cb);
587 if (pCpu->prefix & PREFIX_REP)
588 pRegFrame->ecx = 0;
589 }
590 }
591 else
592 {
593 /* addr-- variant. */
594 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)), u32Data, cb, cTransfers);
595 if (rc == VINF_SUCCESS)
596 {
597 /* Update registers. */
598 pRegFrame->rdi -= cTransfers << SIZE_2_SHIFT(cb);
599 if (pCpu->prefix & PREFIX_REP)
600 pRegFrame->ecx = 0;
601 }
602 }
603 }
604 else
605 {
606 /*
607 * Use the write callback.
608 */
609 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
610
611 /* fill loop. */
612 do
613 {
614 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
615 if (rc != VINF_SUCCESS)
616 break;
617
618 Phys += offIncrement;
619 pRegFrame->rdi += offIncrement;
620 cTransfers--;
621 } while (cTransfers);
622
623 /* Update ecx on exit. */
624 if (pCpu->prefix & PREFIX_REP)
625 pRegFrame->ecx = cTransfers;
626 }
627
628 /*
629 * Work statistics and return.
630 */
631 if (rc == VINF_SUCCESS)
632 iomMMIOStatLength(pVM, cb);
633 return rc;
634}
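/* For the descending (DF=1) fill path above the device callback is handed the
 * lowest address of the block that STOS will touch. A small worked sketch with
 * hypothetical values:
 *
 *     RTGCPHYS Phys       = 0x1000fc;    // fault address = highest element written
 *     uint32_t cTransfers = 4;
 *     unsigned cb         = 4;           // STOSD
 *     RTGCPHYS PhysStart  = Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb));
 *     // PhysStart == 0x1000f0; the fill covers 0x1000f0..0x1000ff.
 */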
635
636
637/**
638 * [REP] LODSB
639 * [REP] LODSW
640 * [REP] LODSD
641 *
642 * Restricted implementation.
643 *
644 *
645 * @returns VBox status code.
646 *
647 * @param pVM The virtual machine.
648 * @param pRegFrame Trap register frame.
649 * @param GCPhysFault The GC physical address corresponding to pvFault.
650 * @param pCpu Disassembler CPU state.
651 * @param pRange Pointer to the MMIO range.
652 */
653static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
654{
655 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
656
657 /*
658 * We do not support segment prefixes or REP*.
659 */
660 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
661 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
662
663 /*
664 * Get data size.
665 */
666 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
667 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
668 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
669
670 /*
671 * Perform read.
672 */
673 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
674 if (rc == VINF_SUCCESS)
675 pRegFrame->rsi += offIncrement;
676
677 /*
678 * Work statistics and return.
679 */
680 if (rc == VINF_SUCCESS)
681 iomMMIOStatLength(pVM, cb);
682 return rc;
683}
684
685
686/**
687 * CMP [MMIO], reg|imm
688 * CMP reg|imm, [MMIO]
689 *
690 * Restricted implementation.
691 *
692 *
693 * @returns VBox status code.
694 *
695 * @param pVM The virtual machine.
696 * @param pRegFrame Trap register frame.
697 * @param GCPhysFault The GC physical address corresponding to pvFault.
698 * @param pCpu Disassembler CPU state.
699 * @param pRange Pointer to the MMIO range.
700 */
701static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
702{
703 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
704
705 /*
706 * Get the operands.
707 */
708 unsigned cb = 0;
709 uint64_t uData1 = 0;
710 uint64_t uData2 = 0;
711 int rc;
712 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
713 /* cmp reg, [MMIO]. */
714 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
715 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
716 /* cmp [MMIO], reg|imm. */
717 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
718 else
719 {
720 AssertMsgFailed(("Disassembler CMP problem.\n"));
721 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
722 }
723
724 if (rc == VINF_SUCCESS)
725 {
726 /* Emulate CMP and update guest flags. */
727 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
728 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
729 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
730 iomMMIOStatLength(pVM, cb);
731 }
732
733 return rc;
734}
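/* The flag update above replaces only the arithmetic status flags CMP is
 * defined to modify and leaves everything else (IF, DF, ...) untouched. The
 * same mask-and-merge idiom in isolation, with hypothetical values:
 *
 *     uint32_t fStatus = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF
 *                      | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF;
 *     uint32_t uGuest  = 0x00000246;                     // hypothetical guest EFLAGS
 *     uint32_t uEmu    = X86_EFL_ZF;                     // result from EMEmulateCmp
 *     uGuest = (uGuest & ~fStatus) | (uEmu & fStatus);   // keep IF etc., take ZF/CF/...
 */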
735
736
737/**
738 * AND [MMIO], reg|imm
739 * AND reg, [MMIO]
740 * OR [MMIO], reg|imm
741 * OR reg, [MMIO]
742 *
743 * Restricted implementation.
744 *
745 *
746 * @returns VBox status code.
747 *
748 * @param pVM The virtual machine.
749 * @param pRegFrame Trap register frame.
750 * @param GCPhysFault The GC physical address corresponding to pvFault.
751 * @param pCpu Disassembler CPU state.
752 * @param pRange Pointer to the MMIO range.
753 * @param pfnEmulate Instruction emulation function.
754 */
755static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
756{
757 unsigned cb = 0;
758 uint64_t uData1 = 0;
759 uint64_t uData2 = 0;
760 bool fAndWrite;
761 int rc;
762
763#ifdef LOG_ENABLED
764 const char *pszInstr;
765
766 if (pCpu->pCurInstr->opcode == OP_XOR)
767 pszInstr = "Xor";
768 else if (pCpu->pCurInstr->opcode == OP_OR)
769 pszInstr = "Or";
770 else if (pCpu->pCurInstr->opcode == OP_AND)
771 pszInstr = "And";
772 else
773 pszInstr = "OrXorAnd??";
774#endif
775
776 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
777 {
778 /* and reg, [MMIO]. */
779 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
780 fAndWrite = false;
781 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
782 }
783 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
784 {
785 /* and [MMIO], reg|imm. */
786 fAndWrite = true;
787 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
788 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
789 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
790 else
791 rc = VINF_IOM_HC_MMIO_READ_WRITE;
792 }
793 else
794 {
795 AssertMsgFailed(("Disassembler AND problem.\n"));
796 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
797 }
798
799 if (rc == VINF_SUCCESS)
800 {
801 /* Emulate AND and update guest flags. */
802 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
803
804 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
805
806 if (fAndWrite)
807 /* Store result to MMIO. */
808 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
809 else
810 {
811 /* Store result to register. */
812 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
813 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
814 }
815 if (rc == VINF_SUCCESS)
816 {
817 /* Update guest's eflags and finish. */
818 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
819 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
820 iomMMIOStatLength(pVM, cb);
821 }
822 }
823
824 return rc;
825}
826
827
828/**
829 * TEST [MMIO], reg|imm
830 * TEST reg, [MMIO]
831 *
832 * Restricted implementation.
833 *
834 *
835 * @returns VBox status code.
836 *
837 * @param pVM The virtual machine.
838 * @param pRegFrame Trap register frame.
839 * @param GCPhysFault The GC physical address corresponding to pvFault.
840 * @param pCpu Disassembler CPU state.
841 * @param pRange Pointer to the MMIO range.
842 */
843static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
844{
845 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
846
847 unsigned cb = 0;
848 uint64_t uData1 = 0;
849 uint64_t uData2 = 0;
850 int rc;
851
852 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
853 {
854 /* test reg, [MMIO]. */
855 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
856 }
857 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
858 {
859 /* test [MMIO], reg|imm. */
860 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
861 }
862 else
863 {
864 AssertMsgFailed(("Disassembler TEST problem.\n"));
865 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
866 }
867
868 if (rc == VINF_SUCCESS)
869 {
870 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
871 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
872 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
873 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
874 iomMMIOStatLength(pVM, cb);
875 }
876
877 return rc;
878}
879
880
881/**
882 * BT [MMIO], reg|imm
883 *
884 * Restricted implementation.
885 *
886 *
887 * @returns VBox status code.
888 *
889 * @param pVM The virtual machine.
890 * @param pRegFrame Trap register frame.
891 * @param GCPhysFault The GC physical address corresponding to pvFault.
892 * @param pCpu Disassembler CPU state.
893 * @param pRange Pointer to the MMIO range.
894 */
895static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
896{
897 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
898
899 uint64_t uBit = 0;
900 uint64_t uData1 = 0;
901 unsigned cb = 0;
902 int rc;
903
904 if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cb))
905 {
906 /* bt [MMIO], reg|imm. */
907 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
908 }
909 else
910 {
911 AssertMsgFailed(("Disassembler BT problem.\n"));
912 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
913 }
914
915 if (rc == VINF_SUCCESS)
916 {
917 /* Only the size of the memory operand matters here. */
918 cb = DISGetParamSize(pCpu, &pCpu->param1);
919
920 /* Find the bit inside the faulting address */
921 uBit &= (cb*8 - 1);
922
923 pRegFrame->eflags.Bits.u1CF = (uData1 >> uBit);
924 iomMMIOStatLength(pVM, cb);
925 }
926
927 return rc;
928}
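/* BT only updates CF: the bit index is wrapped to the width of the memory
 * operand and the selected bit becomes the carry flag. In isolation, with
 * hypothetical values:
 *
 *     uint64_t uData1 = 0x00000008;            // value read from MMIO
 *     uint64_t uBit   = 35;                    // bit index from the reg/imm operand
 *     unsigned cb     = 4;                     // dword memory operand
 *     uBit &= cb * 8 - 1;                      // 35 & 31 == 3
 *     unsigned fCF    = (uData1 >> uBit) & 1;  // bit 3 is set, so CF = 1
 */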
929
930/**
931 * XCHG [MMIO], reg
932 * XCHG reg, [MMIO]
933 *
934 * Restricted implementation.
935 *
936 *
937 * @returns VBox status code.
938 *
939 * @param pVM The virtual machine.
940 * @param pRegFrame Trap register frame.
941 * @param GCPhysFault The GC physical address corresponding to pvFault.
942 * @param pCpu Disassembler CPU state.
943 * @param pRange Pointer to the MMIO range.
944 */
945static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
946{
947 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
948 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
949 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
950 return VINF_IOM_HC_MMIO_READ_WRITE;
951
952 int rc;
953 unsigned cb = 0;
954 uint64_t uData1 = 0;
955 uint64_t uData2 = 0;
956 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
957 {
958 /* xchg reg, [MMIO]. */
959 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
960 if (rc == VINF_SUCCESS)
961 {
962 /* Store result to MMIO. */
963 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
964
965 if (rc == VINF_SUCCESS)
966 {
967 /* Store result to register. */
968 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
969 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
970 }
971 else
972 Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
973 }
974 else
975 Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
976 }
977 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
978 {
979 /* xchg [MMIO], reg. */
980 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
981 if (rc == VINF_SUCCESS)
982 {
983 /* Store result to MMIO. */
984 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
985 if (rc == VINF_SUCCESS)
986 {
987 /* Store result to register. */
988 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
989 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
990 }
991 else
992 Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
993 }
994 else
995 Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
996 }
997 else
998 {
999 AssertMsgFailed(("Disassembler XCHG problem.\n"));
1000 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1001 }
1002 return rc;
1003}
1004
1005
1006/**
1007 * \#PF Handler callback for MMIO ranges.
1008 *
1009 * @returns VBox status code (appropriate for GC return).
1010 * @param pVM VM Handle.
1011 * @param uErrorCode CPU Error code.
1012 * @param pCtxCore Trap register frame.
1013 * @param pvFault The fault address (cr2).
1014 * @param GCPhysFault The GC physical address corresponding to pvFault.
1015 * @param pvUser Pointer to the MMIO ring-3 range entry.
1016 */
1017VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1018{
1019 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1020 Log(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%VGv eip=%VGv\n",
1021 GCPhysFault, (uint32_t)uErrorCode, pvFault, pCtxCore->rip));
1022
1023 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1024 Assert(pRange);
1025 Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1026
1027#ifdef VBOX_WITH_STATISTICS
1028 /*
1029 * Locate the statistics; if the range is larger than PAGE_SIZE we'll use the first byte for everything.
1030 */
1031 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
1032 if (!pStats)
1033 {
1034# ifdef IN_RING3
1035 return VERR_NO_MEMORY;
1036# else
1037 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1038 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1039 return uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1040# endif
1041 }
1042#endif
1043
1044#ifndef IN_RING3
1045 /*
1046 * Should we defer the request right away?
1047 */
1048 if (uErrorCode & X86_TRAP_PF_RW
1049 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1050 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1051 {
1052# ifdef VBOX_WITH_STATISTICS
1053 if (uErrorCode & X86_TRAP_PF_RW)
1054 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1055 else
1056 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1057# endif
1058
1059 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1060 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1061 return uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1062 }
1063#endif /* !IN_RING3 */
1064
1065 /*
1066 * Disassemble the instruction and interpret it.
1067 */
1068 DISCPUSTATE Cpu;
1069 unsigned cbOp;
1070 int rc = EMInterpretDisasOne(pVM, pCtxCore, &Cpu, &cbOp);
1071 AssertRCReturn(rc, rc);
1072 switch (Cpu.pCurInstr->opcode)
1073 {
1074 case OP_MOV:
1075 case OP_MOVZX:
1076 case OP_MOVSX:
1077 {
1078 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1079 if (uErrorCode & X86_TRAP_PF_RW)
1080 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, &Cpu, pRange, GCPhysFault);
1081 else
1082 rc = iomInterpretMOVxXRead(pVM, pCtxCore, &Cpu, pRange, GCPhysFault);
1083 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1084 break;
1085 }
1086
1087
1088#ifdef IOM_WITH_MOVS_SUPPORT
1089 case OP_MOVSB:
1090 case OP_MOVSWD:
1091 {
1092 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1093 PSTAMPROFILE pStat = NULL;
1094 rc = iomInterpretMOVS(pVM, uErrorCode, pCtxCore, GCPhysFault, &Cpu, pRange, &pStat);
1095 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1096 break;
1097 }
1098#endif
1099
1100 case OP_STOSB:
1101 case OP_STOSWD:
1102 Assert(uErrorCode & X86_TRAP_PF_RW);
1103 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1104 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1105 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1106 break;
1107
1108 case OP_LODSB:
1109 case OP_LODSWD:
1110 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1111 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1112 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1113 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1114 break;
1115
1116 case OP_CMP:
1117 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1118 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1119 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1120 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1121 break;
1122
1123 case OP_AND:
1124 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1125 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateAnd);
1126 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1127 break;
1128
1129 case OP_OR:
1130 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1131 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateOr);
1132 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1133 break;
1134
1135 case OP_XOR:
1136 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1137 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateXor);
1138 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1139 break;
1140
1141 case OP_TEST:
1142 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1143 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1144 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1145 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1146 break;
1147
1148 case OP_BT:
1149 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1150 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1151 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1152 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1153 break;
1154
1155 case OP_XCHG:
1156 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1157 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1158 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1159 break;
1160
1161
1162 /*
1163 * The instruction isn't supported. Hand it on to ring-3.
1164 */
1165 default:
1166 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1167 rc = (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1168 break;
1169 }
1170
1171 /*
1172 * On success advance EIP.
1173 */
1174 if (rc == VINF_SUCCESS)
1175 pCtxCore->rip += cbOp;
1176 else
1177 {
1178 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1179#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1180 switch (rc)
1181 {
1182 case VINF_IOM_HC_MMIO_READ:
1183 case VINF_IOM_HC_MMIO_READ_WRITE:
1184 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1185 break;
1186 case VINF_IOM_HC_MMIO_WRITE:
1187 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1188 break;
1189 }
1190#endif
1191 }
1192
1193 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1194 return rc;
1195}
1196
1197
1198#ifdef IN_RING3
1199/**
1200 * Access handler callback for MMIO ranges (ring-3).
1201 *
1202 * @returns VINF_SUCCESS if the handler has carried out the operation.
1203 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1204 * @param pVM VM Handle.
1205 * @param GCPhys The physical address the guest is writing to.
1206 * @param pvPhys The HC mapping of that address.
1207 * @param pvBuf What the guest is reading/writing.
1208 * @param cbBuf How much it's reading/writing.
1209 * @param enmAccessType The access type.
1210 * @param pvUser Pointer to the MMIO range entry.
1211 */
1212DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1213{
1214 int rc;
1215 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1216 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1217
1218 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1219
1220 Assert(pRange);
1221 Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1222
1223 if (enmAccessType == PGMACCESSTYPE_READ)
1224 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, cbBuf);
1225 else
1226 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, cbBuf);
1227
1228 AssertRC(rc);
1229 return rc;
1230}
1231#endif /* IN_RING3 */
1232
1233
1234/**
1235 * Reads a MMIO register.
1236 *
1237 * @returns VBox status code.
1238 *
1239 * @param pVM VM handle.
1240 * @param GCPhys The physical address to read.
1241 * @param pu32Value Where to store the value read.
1242 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1243 */
1244VMMDECL(int) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1245{
1246 /*
1247 * Lookup the current context range node and statistics.
1248 */
1249 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1250 AssertMsgReturn(pRange,
1251 ("Handlers and page tables are out of sync or something! GCPhys=%VGp cbValue=%d\n", GCPhys, cbValue),
1252 VERR_INTERNAL_ERROR);
1253#ifdef VBOX_WITH_STATISTICS
1254 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1255 if (!pStats)
1256# ifdef IN_RING3
1257 return VERR_NO_MEMORY;
1258# else
1259 return VINF_IOM_HC_MMIO_READ;
1260# endif
1261#endif /* VBOX_WITH_STATISTICS */
1262 if (pRange->CTX_SUFF(pfnReadCallback))
1263 {
1264 /*
1265 * Perform the read and deal with the result.
1266 */
1267#ifdef VBOX_WITH_STATISTICS
1268 STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1269#endif
1270 int rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, cbValue);
1271#ifdef VBOX_WITH_STATISTICS
1272 STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1273 if (rc != VINF_IOM_HC_MMIO_READ)
1274 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1275#endif
1276 switch (rc)
1277 {
1278 case VINF_SUCCESS:
1279 default:
1280 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Vrc\n", GCPhys, *pu32Value, cbValue, rc));
1281 return rc;
1282
1283 case VINF_IOM_MMIO_UNUSED_00:
1284 switch (cbValue)
1285 {
1286 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1287 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1288 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1289 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1290 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%VGp\n", cbValue, GCPhys)); break;
1291 }
1292 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Vrc\n", GCPhys, *pu32Value, cbValue, rc));
1293 return VINF_SUCCESS;
1294
1295 case VINF_IOM_MMIO_UNUSED_FF:
1296 switch (cbValue)
1297 {
1298 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1299 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1300 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1301 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1302 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%VGp\n", cbValue, GCPhys)); break;
1303 }
1304 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Vrc\n", GCPhys, *pu32Value, cbValue, rc));
1305 return VINF_SUCCESS;
1306 }
1307 }
1308#ifndef IN_RING3
1309 if (pRange->pfnReadCallbackR3)
1310 {
1311 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1312 return VINF_IOM_HC_MMIO_READ;
1313 }
1314#endif
1315
1316 /*
1317 * Lookup the ring-3 range.
1318 */
1319#ifdef VBOX_WITH_STATISTICS
1320 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1321#endif
1322 /* Unassigned memory; this is actually not supposed to happen. */
1323 switch (cbValue)
1324 {
1325 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1326 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1327 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1328 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1329 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%VGp\n", cbValue, GCPhys)); break;
1330 }
1331 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1332 return VINF_SUCCESS;
1333}
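/* The VINF_IOM_MMIO_UNUSED_00 / VINF_IOM_MMIO_UNUSED_FF cases above let a
 * device leave the buffer untouched and have IOM synthesize the 0x00.. / 0xff..
 * pattern. A minimal sketch of a read callback doing that; the prototype is
 * inferred from the way the callback is invoked in this file, and the names
 * and register layout are purely illustrative:
 *
 *     static DECLCALLBACK(int) exampleMMIORead(PPDMDEVINS pDevIns, void *pvUser,
 *                                              RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
 *     {
 *         if (GCPhysAddr & 0x80)                 // hypothetical unimplemented window
 *             return VINF_IOM_MMIO_UNUSED_FF;    // IOMMMIORead fills in 0xff.. above
 *         *(uint32_t *)pv = 0;                   // real register emulation goes here (cb assumed 4)
 *         return VINF_SUCCESS;
 *     }
 */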
1334
1335
1336/**
1337 * Writes to a MMIO register.
1338 *
1339 * @returns VBox status code.
1340 *
1341 * @param pVM VM handle.
1342 * @param GCPhys The physical address to write to.
1343 * @param u32Value The value to write.
1344 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
1345 */
1346VMMDECL(int) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1347{
1348 /*
1349 * Lookup the current context range node.
1350 */
1351 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1352 AssertMsgReturn(pRange,
1353 ("Handlers and page tables are out of sync or something! GCPhys=%VGp cbValue=%d\n", GCPhys, cbValue),
1354 VERR_INTERNAL_ERROR);
1355#ifdef VBOX_WITH_STATISTICS
1356 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1357 if (!pStats)
1358# ifdef IN_RING3
1359 return VERR_NO_MEMORY;
1360# else
1361 return VINF_IOM_HC_MMIO_WRITE;
1362# endif
1363#endif /* VBOX_WITH_STATISTICS */
1364
1365 /*
1366 * Perform the write if there's a write handler. R0/GC may have
1367 * to defer it to ring-3.
1368 */
1369 if (pRange->CTX_SUFF(pfnWriteCallback))
1370 {
1371#ifdef VBOX_WITH_STATISTICS
1372 STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1373#endif
1374 int rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, &u32Value, cbValue);
1375#ifdef VBOX_WITH_STATISTICS
1376 STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1377 if (rc != VINF_IOM_HC_MMIO_WRITE)
1378 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
1379#endif
1380 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Vrc\n", GCPhys, u32Value, cbValue, rc));
1381 return rc;
1382 }
1383#ifndef IN_RING3
1384 if (pRange->pfnWriteCallbackR3)
1385 {
1386 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1387 return VINF_IOM_HC_MMIO_WRITE;
1388 }
1389#endif
1390
1391 /*
1392 * No write handler, nothing to do.
1393 */
1394#ifdef VBOX_WITH_STATISTICS
1395 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
1396#endif
1397 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Vrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
1398 return VINF_SUCCESS;
1399}
1400
1401
1402/**
1403 * [REP*] INSB/INSW/INSD
1404 * ES:EDI,DX[,ECX]
1405 *
1406 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1407 *
1408 * @returns Strict VBox status code. Informational status codes other than the one documented
1409 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1410 * @retval VINF_SUCCESS Success.
1411 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1412 * status code must be passed on to EM.
1413 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1414 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1415 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1416 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1417 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1418 *
1419 * @param pVM The virtual machine.
1420 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1421 * @param uPort IO Port
1422 * @param uPrefix IO instruction prefix
1423 * @param cbTransfer Size of transfer unit
1424 */
1425VMMDECL(int) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1426{
1427#ifdef VBOX_WITH_STATISTICS
1428 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
1429#endif
1430
1431 /*
1432 * We do not support REPNE or decrementing destination
1433 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
1434 */
1435 if ( (uPrefix & PREFIX_REPNE)
1436 || pRegFrame->eflags.Bits.u1DF)
1437 return VINF_EM_RAW_EMULATE_INSTR;
1438
1439 /*
1440 * Get bytes/words/dwords count to transfer.
1441 */
1442 RTGCUINTREG cTransfers = 1;
1443 if (uPrefix & PREFIX_REP)
1444 {
1445#ifndef IN_GC
1446 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
1447 && pRegFrame->rcx >= _4G)
1448 return VINF_EM_RAW_EMULATE_INSTR;
1449#endif
1450 cTransfers = pRegFrame->ecx;
1451
1452 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1453 cTransfers &= 0xffff;
1454
1455 if (!cTransfers)
1456 return VINF_SUCCESS;
1457 }
1458
1459 /* Convert destination address es:edi. */
1460 RTGCPTR GCPtrDst;
1461 int rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1462 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1463 &GCPtrDst);
1464 if (RT_FAILURE(rc))
1465 {
1466 Log(("INS destination address conversion failed -> fallback, rc=%d\n", rc));
1467 return VINF_EM_RAW_EMULATE_INSTR;
1468 }
1469
1470 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
1471 uint32_t cpl = CPUMGetGuestCPL(pVM, pRegFrame);
1472
1473 rc = PGMVerifyAccess(pVM, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
1474 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1475 if (rc != VINF_SUCCESS)
1476 {
1477 Log(("INS will generate a trap -> fallback, rc=%d\n", rc));
1478 return VINF_EM_RAW_EMULATE_INSTR;
1479 }
1480
1481 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1482 if (cTransfers > 1)
1483 {
1484 /* If the device supports string transfers, ask it to do as
1485 * much as it wants. The rest is done with single-word transfers. */
1486 const RTGCUINTREG cTransfersOrg = cTransfers;
1487 rc = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
1488 AssertRC(rc); Assert(cTransfers <= cTransfersOrg);
1489 pRegFrame->rdi += (cTransfersOrg - cTransfers) * cbTransfer;
1490 }
1491
1492#ifdef IN_GC
1493 MMGCRamRegisterTrapHandler(pVM);
1494#endif
1495
1496 while (cTransfers && rc == VINF_SUCCESS)
1497 {
1498 uint32_t u32Value;
1499 rc = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
1500 if (!IOM_SUCCESS(rc))
1501 break;
1502 int rc2 = iomRamWrite(pVM, GCPtrDst, &u32Value, cbTransfer);
1503 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1504 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
1505 pRegFrame->rdi += cbTransfer;
1506 cTransfers--;
1507 }
1508#ifdef IN_GC
1509 MMGCRamDeregisterTrapHandler(pVM);
1510#endif
1511
1512 /* Update ecx on exit. */
1513 if (uPrefix & PREFIX_REP)
1514 pRegFrame->ecx = cTransfers;
1515
1516 AssertMsg(rc == VINF_SUCCESS || rc == VINF_IOM_HC_IOPORT_READ || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) || RT_FAILURE(rc), ("%Vrc\n", rc));
1517 return rc;
1518}
1519
1520
1521/**
1522 * [REP*] INSB/INSW/INSD
1523 * ES:EDI,DX[,ECX]
1524 *
1525 * @returns Strict VBox status code. Informational status codes other than the one documented
1526 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1527 * @retval VINF_SUCCESS Success.
1528 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1529 * status code must be passed on to EM.
1530 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1531 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1532 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1533 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1534 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1535 *
1536 * @param pVM The virtual machine.
1537 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1538 * @param pCpu Disassembler CPU state.
1539 */
1540VMMDECL(int) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1541{
1542 /*
1543 * Get port number directly from the register (no need to bother the
1544 * disassembler). And get the I/O register size from the opcode / prefix.
1545 */
1546 RTIOPORT Port = pRegFrame->edx & 0xffff;
1547 unsigned cb = 0;
1548 if (pCpu->pCurInstr->opcode == OP_INSB)
1549 cb = 1;
1550 else
1551 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1552
1553 int rc = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1554 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1555 {
1556 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED || rc == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rc), ("%Vrc\n", rc));
1557 return rc;
1558 }
1559
1560 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1561}
1562
1563
1564/**
1565 * [REP*] OUTSB/OUTSW/OUTSD
1566 * DS:ESI,DX[,ECX]
1567 *
1568 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1569 *
1570 * @returns Strict VBox status code. Informational status codes other than the one documented
1571 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1572 * @retval VINF_SUCCESS Success.
1573 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1574 * status code must be passed on to EM.
1575 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1576 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1577 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1578 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1579 *
1580 * @param pVM The virtual machine.
1581 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1582 * @param uPort IO Port
1583 * @param uPrefix IO instruction prefix
1584 * @param cbTransfer Size of transfer unit
1585 */
1586VMMDECL(int) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1587{
1588#ifdef VBOX_WITH_STATISTICS
1589 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
1590#endif
1591
1592 /*
1593 * We do not support segment prefixes, REPNE or
1594 * decrementing source pointer.
1595 */
1596 if ( (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
1597 || pRegFrame->eflags.Bits.u1DF)
1598 return VINF_EM_RAW_EMULATE_INSTR;
1599
1600 /*
1601 * Get bytes/words/dwords count to transfer.
1602 */
1603 RTGCUINTREG cTransfers = 1;
1604 if (uPrefix & PREFIX_REP)
1605 {
1606#ifndef IN_GC
1607 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
1608 && pRegFrame->rcx >= _4G)
1609 return VINF_EM_RAW_EMULATE_INSTR;
1610#endif
1611 cTransfers = pRegFrame->ecx;
1612 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1613 cTransfers &= 0xffff;
1614
1615 if (!cTransfers)
1616 return VINF_SUCCESS;
1617 }
1618
1619 /* Convert source address ds:esi. */
1620 RTGCPTR GCPtrSrc;
1621 int rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
1622 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1623 &GCPtrSrc);
1624 if (RT_FAILURE(rc))
1625 {
1626 Log(("OUTS source address conversion failed -> fallback, rc=%Vrc\n", rc));
1627 return VINF_EM_RAW_EMULATE_INSTR;
1628 }
1629
1630 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1631 uint32_t cpl = CPUMGetGuestCPL(pVM, pRegFrame);
1632 rc = PGMVerifyAccess(pVM, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
1633 (cpl == 3) ? X86_PTE_US : 0);
1634 if (rc != VINF_SUCCESS)
1635 {
1636 Log(("OUTS will generate a trap -> fallback, rc=%Vrc\n", rc));
1637 return VINF_EM_RAW_EMULATE_INSTR;
1638 }
1639
1640 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1641 if (cTransfers > 1)
1642 {
1643 /*
1644 * If the device supports string transfers, ask it to do as
1645 * much as it wants. The rest is done with single-word transfers.
1646 */
1647 const RTGCUINTREG cTransfersOrg = cTransfers;
1648 rc = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
1649 AssertRC(rc); Assert(cTransfers <= cTransfersOrg);
1650 pRegFrame->rsi += (cTransfersOrg - cTransfers) * cbTransfer;
1651 }
1652
1653#ifdef IN_GC
1654 MMGCRamRegisterTrapHandler(pVM);
1655#endif
1656
1657 while (cTransfers && rc == VINF_SUCCESS)
1658 {
1659 uint32_t u32Value;
1660 rc = iomRamRead(pVM, &u32Value, GCPtrSrc, cbTransfer);
1661 if (rc != VINF_SUCCESS)
1662 break;
1663 rc = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
1664 if (!IOM_SUCCESS(rc))
1665 break;
1666 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
1667 pRegFrame->rsi += cbTransfer;
1668 cTransfers--;
1669 }
1670
1671#ifdef IN_GC
1672 MMGCRamDeregisterTrapHandler(pVM);
1673#endif
1674
1675 /* Update ecx on exit. */
1676 if (uPrefix & PREFIX_REP)
1677 pRegFrame->ecx = cTransfers;
1678
1679 AssertMsg(rc == VINF_SUCCESS || rc == VINF_IOM_HC_IOPORT_WRITE || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) || RT_FAILURE(rc), ("%Vrc\n", rc));
1680 return rc;
1681}
1682
1683
1684/**
1685 * [REP*] OUTSB/OUTSW/OUTSD
1686 * DS:ESI,DX[,ECX]
1687 *
1688 * @returns Strict VBox status code. Informational status codes other than the one documented
1689 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1690 * @retval VINF_SUCCESS Success.
1691 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1692 * status code must be passed on to EM.
1693 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1694 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1695 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1696 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1697 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1698 *
1699 * @param pVM The virtual machine.
1700 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1701 * @param pCpu Disassembler CPU state.
1702 */
1703VMMDECL(int) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1704{
1705 /*
1706 * Get port number from the first parameter.
1707 * And get the I/O register size from the opcode / prefix.
1708 */
1709 uint64_t Port = 0;
1710 unsigned cb = 0;
1711 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1712 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1713 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1714 cb = 1;
1715 else
1716 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1717
1718 int rc = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1719 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1720 {
1721 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED || rc == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rc), ("%Vrc\n", rc));
1722 return rc;
1723 }
1724
1725 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1726}
1727
1728#ifndef IN_GC
1729/**
1730 * Modify an existing MMIO region page; map to another guest physical region and change the access flags
1731 *
1732 * @returns VBox status code.
1733 *
1734 * @param pVM The virtual machine.
1735 * @param GCPhys Physical address that's part of the MMIO region to be changed.
1736 * @param GCPhysRemapped Remapped address.
1737 * @param fPageFlags Page flags to set (typically X86_PTE_RW).
1738 */
1739VMMDECL(int) IOMMMIOModifyPage(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
1740{
1741 Assert(fPageFlags == (X86_PTE_RW|X86_PTE_P));
1742
1743 Log(("IOMMMIOModifyPage %VGp -> %VGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
1744
1745 /*
1746 * Lookup the current context range node and statistics.
1747 */
1748 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1749 AssertMsgReturn(pRange,
1750 ("Handlers and page tables are out of sync or something! GCPhys=%VGp\n", GCPhys),
1751 VERR_INTERNAL_ERROR);
1752
1753 GCPhys &= ~(RTGCPHYS)0xfff;
1754 GCPhysRemapped &= ~(RTGCPHYS)0xfff;
1755
1756 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1757 if ( CPUMIsGuestInPagedProtectedMode(pVM)
1758 && !HWACCMIsNestedPagingActive(pVM))
1759 return VINF_SUCCESS; /* ignore */
1760
1761 int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
1762 AssertRCReturn(rc, rc);
1763
1764 /* Mark it as writable and present so reads and writes no longer fault. */
1765 rc = PGMShwModifyPage(pVM, (RTGCPTR)GCPhys, 1, fPageFlags, ~fPageFlags);
1766 AssertRC(rc);
1767
1768 return VINF_SUCCESS;
1769}
1770
1771/**
1772 * Reset a previously modified MMIO region; restore the access flags.
1773 *
1774 * @returns VBox status code.
1775 *
1776 * @param pVM The virtual machine.
1777 * @param GCPhys Physical address that's part of the MMIO region to be reset.
1778 */
1779VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
1780{
1781 unsigned cb;
1782
1783 Log(("IOMMMIOResetRegion %VGp\n", GCPhys));
1784
1785 /*
1786 * Lookup the current context range node and statistics.
1787 */
1788 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1789 AssertMsgReturn(pRange,
1790 ("Handlers and page tables are out of sync or something! GCPhys=%VGp\n", GCPhys),
1791 VERR_INTERNAL_ERROR);
1792
1793 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1794 if ( CPUMIsGuestInPagedProtectedMode(pVM)
1795 && !HWACCMIsNestedPagingActive(pVM))
1796 return VINF_SUCCESS; /* ignore */
1797
1798
1799 cb = pRange->cb;
1800 GCPhys = pRange->GCPhys;
1801
1802 while(cb)
1803 {
1804 int rc = PGMHandlerPhysicalPageReset(pVM, pRange->GCPhys, GCPhys);
1805 AssertRC(rc);
1806
1807 /* Mark it as not present again to intercept all read and write access. */
1808 rc = PGMShwModifyPage(pVM, (RTGCPTR)GCPhys, 1, 0, ~(uint64_t)(X86_PTE_RW|X86_PTE_P));
1809 AssertRC(rc);
1810
1811#ifdef VBOX_STRICT
1812 uint64_t fFlags;
1813 RTHCPHYS HCPhys;
1814 rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1815 Assert(rc == VERR_PAGE_NOT_PRESENT);
1816#endif
1817 cb -= PAGE_SIZE;
1818 GCPhys += PAGE_SIZE;
1819 }
1820 return VINF_SUCCESS;
1821}
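/* A typical usage pattern for the two helpers above (illustrative names and
 * addresses, simplified): a device aliases one page of its MMIO region
 * directly to guest-visible memory so accesses stop trapping, and later
 * restores the trapping setup for the whole region:
 *
 *     rc = IOMMMIOModifyPage(pVM, GCPhysMMIOPage, GCPhysRamPage, X86_PTE_RW | X86_PTE_P);
 *     ...
 *     rc = IOMMMIOResetRegion(pVM, GCPhysMMIOPage);
 */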
1822#endif /* !IN_GC */
1823