VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 37424

Last change on this file since 37424 was 37424, checked in by vboxsync, 13 years ago

IOM: Moved the inline functions out of IOMInternal.h and into IOMInline.h.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 72.9 KB
Line 
1/* $Id: IOMAllMMIO.cpp 37424 2011-06-12 19:28:11Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
32# include <VBox/vmm/iem.h>
33#endif
34#include "IOMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/hwaccm.h>
38#include "IOMInline.h"
39
40#include <VBox/dis.h>
41#include <VBox/disopcode.h>
42#include <VBox/vmm/pdmdev.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <iprt/assert.h>
46#include <VBox/log.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
54
/**
 * Array for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 *
 * Indexed directly by the byte count; the non-power-of-two slots (0, 3, 5, 6,
 * 7) hold ~0 and must never be used by callers.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0, /* 0 - invalid */
    0,  /* *1 == 2^0 */
    1,  /* *2 == 2^1 */
    ~0, /* 3 - invalid */
    2,  /* *4 == 2^2 */
    ~0, /* 5 - invalid */
    ~0, /* 6 - invalid */
    ~0, /* 7 - invalid */
    3   /* *8 == 2^3 */
};

/**
 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 * The caller must guarantee cb is one of 1, 2, 4 or 8.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
75
76
/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 *
 * @returns VBox status code from the device write callback, or VINF_SUCCESS
 *          when the range has no write callback in the current context (the
 *          write is silently dropped).
 * @param   pVM         The VM handle.
 * @param   pRange      The MMIO range being written to.
 * @param   GCPhysFault The guest physical address of the write.
 * @param   pvData      The bytes to write.
 * @param   cb          Number of bytes to write.
 *
 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
 */
DECLINLINE(int) iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    /* pStats is only referenced by the STAM_* macros below, which expand to
       nothing unless VBOX_WITH_STATISTICS is defined. */
    PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhysFault, pRange);
    Assert(pStats);
#endif

    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
    int rc;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
        /* The const is cast away because the callback signature takes void *. */
        rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
    else
        rc = VINF_SUCCESS; /* no callback in this context -> write discarded */
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rc;
}
98
99
/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 *
 * @returns VBox status code.  The informational statuses
 *          VINF_IOM_MMIO_UNUSED_FF / VINF_IOM_MMIO_UNUSED_00 from the device
 *          callback are converted to VINF_SUCCESS after filling the output
 *          buffer with the corresponding pattern; all other statuses are
 *          passed through unchanged.
 * @param   pVM     The VM handle.
 * @param   pRange  The MMIO range being read.
 * @param   GCPhys  The guest physical address of the read.
 * @param   pvValue Where to store the bytes read.
 * @param   cbValue Number of bytes to read (1, 2, 4 or 8).
 */
DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    /* pStats is only referenced by the STAM_* macros below, which expand to
       nothing unless VBOX_WITH_STATISTICS is defined. */
    PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhys, pRange);
    Assert(pStats);
#endif

    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
    int rc;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
        rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
    else
        /* No callback in this context: behave like unassigned MMIO (all ones). */
        rc = VINF_IOM_MMIO_UNUSED_FF;
    if (rc != VINF_SUCCESS)
    {
        /* Translate the "unused" statuses into a filled buffer + success. */
        switch (rc)
        {
            case VINF_IOM_MMIO_UNUSED_FF:
                switch (cbValue)
                {
                    case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
                    case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
                    case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
                    case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
                    default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
                }
                rc = VINF_SUCCESS;
                break;

            case VINF_IOM_MMIO_UNUSED_00:
                switch (cbValue)
                {
                    case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
                    case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
                    case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
                    case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
                    default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
                }
                rc = VINF_SUCCESS;
                break;
        }
    }
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rc;
}
149
150
151/**
152 * Internal - statistics only.
153 */
154DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
155{
156#ifdef VBOX_WITH_STATISTICS
157 switch (cb)
158 {
159 case 1:
160 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
161 break;
162 case 2:
163 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
164 break;
165 case 4:
166 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
167 break;
168 case 8:
169 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
170 break;
171 default:
172 /* No way. */
173 AssertMsgFailed(("Invalid data length %d\n", cb));
174 break;
175 }
176#else
177 NOREF(pVM); NOREF(cb);
178#endif
179}
180
181
182/**
183 * MOV reg, mem (read)
184 * MOVZX reg, mem (read)
185 * MOVSX reg, mem (read)
186 *
187 * @returns VBox status code.
188 *
189 * @param pVM The virtual machine.
190 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
191 * @param pCpu Disassembler CPU state.
192 * @param pRange Pointer MMIO range.
193 * @param GCPhysFault The GC physical address corresponding to pvFault.
194 */
195static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
196{
197 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
198
199 /*
200 * Get the data size from parameter 2,
201 * and call the handler function to get the data.
202 */
203 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
204 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
205
206 uint64_t u64Data = 0;
207 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
208 if (rc == VINF_SUCCESS)
209 {
210 /*
211 * Do sign extension for MOVSX.
212 */
213 /** @todo checkup MOVSX implementation! */
214 if (pCpu->pCurInstr->opcode == OP_MOVSX)
215 {
216 if (cb == 1)
217 {
218 /* DWORD <- BYTE */
219 int64_t iData = (int8_t)u64Data;
220 u64Data = (uint64_t)iData;
221 }
222 else
223 {
224 /* DWORD <- WORD */
225 int64_t iData = (int16_t)u64Data;
226 u64Data = (uint64_t)iData;
227 }
228 }
229
230 /*
231 * Store the result to register (parameter 1).
232 */
233 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
234 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
235 }
236
237 if (rc == VINF_SUCCESS)
238 iomMMIOStatLength(pVM, cb);
239 return rc;
240}
241
242
243/**
244 * MOV mem, reg|imm (write)
245 *
246 * @returns VBox status code.
247 *
248 * @param pVM The virtual machine.
249 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
250 * @param pCpu Disassembler CPU state.
251 * @param pRange Pointer MMIO range.
252 * @param GCPhysFault The GC physical address corresponding to pvFault.
253 */
254static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
255{
256 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
257
258 /*
259 * Get data to write from second parameter,
260 * and call the callback to write it.
261 */
262 unsigned cb = 0;
263 uint64_t u64Data = 0;
264 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
265 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
266
267 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
268 if (rc == VINF_SUCCESS)
269 iomMMIOStatLength(pVM, cb);
270 return rc;
271}
272
273
/**
 * Wrapper for reading guest virtual memory.
 *
 * @returns VBox status code from the underlying PGM/MM read.
 * @param   pVCpu   The VMCPU handle.
 * @param   pDest   Where to store the bytes read.
 * @param   GCSrc   Guest virtual address to read from.
 * @param   cb      Number of bytes to read.
 */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    /* Raw-mode context goes through the no-trap-handler MM path. */
    return MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
#endif
}
285
286
/**
 * Wrapper for writing guest virtual memory.
 *
 * @returns VBox status code from the underlying PGM write.
 * @param   pVCpu       The VMCPU handle.
 * @param   pCtxCore    Guest register frame (only used in R0/RC).
 * @param   GCPtrDst    Guest virtual address to write to.
 * @param   pvSrc       The bytes to write.
 * @param   cb          Number of bytes to write.
 */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code. Some thought needs to be spent on theoretical concurrency issues as
     *        as well since we're not behind the pgm lock and handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    /* R0/RC: interpreted write that bypasses handlers and never raises a trap. */
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
#endif
}
303
304
305#ifdef IOM_WITH_MOVS_SUPPORT
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM          The virtual machine.
 * @param   fWriteAccess Set if the faulting access was a write (memory ->
 *                       MMIO), clear for a read (MMIO -> memory / MMIO).
 * @param   pRegFrame    Trap register frame.
 * @param   GCPhysFault  The GC physical address corresponding to pvFault.
 * @param   pCpu         Disassembler CPU state.
 * @param   pRange       Pointer MMIO range.
 * @param   ppStat       Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
        return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->prefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* A 64-bit REP count of 4G or more is punted to the recompiler. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        /* In 16-bit mode only CX supplies the iteration count. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    /* DF decides whether the string pointers advance or retreat. */
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_HC_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                /* Advance source/destination pointers and both guest index registers. */
                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->prefix & PREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_HC_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_HC_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMMIOGetRange(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            /* Destination has an R3-only write callback: retry in ring-3. */
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
                return VINF_IOM_HC_MMIO_READ_WRITE;

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->prefix & PREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
540#endif /* IOM_WITH_MOVS_SUPPORT */
541
542
/**
 * [REP] STOSB
 * [REP] STOSW
 * [REP] STOSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 */
static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /*
     * We do not support segment prefixes or REPNE..
     */
    if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
        return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get bytes/words/dwords count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->prefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* A 64-bit REP count of 4G or more is punted to the recompiler. */
        if (    CPUMIsGuestIn64BitCode(VMMGetCpu(pVM), pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        /* In 16-bit mode only CX supplies the iteration count. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

/** @todo r=bird: bounds checks! */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    /* DF decides whether the destination pointer advances or retreats. */
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif


    RTGCPHYS    Phys    = GCPhysFault;
    uint32_t    u32Data = pRegFrame->eax;
    int rc;
    if (pRange->CTX_SUFF(pfnFillCallback))
    {
        /*
         * Use the fill callback.  It handles all cTransfers units in one
         * call, so the registers are updated in a single step afterwards.
         */
        /** @todo pfnFillCallback must return number of bytes successfully written!!! */
        if (offIncrement > 0)
        {
            /* addr++ variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys, u32Data, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi += cTransfers << SIZE_2_SHIFT(cb);
                if (pCpu->prefix & PREFIX_REP)
                    pRegFrame->ecx = 0;
            }
        }
        else
        {
            /* addr-- variant: the callback fills upwards, so start from the
               lowest address of the region being stored to. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)), u32Data, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi -= cTransfers << SIZE_2_SHIFT(cb);
                if (pCpu->prefix & PREFIX_REP)
                    pRegFrame->ecx = 0;
            }
        }
    }
    else
    {
        /*
         * Use the write callback, one unit per iteration.
         */
        Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);

        /* fill loop. */
        do
        {
            rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
            if (rc != VINF_SUCCESS)
                break;

            Phys           += offIncrement;
            pRegFrame->rdi += offIncrement;
            cTransfers--;
        } while (cTransfers);

        /* Update ecx on exit (reflects any units left when a write failed). */
        if (pCpu->prefix & PREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}
667
668
/**
 * [REP] LODSB
 * [REP] LODSW
 * [REP] LODSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 */
static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * We do not support segment prefixes or REP* (despite the [REP] above;
     * only the single-shot forms are interpreted here).
     */
    if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
        return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    /* DF decides whether rsi advances or retreats. */
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

    /*
     * Perform read.  The value is written directly into the low cb bytes of
     * rax.
     * NOTE(review): for cb == 4 a real LODSD in 64-bit mode zero-extends
     * into RAX; this only overwrites the low 4 bytes -- confirm intended.
     */
    int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
    if (rc == VINF_SUCCESS)
        pRegFrame->rsi += offIncrement;

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}
716
717
718/**
719 * CMP [MMIO], reg|imm
720 * CMP reg|imm, [MMIO]
721 *
722 * Restricted implementation.
723 *
724 *
725 * @returns VBox status code.
726 *
727 * @param pVM The virtual machine.
728 * @param pRegFrame Trap register frame.
729 * @param GCPhysFault The GC physical address corresponding to pvFault.
730 * @param pCpu Disassembler CPU state.
731 * @param pRange Pointer MMIO range.
732 */
733static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
734{
735 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
736
737 /*
738 * Get the operands.
739 */
740 unsigned cb = 0;
741 uint64_t uData1 = 0;
742 uint64_t uData2 = 0;
743 int rc;
744 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
745 /* cmp reg, [MMIO]. */
746 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
747 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
748 /* cmp [MMIO], reg|imm. */
749 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
750 else
751 {
752 AssertMsgFailed(("Disassember CMP problem..\n"));
753 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
754 }
755
756 if (rc == VINF_SUCCESS)
757 {
758#if HC_ARCH_BITS == 32
759 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
760 if (cb > 4)
761 return VINF_IOM_HC_MMIO_READ_WRITE;
762#endif
763 /* Emulate CMP and update guest flags. */
764 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
765 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
766 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
767 iomMMIOStatLength(pVM, cb);
768 }
769
770 return rc;
771}
772
773
/**
 * AND [MMIO], reg|imm
 * AND reg, [MMIO]
 * OR [MMIO], reg|imm
 * OR reg, [MMIO]
 * XOR [MMIO], reg|imm
 * XOR reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 * @param   pfnEmulate  Instruction emulation function (EMEmulateAnd/Or/Xor
 *                      style: updates *puData1 and returns the EFLAGS).
 */
static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
{
    unsigned    cb     = 0;
    uint64_t    uData1 = 0;
    uint64_t    uData2 = 0;
    bool        fAndWrite;
    int         rc;

#ifdef LOG_ENABLED
    /* Instruction name for the log line below; only compiled with logging. */
    const char *pszInstr;

    if (pCpu->pCurInstr->opcode == OP_XOR)
        pszInstr = "Xor";
    else if (pCpu->pCurInstr->opcode == OP_OR)
        pszInstr = "Or";
    else if (pCpu->pCurInstr->opcode == OP_AND)
        pszInstr = "And";
    else
        pszInstr = "OrXorAnd??";
#endif

    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_HC_MMIO_READ_WRITE;
#endif
        /* and reg, [MMIO]: read-only access, result goes to the register. */
        Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
        fAndWrite = false;
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_HC_MMIO_READ_WRITE;
#endif
        /* and [MMIO], reg|imm: read-modify-write, so both callbacks must be
           usable in this context; otherwise retry in ring-3. */
        fAndWrite = true;
        if (    (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
            &&  (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
            rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        else
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
    }
    else
    {
        AssertMsgFailed(("Disassember AND problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
        /* Emulate AND and update guest flags. */
        uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);

        LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));

        if (fAndWrite)
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
        else
        {
            /* Store result to register. */
            bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
            AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
        }
        if (rc == VINF_SUCCESS)
        {
            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
            iomMMIOStatLength(pVM, cb);
        }
    }

    return rc;
}
873
874
/**
 * TEST [MMIO], reg|imm
 * TEST reg, [MMIO]
 *
 * Restricted implementation.  Like AND but without the write-back, so only a
 * read access is needed.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 */
static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    unsigned    cb     = 0;
    uint64_t    uData1 = 0;
    uint64_t    uData2 = 0;
    int         rc;

    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
        /* and test, [MMIO]. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
        /* test [MMIO], reg|imm. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
    }
    else
    {
        AssertMsgFailed(("Disassember TEST problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        /* NOTE(review): the MMIO read above has already hit the device at
           this point, so a retry after this return re-reads it -- confirm
           that is benign for the devices concerned. */
        if (cb > 4)
            return VINF_IOM_HC_MMIO_READ_WRITE;
#endif

        /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
        uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}
932
933
/**
 * BT [MMIO], reg|imm
 *
 * Restricted implementation.  Reads the memory operand and copies the
 * selected bit into CF.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 */
static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    uint64_t    uBit  = 0;
    uint64_t    uData = 0;
    unsigned    cbIgnored;

    /* Parameter 2 is the bit offset (register or immediate). */
    if (!iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cbIgnored))
    {
        AssertMsgFailed(("Disassember BT problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    /* The size of the memory operand only matters here. */
    unsigned cbData = DISGetParamSize(pCpu, &pCpu->param1);

    /* bt [MMIO], reg|imm. */
    int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
    if (rc == VINF_SUCCESS)
    {
        /* Find the bit inside the faulting address */
        /* NOTE(review): if uBit >= cbData * 8 this shift is undefined
           behaviour in C; presumably the faulting address already accounts
           for the bit displacement so uBit stays in range -- verify.  The
           1-bit bitfield assignment truncates the shifted value to CF. */
        pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
        iomMMIOStatLength(pVM, cbData);
    }

    return rc;
}
975
/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.  Performs the exchange as an MMIO read followed
 * by an MMIO write plus a register store; not atomic.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_HC_MMIO_READ_WRITE;

    int         rc;
    unsigned    cb     = 0;
    uint64_t    uData1 = 0;
    uint64_t    uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]: read MMIO, write old register value back, then
           store the read value in the register. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg: same dance with the operands swapped. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassember XCHG problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}
1050
1051
1052/**
1053 * \#PF Handler callback for MMIO ranges.
1054 *
1055 * @returns VBox status code (appropriate for GC return).
1056 * @param pVM VM Handle.
1057 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1058 * any error code (the EPT misconfig hack).
1059 * @param pCtxCore Trap register frame.
1060 * @param GCPhysFault The GC physical address corresponding to pvFault.
1061 * @param pvUser Pointer to the MMIO ring-3 range entry.
1062 */
1063static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
1064{
1065 /* Take the IOM lock before performing any MMIO. */
1066 int rc = iomLock(pVM);
1067#ifndef IN_RING3
1068 if (rc == VERR_SEM_BUSY)
1069 return VINF_IOM_HC_MMIO_READ_WRITE;
1070#endif
1071 AssertRC(rc);
1072
1073 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1074 Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
1075 GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1076
1077 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1078 Assert(pRange);
1079 Assert(pRange == iomMMIOGetRange(pVM, GCPhysFault));
1080 /** @todo implement per-device locks for MMIO access. It can replace the IOM
1081 * lock for most of the code, provided that we retake the lock while
1082 * deregistering PIOMMMIORANGE to deal with remapping/access races
1083 * (unlikely, but an SMP guest shouldn't cause us to crash). */
1084 Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1085
1086#ifdef VBOX_WITH_STATISTICS
1087 /*
1088 * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
1089 */
1090 PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhysFault, pRange);
1091 if (!pStats)
1092 {
1093# ifdef IN_RING3
1094 iomUnlock(pVM);
1095 return VERR_NO_MEMORY;
1096# else
1097 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1098 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1099 iomUnlock(pVM);
1100 return VINF_IOM_HC_MMIO_READ_WRITE;
1101# endif
1102 }
1103#endif
1104
1105#ifndef IN_RING3
1106 /*
1107 * Should we defer the request right away? This isn't usually the case, so
1108 * do the simple test first and the try deal with uErrorCode being N/A.
1109 */
1110 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1111 || !pRange->CTX_SUFF(pfnReadCallback))
1112 && ( uErrorCode == UINT32_MAX
1113 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1114 : uErrorCode & X86_TRAP_PF_RW
1115 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1116 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1117 )
1118 )
1119 )
1120 {
1121 if (uErrorCode & X86_TRAP_PF_RW)
1122 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1123 else
1124 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1125
1126 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1127 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1128 iomUnlock(pVM);
1129 return VINF_IOM_HC_MMIO_READ_WRITE;
1130 }
1131#endif /* !IN_RING3 */
1132
1133 /*
1134 * Disassemble the instruction and interpret it.
1135 */
1136 PVMCPU pVCpu = VMMGetCpu(pVM);
1137 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1138 unsigned cbOp;
1139 rc = EMInterpretDisasOne(pVM, pVCpu, pCtxCore, pDis, &cbOp);
1140 AssertRC(rc);
1141 if (RT_FAILURE(rc))
1142 {
1143 iomUnlock(pVM);
1144 return rc;
1145 }
1146 switch (pDis->pCurInstr->opcode)
1147 {
1148 case OP_MOV:
1149 case OP_MOVZX:
1150 case OP_MOVSX:
1151 {
1152 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1153 AssertMsg(uErrorCode == UINT32_MAX || DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->param1.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags), pDis->param2.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param2.flags), uErrorCode));
1154 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1155 ? uErrorCode & X86_TRAP_PF_RW
1156 : DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags))
1157 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1158 else
1159 rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1160 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1161 break;
1162 }
1163
1164
1165#ifdef IOM_WITH_MOVS_SUPPORT
1166 case OP_MOVSB:
1167 case OP_MOVSWD:
1168 {
1169 if (uErrorCode == UINT32_MAX)
1170 return VINF_IOM_HC_MMIO_READ_WRITE;
1171 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1172 PSTAMPROFILE pStat = NULL;
1173 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1174 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1175 break;
1176 }
1177#endif
1178
1179 case OP_STOSB:
1180 case OP_STOSWD:
1181 Assert(uErrorCode & X86_TRAP_PF_RW);
1182 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1183 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1184 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1185 break;
1186
1187 case OP_LODSB:
1188 case OP_LODSWD:
1189 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1190 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1191 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1192 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1193 break;
1194
1195 case OP_CMP:
1196 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1197 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1198 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1199 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1200 break;
1201
1202 case OP_AND:
1203 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1204 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1205 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1206 break;
1207
1208 case OP_OR:
1209 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1210 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1211 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1212 break;
1213
1214 case OP_XOR:
1215 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1216 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1217 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1218 break;
1219
1220 case OP_TEST:
1221 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1222 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1223 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1224 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1225 break;
1226
1227 case OP_BT:
1228 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1229 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1230 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1231 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1232 break;
1233
1234 case OP_XCHG:
1235 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1236 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1237 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1238 break;
1239
1240
1241 /*
1242 * The instruction isn't supported. Hand it on to ring-3.
1243 */
1244 default:
1245 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1246 rc = VINF_IOM_HC_MMIO_READ_WRITE;
1247 break;
1248 }
1249
1250 /*
1251 * On success advance EIP.
1252 */
1253 if (rc == VINF_SUCCESS)
1254 pCtxCore->rip += cbOp;
1255 else
1256 {
1257 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1258#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1259 switch (rc)
1260 {
1261 case VINF_IOM_HC_MMIO_READ:
1262 case VINF_IOM_HC_MMIO_READ_WRITE:
1263 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1264 break;
1265 case VINF_IOM_HC_MMIO_WRITE:
1266 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1267 break;
1268 }
1269#endif
1270 }
1271
1272 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1273 iomUnlock(pVM);
1274 return rc;
1275}
1276
1277/**
1278 * \#PF Handler callback for MMIO ranges.
1279 *
1280 * @returns VBox status code (appropriate for GC return).
1281 * @param pVM VM Handle.
1282 * @param uErrorCode CPU Error code.
1283 * @param pCtxCore Trap register frame.
1284 * @param pvFault The fault address (cr2).
1285 * @param GCPhysFault The GC physical address corresponding to pvFault.
1286 * @param pvUser Pointer to the MMIO ring-3 range entry.
1287 */
1288VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1289{
1290 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1291 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1292 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1293 return VBOXSTRICTRC_VAL(rcStrict);
1294}
1295
1296/**
1297 * Physical access handler for MMIO ranges.
1298 *
1299 * @returns VBox status code (appropriate for GC return).
1300 * @param pVM VM Handle.
1301 * @param uErrorCode CPU Error code.
1302 * @param pCtxCore Trap register frame.
1303 * @param GCPhysFault The GC physical address.
1304 */
1305VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1306{
1307 int rc2 = iomLock(pVM);
1308#ifndef IN_RING3
1309 if (rc2 == VERR_SEM_BUSY)
1310 return VINF_IOM_HC_MMIO_READ_WRITE;
1311#endif
1312 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMMIOGetRange(pVM, GCPhysFault));
1313 iomUnlock(pVM);
1314 return VBOXSTRICTRC_VAL(rcStrict);
1315}
1316
1317#ifdef IN_RING3
1318/**
1319 * \#PF Handler callback for MMIO ranges.
1320 *
1321 * @returns VINF_SUCCESS if the handler have carried out the operation.
1322 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1323 * @param pVM VM Handle.
1324 * @param GCPhys The physical address the guest is writing to.
1325 * @param pvPhys The HC mapping of that address.
1326 * @param pvBuf What the guest is reading/writing.
1327 * @param cbBuf How much it's reading/writing.
1328 * @param enmAccessType The access type.
1329 * @param pvUser Pointer to the MMIO range entry.
1330 */
1331DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1332{
1333 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1334 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1335
1336 /* Take the IOM lock before performing any MMIO. */
1337 int rc = iomLock(pVM);
1338 AssertRC(rc);
1339
1340 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1341
1342 Assert(pRange);
1343 Assert(pRange == iomMMIOGetRange(pVM, GCPhysFault));
1344 /** @todo implement per-device locks for MMIO access. It can replace the IOM
1345 * lock for most of the code, provided that we retake the lock while
1346 * deregistering PIOMMMIORANGE to deal with remapping/access races
1347 * (unlikely, but an SMP guest shouldn't cause us to crash). */
1348 Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1349
1350 if (enmAccessType == PGMACCESSTYPE_READ)
1351 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1352 else
1353 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1354
1355 AssertRC(rc);
1356 iomUnlock(pVM);
1357 return rc;
1358}
1359#endif /* IN_RING3 */
1360
1361/**
1362 * Reads a MMIO register.
1363 *
1364 * @returns VBox status code.
1365 *
1366 * @param pVM VM handle.
1367 * @param GCPhys The physical address to read.
1368 * @param pu32Value Where to store the value read.
 * @param   cbValue     The size of the register to read in bytes. 1, 2, 4 or 8 bytes.
1370 */
1371VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1372{
1373 /* Take the IOM lock before performing any MMIO. */
1374 int rc = iomLock(pVM);
1375#ifndef IN_RING3
1376 if (rc == VERR_SEM_BUSY)
1377 return VINF_IOM_HC_MMIO_WRITE;
1378#endif
1379 AssertRC(rc);
1380#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1381 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1382#endif
1383
1384 /*
1385 * Lookup the current context range node and statistics.
1386 */
1387 PIOMMMIORANGE pRange = iomMMIOGetRange(pVM, GCPhys);
1388 AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1389 if (!pRange)
1390 {
1391 iomUnlock(pVM);
1392 return VERR_INTERNAL_ERROR;
1393 }
1394 /** @todo implement per-device locks for MMIO access. */
1395 Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1396#ifdef VBOX_WITH_STATISTICS
1397 PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhys, pRange);
1398 if (!pStats)
1399 {
1400 iomUnlock(pVM);
1401# ifdef IN_RING3
1402 return VERR_NO_MEMORY;
1403# else
1404 return VINF_IOM_HC_MMIO_READ;
1405# endif
1406 }
1407 STAM_COUNTER_INC(&pStats->Accesses);
1408#endif /* VBOX_WITH_STATISTICS */
1409
1410 if (pRange->CTX_SUFF(pfnReadCallback))
1411 {
1412 /*
1413 * Perform the read and deal with the result.
1414 */
1415 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1416 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, (unsigned)cbValue);
1417 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1418 switch (rc)
1419 {
1420 case VINF_SUCCESS:
1421 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1422 iomUnlock(pVM);
1423 return rc;
1424#ifndef IN_RING3
1425 case VINF_IOM_HC_MMIO_READ:
1426 case VINF_IOM_HC_MMIO_READ_WRITE:
1427 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1428#endif
1429 default:
1430 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1431 iomUnlock(pVM);
1432 return rc;
1433
1434 case VINF_IOM_MMIO_UNUSED_00:
1435 switch (cbValue)
1436 {
1437 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1438 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1439 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1440 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1441 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1442 }
1443 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1444 iomUnlock(pVM);
1445 return VINF_SUCCESS;
1446
1447 case VINF_IOM_MMIO_UNUSED_FF:
1448 switch (cbValue)
1449 {
1450 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1451 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1452 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1453 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1454 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1455 }
1456 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1457 iomUnlock(pVM);
1458 return VINF_SUCCESS;
1459 }
1460 }
1461#ifndef IN_RING3
1462 if (pRange->pfnReadCallbackR3)
1463 {
1464 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1465 iomUnlock(pVM);
1466 return VINF_IOM_HC_MMIO_READ;
1467 }
1468#endif
1469
1470 /*
1471 * Lookup the ring-3 range.
1472 */
1473 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1474 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1475 /* Unassigned memory; this is actually not supposed to happen. */
1476 switch (cbValue)
1477 {
1478 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1479 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1480 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1481 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1482 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1483 }
1484 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1485 iomUnlock(pVM);
1486 return VINF_SUCCESS;
1487}
1488
1489
1490/**
1491 * Writes to a MMIO register.
1492 *
1493 * @returns VBox status code.
1494 *
1495 * @param pVM VM handle.
1496 * @param GCPhys The physical address to write to.
1497 * @param u32Value The value to write.
 * @param   cbValue     The size of the register to write in bytes. 1, 2 or 4 bytes.
1499 */
VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
{
    /* Take the IOM lock before performing any MMIO. */
    int rc = iomLock(pVM);
#ifndef IN_RING3
    /* In RC/R0 we cannot wait for the lock; have ring-3 redo the write. */
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_HC_MMIO_WRITE;
#endif
    AssertRC(rc);
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
#endif

    /*
     * Lookup the current context range node.
     */
    PIOMMMIORANGE pRange = iomMMIOGetRange(pVM, GCPhys);
    AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
    if (!pRange)
    {
        iomUnlock(pVM);
        return VERR_INTERNAL_ERROR;
    }
    /** @todo implement per-device locks for MMIO access. */
    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
#ifdef VBOX_WITH_STATISTICS
    /* Per-range statistics; failure to allocate them defers to ring-3 in RC/R0. */
    PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhys, pRange);
    if (!pStats)
    {
        iomUnlock(pVM);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_HC_MMIO_WRITE;
# endif
    }
    STAM_COUNTER_INC(&pStats->Accesses);
#endif /* VBOX_WITH_STATISTICS */

    /*
     * Perform the write if there's a write handler. R0/GC may have
     * to defer it to ring-3.
     */
    if (pRange->CTX_SUFF(pfnWriteCallback))
    {
        STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
        rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, &u32Value, (unsigned)cbValue);
        STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
#ifndef IN_RING3
        /* Count device-requested deferrals to ring-3. */
        if (    rc == VINF_IOM_HC_MMIO_WRITE
            ||  rc == VINF_IOM_HC_MMIO_READ_WRITE)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
#endif
        Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
        iomUnlock(pVM);
        return rc;
    }
#ifndef IN_RING3
    /* No RC/R0 write callback, but ring-3 has one: defer the write. */
    if (pRange->pfnWriteCallbackR3)
    {
        STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        iomUnlock(pVM);
        return VINF_IOM_HC_MMIO_WRITE;
    }
#endif

    /*
     * No write handler, nothing to do.
     */
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
    iomUnlock(pVM);
    return VINF_SUCCESS;
}
1575
1576/**
1577 * [REP*] INSB/INSW/INSD
1578 * ES:EDI,DX[,ECX]
1579 *
1580 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1581 *
1582 * @returns Strict VBox status code. Informational status codes other than the one documented
1583 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1584 * @retval VINF_SUCCESS Success.
1585 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1586 * status code must be passed on to EM.
1587 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1588 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1589 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1590 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1591 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1592 *
1593 * @param pVM The virtual machine.
1594 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1595 * @param uPort IO Port
1596 * @param uPrefix IO instruction prefix
1597 * @param cbTransfer Size of transfer unit
1598 */
VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);

    /*
     * We do not support REPNE or decrementing destination
     * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
     */
    if (   (uPrefix & PREFIX_REPNE)
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* A 64-bit RCX >= 4G is too much to emulate here; punt to the recompiler. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->ecx;

        /* In 16-bit mode only CX (the low word) participates in the count. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Convert destination address es:edi. */
    RTGCPTR GCPtrDst;
    int rc2 = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrDst);
    if (RT_FAILURE(rc2))
    {
        Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /* The whole destination buffer must be writable (user-accessible for CPL 3) up front. */
    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
                          X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
    if (rc2 != VINF_SUCCESS)
    {
        Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /* If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers. */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        pRegFrame->rdi += (cTransfersOrg - cTransfers) * cbTransfer;
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);
#endif
    /* One transfer unit at a time; guest state (rdi, count) is advanced per
       completed unit, so a break on a deferred port read leaves the guest
       registers consistent with the work already done. */
    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value;
        rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
        Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
        GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
        pRegFrame->rdi += cbTransfer;
        cTransfers--;
    }
#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update ecx on exit. */
    if (uPrefix & PREFIX_REP)
        pRegFrame->ecx = cTransfers;

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
1693
1694
1695/**
1696 * [REP*] INSB/INSW/INSD
1697 * ES:EDI,DX[,ECX]
1698 *
1699 * @returns Strict VBox status code. Informational status codes other than the one documented
1700 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1701 * @retval VINF_SUCCESS Success.
1702 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1703 * status code must be passed on to EM.
1704 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1705 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1706 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1707 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1708 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1709 *
1710 * @param pVM The virtual machine.
1711 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1712 * @param pCpu Disassembler CPU state.
1713 */
1714VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1715{
1716 /*
1717 * Get port number directly from the register (no need to bother the
1718 * disassembler). And get the I/O register size from the opcode / prefix.
1719 */
1720 RTIOPORT Port = pRegFrame->edx & 0xffff;
1721 unsigned cb = 0;
1722 if (pCpu->pCurInstr->opcode == OP_INSB)
1723 cb = 1;
1724 else
1725 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1726
1727 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1728 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1729 {
1730 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1731 return rcStrict;
1732 }
1733
1734 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1735}
1736
1737
1738/**
1739 * [REP*] OUTSB/OUTSW/OUTSD
1740 * DS:ESI,DX[,ECX]
1741 *
1742 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1743 *
1744 * @returns Strict VBox status code. Informational status codes other than the one documented
1745 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1746 * @retval VINF_SUCCESS Success.
1747 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1748 * status code must be passed on to EM.
1749 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1750 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1751 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1752 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1753 *
1754 * @param pVM The virtual machine.
1755 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1756 * @param uPort IO Port
1757 * @param uPrefix IO instruction prefix
1758 * @param cbTransfer Size of transfer unit
1759 */
VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);

    /*
     * We do not support segment prefixes, REPNE or
     * decrementing source pointer.
     */
    if (   (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* A 64-bit RCX >= 4G is too much to emulate here; punt to the recompiler. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->ecx;
        /* In 16-bit mode only CX (the low word) participates in the count. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Convert source address ds:esi. */
    RTGCPTR GCPtrSrc;
    int rc2 = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrSrc);
    if (RT_FAILURE(rc2))
    {
        Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we currently can't recover properly from traps inside this instruction */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
    /* The whole source buffer must be readable (user-accessible for CPL 3) up front. */
    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
                          (cpl == 3) ? X86_PTE_US : 0);
    if (rc2 != VINF_SUCCESS)
    {
        Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /*
         * If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers.
         */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        pRegFrame->rsi += (cTransfersOrg - cTransfers) * cbTransfer;
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);
#endif

    /* One transfer unit at a time; guest state (rsi, count) is advanced per
       completed unit, so a break on a deferred port write leaves the guest
       registers consistent with the work already done. */
    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value = 0;
        rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
        if (rcStrict != VINF_SUCCESS)
            break;
        rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
        pRegFrame->rsi += cbTransfer;
        cTransfers--;
    }

#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update ecx on exit. */
    if (uPrefix & PREFIX_REP)
        pRegFrame->ecx = cTransfers;

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
1857
1858
1859/**
1860 * [REP*] OUTSB/OUTSW/OUTSD
1861 * DS:ESI,DX[,ECX]
1862 *
1863 * @returns Strict VBox status code. Informational status codes other than the one documented
1864 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1865 * @retval VINF_SUCCESS Success.
1866 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1867 * status code must be passed on to EM.
1868 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1869 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1870 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1871 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1872 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1873 *
1874 * @param pVM The virtual machine.
1875 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1876 * @param pCpu Disassembler CPU state.
1877 */
1878VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1879{
1880 /*
1881 * Get port number from the first parameter.
1882 * And get the I/O register size from the opcode / prefix.
1883 */
1884 uint64_t Port = 0;
1885 unsigned cb = 0;
1886 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1887 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1888 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1889 cb = 1;
1890 else
1891 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1892
1893 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1894 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1895 {
1896 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1897 return rcStrict;
1898 }
1899
1900 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1901}
1902
1903#ifndef IN_RC
1904
/**
 * Mapping an MMIO2 page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the VGA device.)
 *
 * Once aliased, accesses to the page go straight to the MMIO2 backing page
 * instead of faulting into the MMIO handler; IOMMMIOResetRegion undoes this.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The virtual machine.
 * @param   GCPhys          The address of the MMIO page to be changed.
 * @param   GCPhysRemapped  The address of the MMIO2 page.
 * @param   fPageFlags      Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
 *                          for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
{
    /* Currently only called from the VGA device during MMIO. */
    Assert(IOMIsLockOwner(pVM));
    Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));

    /* Only the one flag combination is supported; reject anything else. */
    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if (    !HWACCMIsEnabled(pVM)       /* useless without VT-x/AMD-V */
        ||  (   CPUMIsGuestInPagedProtectedMode(pVCpu)
             && !HWACCMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    /*
     * Lookup the context range node the page belongs to.
     * (Safe here: the caller is asserted to hold the IOM lock above.)
     */
    PIOMMMIORANGE pRange = iomMMIOGetRange(pVM, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);

    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys         &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;

    int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table. Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
# ifdef VBOX_STRICT
    uint64_t fFlags;
    RTHCPHYS HCPhys;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
    Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
# endif
#endif
    /* Prefetch failures ("page not present") are tolerated: the page will
       simply fault in later through the normal path. */
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    return VINF_SUCCESS;
}
1971
1972
/**
 * Mapping a HC page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the APIC in the VT-x case.)
 *
 * Unlike IOMMMIOMapMMIO2Page, this variant does not take the IOM lock and
 * only validates the range in strict builds (see note below).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   GCPhys      The address of the MMIO page to be changed.
 * @param   HCPhys      The address of the host physical page.
 * @param   fPageFlags  Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
 *                      for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
{
    /* Currently only called from VT-x code during a page fault. */
    Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));

    /* Only the one flag combination is supported; reject anything else. */
    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    Assert(HWACCMIsEnabled(pVM));

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Lookup the context range node the page belongs to.
     */
#ifdef VBOX_STRICT
    /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
    PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
    AssertMsgReturn(pRange,
            ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
#endif

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;

    int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table. Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
    /* Prefetch failures ("page not present") are tolerated: the page will
       simply fault in later through the normal path. */
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    return VINF_SUCCESS;
}
2027
2028
/**
 * Reset a previously modified MMIO region; restore the access flags.
 *
 * This undoes the page aliasing set up by IOMMMIOMapMMIO2Page /
 * IOMMMIOMapMMIOHCPage, so accesses fault into the MMIO handler again.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   GCPhys      Physical address that's part of the MMIO region to be reset.
 */
VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
{
    Log(("IOMMMIOResetRegion %RGp\n", GCPhys));

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if (    !HWACCMIsEnabled(pVM)       /* useless without VT-x/AMD-V */
        ||  (   CPUMIsGuestInPagedProtectedMode(pVCpu)
             && !HWACCMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    /*
     * Lookup the context range node the page belongs to.
     */
#ifdef VBOX_STRICT
    /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
    PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
    AssertMsgReturn(pRange,
            ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
#endif

    /*
     * Call PGM to do the job work.
     *
     * After the call, all the pages should be non-present... unless there is
     * a page pool flush pending (unlikely).
     */
    int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
    AssertRC(rc);

#ifdef VBOX_STRICT
    /* Strict-build verification: walk the whole range and check that every
       shadow page really is non-present now (skipped if a CR3 sync will do
       the flushing anyway). */
    /* NOTE(review): this loop dereferences pRange without holding the IOM
       lock, despite the "not safe to access" comment above — presumably
       tolerated in strict builds only; confirm against IOM locking rules. */
    if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    {
        uint32_t cb     = pRange->cb;
        GCPhys          = pRange->GCPhys;
        while (cb)
        {
            uint64_t fFlags;
            RTHCPHYS HCPhys;
            rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
            Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
            cb     -= PAGE_SIZE;
            GCPhys += PAGE_SIZE;
        }
    }
#endif
    return rc;
}
2088
2089#endif /* !IN_RC */
2090
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette