VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 37452

Last change on this file since 37452 was 37452, checked in by vboxsync, 14 years ago

IOM,PDMCritSect: Extended PDMCritSectEnter to handle rcBusy=VINF_SUCCESS as a request to call ring-3 to acquire a busy lock. Implemented device level locking in the MMIO code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 74.6 KB
Line 
1/* $Id: IOMAllMMIO.cpp 37452 2011-06-14 18:13:48Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
32# include <VBox/vmm/iem.h>
33#endif
34#include "IOMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/hwaccm.h>
38#include "IOMInline.h"
39
40#include <VBox/dis.h>
41#include <VBox/disopcode.h>
42#include <VBox/vmm/pdmdev.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <iprt/assert.h>
46#include <VBox/log.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
54
/**
 * Array for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 *
 * Indexed by the access size in bytes.  Invalid sizes (0, 3, 5, 6, 7) map to
 * ~0 so that any accidental use produces an absurd shift count that is easy
 * to spot.  For valid entries: 1 << g_aSize2Shift[cb] == cb.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0,    /* 0 - invalid */
    0,     /* *1 == 2^0 */
    1,     /* *2 == 2^1 */
    ~0,    /* 3 - invalid */
    2,     /* *4 == 2^2 */
    ~0,    /* 5 - invalid */
    ~0,    /* 6 - invalid */
    ~0,    /* 7 - invalid */
    3      /* *8 == 2^3 */
};

/**
 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 * @note No bounds checking; cb must be a valid access size (1, 2, 4 or 8).
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
75
76
77/**
78 * Wrapper which does the write and updates range statistics when such are enabled.
79 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
80 */
81static int iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
82{
83#ifdef VBOX_WITH_STATISTICS
84 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
85 Assert(pStats);
86#endif
87
88 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
89 int rc;
90 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
91 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
92 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
93 else
94 rc = VINF_SUCCESS;
95 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
96 STAM_COUNTER_INC(&pStats->Accesses);
97 return rc;
98}
99
100
101/**
102 * Wrapper which does the read and updates range statistics when such are enabled.
103 */
104DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
105{
106#ifdef VBOX_WITH_STATISTICS
107 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
108 Assert(pStats);
109#endif
110
111 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
112 int rc;
113 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
114 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
115 else
116 rc = VINF_IOM_MMIO_UNUSED_FF;
117 if (rc != VINF_SUCCESS)
118 {
119 switch (rc)
120 {
121 case VINF_IOM_MMIO_UNUSED_FF:
122 switch (cbValue)
123 {
124 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
125 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
126 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
127 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
128 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
129 }
130 rc = VINF_SUCCESS;
131 break;
132
133 case VINF_IOM_MMIO_UNUSED_00:
134 switch (cbValue)
135 {
136 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
137 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
138 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
139 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
140 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
141 }
142 rc = VINF_SUCCESS;
143 break;
144 }
145 }
146 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
147 STAM_COUNTER_INC(&pStats->Accesses);
148 return rc;
149}
150
151
152/**
153 * Internal - statistics only.
154 */
155DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
156{
157#ifdef VBOX_WITH_STATISTICS
158 switch (cb)
159 {
160 case 1:
161 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
162 break;
163 case 2:
164 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
165 break;
166 case 4:
167 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
168 break;
169 case 8:
170 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
171 break;
172 default:
173 /* No way. */
174 AssertMsgFailed(("Invalid data length %d\n", cb));
175 break;
176 }
177#else
178 NOREF(pVM); NOREF(cb);
179#endif
180}
181
182
/**
 * MOV reg, mem (read)
 * MOVZX reg, mem (read)
 * MOVSX reg, mem (read)
 *
 * Reads cb bytes from the MMIO range, optionally sign extends them for
 * MOVSX, and stores the result in the destination register.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    /* If only the ring-3 read callback exists, we should not get here in RC/R0. */
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the data size from parameter 2,
     * and call the handler function to get the data.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));

    uint64_t u64Data = 0;
    int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Do sign extension for MOVSX.
         */
        /** @todo checkup MOVSX implementation! */
        /* NOTE(review): only byte and word sources are handled; a 4-byte
           source would be sign extended from bit 15 here - confirm the
           disassembler never reports OP_MOVSX with cb == 4 (movsxd). */
        if (pCpu->pCurInstr->opcode == OP_MOVSX)
        {
            if (cb == 1)
            {
                /* DWORD <- BYTE */
                int64_t iData = (int8_t)u64Data;
                u64Data = (uint64_t)iData;
            }
            else
            {
                /* DWORD <- WORD */
                int64_t iData = (int16_t)u64Data;
                u64Data = (uint64_t)iData;
            }
        }

        /*
         * Store the result to register (parameter 1).
         */
        bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
        AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
    }

    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}
242
243
244/**
245 * MOV mem, reg|imm (write)
246 *
247 * @returns VBox status code.
248 *
249 * @param pVM The virtual machine.
250 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
251 * @param pCpu Disassembler CPU state.
252 * @param pRange Pointer MMIO range.
253 * @param GCPhysFault The GC physical address corresponding to pvFault.
254 */
255static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
256{
257 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
258
259 /*
260 * Get data to write from second parameter,
261 * and call the callback to write it.
262 */
263 unsigned cb = 0;
264 uint64_t u64Data = 0;
265 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
266 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
267
268 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
269 if (rc == VINF_SUCCESS)
270 iomMMIOStatLength(pVM, cb);
271 return rc;
272}
273
274
/** Wrapper for reading virtual memory.
 * @returns VBox status code from the underlying read helper. */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    /* Raw-mode context uses the direct guest-RAM helper (no trap handler registered here). */
    return MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
#endif
}
286
287
/** Wrapper for writing virtual memory.
 * @returns VBox status code from the underlying write helper. */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code. Some thought needs to be spent on theoretical concurrency issues as
     *        as well since we're not behind the pgm lock and handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    /* Bypass access handlers; do not raise a guest trap on failure. */
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    /* Ring-3: the context core is not needed by the PGM write path. */
    NOREF(pCtxCore);
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
#endif
}
304
305
#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working */
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   fWriteAccess Set if the faulting access was a write (guest memory
 *                      -> MMIO); clear for a read (MMIO -> guest memory/MMIO).
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 * @param   ppStat      Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
                            PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
        return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->prefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* rcx >= 4G transfers cannot be represented in the 32-bit counter; punt to the recompiler. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        /* 16-bit address size: the counter is cx, not ecx. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    /* Direction flag decides whether the string pointers move up or down. */
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_HC_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->prefix & PREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_HC_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_HC_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            /* Destination only writable from ring-3: release the range ref and defer. */
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
            {
                iomMmioReleaseRange(pVM, pRange);
                return VINF_IOM_HC_MMIO_READ_WRITE;
            }

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
            iomMmioReleaseRange(pVM, pRange);
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->prefix & PREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */
547
548
549/**
550 * [REP] STOSB
551 * [REP] STOSW
552 * [REP] STOSD
553 *
554 * Restricted implementation.
555 *
556 *
557 * @returns VBox status code.
558 *
559 * @param pVM The virtual machine.
560 * @param pRegFrame Trap register frame.
561 * @param GCPhysFault The GC physical address corresponding to pvFault.
562 * @param pCpu Disassembler CPU state.
563 * @param pRange Pointer MMIO range.
564 */
565static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
566{
567 /*
568 * We do not support segment prefixes or REPNE..
569 */
570 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
571 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
572
573 /*
574 * Get bytes/words/dwords count to copy.
575 */
576 uint32_t cTransfers = 1;
577 if (pCpu->prefix & PREFIX_REP)
578 {
579#ifndef IN_RC
580 if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM), pRegFrame)
581 && pRegFrame->rcx >= _4G)
582 return VINF_EM_RAW_EMULATE_INSTR;
583#endif
584
585 cTransfers = pRegFrame->ecx;
586 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
587 cTransfers &= 0xffff;
588
589 if (!cTransfers)
590 return VINF_SUCCESS;
591 }
592
593/** @todo r=bird: bounds checks! */
594
595 /*
596 * Get data size.
597 */
598 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
599 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
600 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
601
602#ifdef VBOX_WITH_STATISTICS
603 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
604 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
605#endif
606
607
608 RTGCPHYS Phys = GCPhysFault;
609 uint32_t u32Data = pRegFrame->eax;
610 int rc;
611 if (pRange->CTX_SUFF(pfnFillCallback))
612 {
613 /*
614 * Use the fill callback.
615 */
616 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
617 if (offIncrement > 0)
618 {
619 /* addr++ variant. */
620 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys, u32Data, cb, cTransfers);
621 if (rc == VINF_SUCCESS)
622 {
623 /* Update registers. */
624 pRegFrame->rdi += cTransfers << SIZE_2_SHIFT(cb);
625 if (pCpu->prefix & PREFIX_REP)
626 pRegFrame->ecx = 0;
627 }
628 }
629 else
630 {
631 /* addr-- variant. */
632 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)), u32Data, cb, cTransfers);
633 if (rc == VINF_SUCCESS)
634 {
635 /* Update registers. */
636 pRegFrame->rdi -= cTransfers << SIZE_2_SHIFT(cb);
637 if (pCpu->prefix & PREFIX_REP)
638 pRegFrame->ecx = 0;
639 }
640 }
641 }
642 else
643 {
644 /*
645 * Use the write callback.
646 */
647 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
648
649 /* fill loop. */
650 do
651 {
652 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
653 if (rc != VINF_SUCCESS)
654 break;
655
656 Phys += offIncrement;
657 pRegFrame->rdi += offIncrement;
658 cTransfers--;
659 } while (cTransfers);
660
661 /* Update ecx on exit. */
662 if (pCpu->prefix & PREFIX_REP)
663 pRegFrame->ecx = cTransfers;
664 }
665
666 /*
667 * Work statistics and return.
668 */
669 if (rc == VINF_SUCCESS)
670 iomMMIOStatLength(pVM, cb);
671 return rc;
672}
673
674
675/**
676 * [REP] LODSB
677 * [REP] LODSW
678 * [REP] LODSD
679 *
680 * Restricted implementation.
681 *
682 *
683 * @returns VBox status code.
684 *
685 * @param pVM The virtual machine.
686 * @param pRegFrame Trap register frame.
687 * @param GCPhysFault The GC physical address corresponding to pvFault.
688 * @param pCpu Disassembler CPU state.
689 * @param pRange Pointer MMIO range.
690 */
691static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
692{
693 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
694
695 /*
696 * We do not support segment prefixes or REP*.
697 */
698 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
699 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
700
701 /*
702 * Get data size.
703 */
704 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
705 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
706 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
707
708 /*
709 * Perform read.
710 */
711 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
712 if (rc == VINF_SUCCESS)
713 pRegFrame->rsi += offIncrement;
714
715 /*
716 * Work statistics and return.
717 */
718 if (rc == VINF_SUCCESS)
719 iomMMIOStatLength(pVM, cb);
720 return rc;
721}
722
723
724/**
725 * CMP [MMIO], reg|imm
726 * CMP reg|imm, [MMIO]
727 *
728 * Restricted implementation.
729 *
730 *
731 * @returns VBox status code.
732 *
733 * @param pVM The virtual machine.
734 * @param pRegFrame Trap register frame.
735 * @param GCPhysFault The GC physical address corresponding to pvFault.
736 * @param pCpu Disassembler CPU state.
737 * @param pRange Pointer MMIO range.
738 */
739static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
740{
741 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
742
743 /*
744 * Get the operands.
745 */
746 unsigned cb = 0;
747 uint64_t uData1 = 0;
748 uint64_t uData2 = 0;
749 int rc;
750 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
751 /* cmp reg, [MMIO]. */
752 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
753 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
754 /* cmp [MMIO], reg|imm. */
755 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
756 else
757 {
758 AssertMsgFailed(("Disassember CMP problem..\n"));
759 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
760 }
761
762 if (rc == VINF_SUCCESS)
763 {
764#if HC_ARCH_BITS == 32
765 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
766 if (cb > 4)
767 return VINF_IOM_HC_MMIO_READ_WRITE;
768#endif
769 /* Emulate CMP and update guest flags. */
770 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
771 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
772 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
773 iomMMIOStatLength(pVM, cb);
774 }
775
776 return rc;
777}
778
779
780/**
781 * AND [MMIO], reg|imm
782 * AND reg, [MMIO]
783 * OR [MMIO], reg|imm
784 * OR reg, [MMIO]
785 *
786 * Restricted implementation.
787 *
788 *
789 * @returns VBox status code.
790 *
791 * @param pVM The virtual machine.
792 * @param pRegFrame Trap register frame.
793 * @param GCPhysFault The GC physical address corresponding to pvFault.
794 * @param pCpu Disassembler CPU state.
795 * @param pRange Pointer MMIO range.
796 * @param pfnEmulate Instruction emulation function.
797 */
798static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
799{
800 unsigned cb = 0;
801 uint64_t uData1 = 0;
802 uint64_t uData2 = 0;
803 bool fAndWrite;
804 int rc;
805
806#ifdef LOG_ENABLED
807 const char *pszInstr;
808
809 if (pCpu->pCurInstr->opcode == OP_XOR)
810 pszInstr = "Xor";
811 else if (pCpu->pCurInstr->opcode == OP_OR)
812 pszInstr = "Or";
813 else if (pCpu->pCurInstr->opcode == OP_AND)
814 pszInstr = "And";
815 else
816 pszInstr = "OrXorAnd??";
817#endif
818
819 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
820 {
821#if HC_ARCH_BITS == 32
822 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
823 if (cb > 4)
824 return VINF_IOM_HC_MMIO_READ_WRITE;
825#endif
826 /* and reg, [MMIO]. */
827 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
828 fAndWrite = false;
829 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
830 }
831 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
832 {
833#if HC_ARCH_BITS == 32
834 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
835 if (cb > 4)
836 return VINF_IOM_HC_MMIO_READ_WRITE;
837#endif
838 /* and [MMIO], reg|imm. */
839 fAndWrite = true;
840 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
841 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
842 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
843 else
844 rc = VINF_IOM_HC_MMIO_READ_WRITE;
845 }
846 else
847 {
848 AssertMsgFailed(("Disassember AND problem..\n"));
849 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
850 }
851
852 if (rc == VINF_SUCCESS)
853 {
854 /* Emulate AND and update guest flags. */
855 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
856
857 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
858
859 if (fAndWrite)
860 /* Store result to MMIO. */
861 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
862 else
863 {
864 /* Store result to register. */
865 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
866 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
867 }
868 if (rc == VINF_SUCCESS)
869 {
870 /* Update guest's eflags and finish. */
871 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
872 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
873 iomMMIOStatLength(pVM, cb);
874 }
875 }
876
877 return rc;
878}
879
880
881/**
882 * TEST [MMIO], reg|imm
883 * TEST reg, [MMIO]
884 *
885 * Restricted implementation.
886 *
887 *
888 * @returns VBox status code.
889 *
890 * @param pVM The virtual machine.
891 * @param pRegFrame Trap register frame.
892 * @param GCPhysFault The GC physical address corresponding to pvFault.
893 * @param pCpu Disassembler CPU state.
894 * @param pRange Pointer MMIO range.
895 */
896static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
897{
898 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
899
900 unsigned cb = 0;
901 uint64_t uData1 = 0;
902 uint64_t uData2 = 0;
903 int rc;
904
905 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
906 {
907 /* and test, [MMIO]. */
908 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
909 }
910 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
911 {
912 /* test [MMIO], reg|imm. */
913 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
914 }
915 else
916 {
917 AssertMsgFailed(("Disassember TEST problem..\n"));
918 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
919 }
920
921 if (rc == VINF_SUCCESS)
922 {
923#if HC_ARCH_BITS == 32
924 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
925 if (cb > 4)
926 return VINF_IOM_HC_MMIO_READ_WRITE;
927#endif
928
929 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
930 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
931 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
932 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
933 iomMMIOStatLength(pVM, cb);
934 }
935
936 return rc;
937}
938
939
940/**
941 * BT [MMIO], reg|imm
942 *
943 * Restricted implementation.
944 *
945 *
946 * @returns VBox status code.
947 *
948 * @param pVM The virtual machine.
949 * @param pRegFrame Trap register frame.
950 * @param GCPhysFault The GC physical address corresponding to pvFault.
951 * @param pCpu Disassembler CPU state.
952 * @param pRange Pointer MMIO range.
953 */
954static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
955{
956 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
957
958 uint64_t uBit = 0;
959 uint64_t uData = 0;
960 unsigned cbIgnored;
961
962 if (!iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cbIgnored))
963 {
964 AssertMsgFailed(("Disassember BT problem..\n"));
965 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
966 }
967 /* The size of the memory operand only matters here. */
968 unsigned cbData = DISGetParamSize(pCpu, &pCpu->param1);
969
970 /* bt [MMIO], reg|imm. */
971 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
972 if (rc == VINF_SUCCESS)
973 {
974 /* Find the bit inside the faulting address */
975 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
976 iomMMIOStatLength(pVM, cbData);
977 }
978
979 return rc;
980}
981
/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 *
 * @note    The MMIO read and write are done as two separate operations, not
 *          atomically like a real locked XCHG.  NOTE(review): presumably the
 *          IOM lock taken by the callers serializes this - confirm.
 */
static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_HC_MMIO_READ_WRITE;

    int rc;
    unsigned cb     = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                /* The only expected failures are defer-to-ring-3 / patched-write statuses. */
                Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            /* The only expected failures are defer-to-ring-3 / patched-read statuses. */
            Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassember XCHG problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}
1056
1057
1058/**
1059 * \#PF Handler callback for MMIO ranges.
1060 *
1061 * @returns VBox status code (appropriate for GC return).
1062 * @param pVM VM Handle.
1063 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1064 * any error code (the EPT misconfig hack).
1065 * @param pCtxCore Trap register frame.
1066 * @param GCPhysFault The GC physical address corresponding to pvFault.
1067 * @param pvUser Pointer to the MMIO ring-3 range entry.
1068 */
1069static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
1070{
1071 /* Take the IOM lock before performing any MMIO. */
1072 int rc = iomLock(pVM);
1073#ifndef IN_RING3
1074 if (rc == VERR_SEM_BUSY)
1075 return VINF_IOM_HC_MMIO_READ_WRITE;
1076#endif
1077 AssertRC(rc);
1078
1079 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1080 Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
1081 GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1082
1083 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1084 Assert(pRange);
1085 Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
1086
1087#ifdef VBOX_WITH_STATISTICS
1088 /*
1089 * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
1090 */
1091 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
1092 if (!pStats)
1093 {
1094# ifdef IN_RING3
1095 iomUnlock(pVM);
1096 return VERR_NO_MEMORY;
1097# else
1098 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1099 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1100 iomUnlock(pVM);
1101 return VINF_IOM_HC_MMIO_READ_WRITE;
1102# endif
1103 }
1104#endif
1105
1106#ifndef IN_RING3
1107 /*
1108 * Should we defer the request right away? This isn't usually the case, so
1109 * do the simple test first and the try deal with uErrorCode being N/A.
1110 */
1111 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1112 || !pRange->CTX_SUFF(pfnReadCallback))
1113 && ( uErrorCode == UINT32_MAX
1114 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1115 : uErrorCode & X86_TRAP_PF_RW
1116 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1117 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1118 )
1119 )
1120 )
1121 {
1122 if (uErrorCode & X86_TRAP_PF_RW)
1123 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1124 else
1125 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1126
1127 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1128 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1129 iomUnlock(pVM);
1130 return VINF_IOM_HC_MMIO_READ_WRITE;
1131 }
1132#endif /* !IN_RING3 */
1133
1134 /*
1135 * Retain the range and do locking.
1136 */
1137 iomMmioRetainRange(pRange);
1138 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1139 PPDMCRITSECT pLock = pDevIns->CTX_SUFF(pCritSect);
1140 if (!pLock)
1141 pLock = &pVM->iom.s.EmtLock;
1142 else
1143 {
1144 iomUnlock(pVM);
1145 rc = PDMCritSectEnter(pLock, VINF_IOM_HC_MMIO_READ_WRITE);
1146 if (rc != VINF_SUCCESS)
1147 {
1148 iomMmioReleaseRange(pVM, pRange);
1149 return rc;
1150 }
1151 }
1152
1153 /*
1154 * Disassemble the instruction and interpret it.
1155 */
1156 PVMCPU pVCpu = VMMGetCpu(pVM);
1157 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1158 unsigned cbOp;
1159 rc = EMInterpretDisasOne(pVM, pVCpu, pCtxCore, pDis, &cbOp);
1160 AssertRC(rc);
1161 if (RT_FAILURE(rc))
1162 {
1163 iomMmioReleaseRange(pVM, pRange);
1164 PDMCritSectLeave(pLock);
1165 return rc;
1166 }
1167 switch (pDis->pCurInstr->opcode)
1168 {
1169 case OP_MOV:
1170 case OP_MOVZX:
1171 case OP_MOVSX:
1172 {
1173 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1174 AssertMsg(uErrorCode == UINT32_MAX || DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->param1.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags), pDis->param2.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param2.flags), uErrorCode));
1175 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1176 ? uErrorCode & X86_TRAP_PF_RW
1177 : DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags))
1178 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1179 else
1180 rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1181 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1182 break;
1183 }
1184
1185
1186#ifdef IOM_WITH_MOVS_SUPPORT
1187 case OP_MOVSB:
1188 case OP_MOVSWD:
1189 {
1190 if (uErrorCode == UINT32_MAX)
1191 rc = VINF_IOM_HC_MMIO_READ_WRITE;
1192 else
1193 {
1194 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1195 PSTAMPROFILE pStat = NULL;
1196 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1197 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1198 }
1199 break;
1200 }
1201#endif
1202
1203 case OP_STOSB:
1204 case OP_STOSWD:
1205 Assert(uErrorCode & X86_TRAP_PF_RW);
1206 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1207 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1208 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1209 break;
1210
1211 case OP_LODSB:
1212 case OP_LODSWD:
1213 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1214 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1215 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1216 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1217 break;
1218
1219 case OP_CMP:
1220 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1221 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1222 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1223 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1224 break;
1225
1226 case OP_AND:
1227 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1228 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1229 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1230 break;
1231
1232 case OP_OR:
1233 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1234 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1235 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1236 break;
1237
1238 case OP_XOR:
1239 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1240 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1241 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1242 break;
1243
1244 case OP_TEST:
1245 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1246 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1247 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1248 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1249 break;
1250
1251 case OP_BT:
1252 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1253 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1254 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1255 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1256 break;
1257
1258 case OP_XCHG:
1259 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1260 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1261 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1262 break;
1263
1264
1265 /*
1266 * The instruction isn't supported. Hand it on to ring-3.
1267 */
1268 default:
1269 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1270 rc = VINF_IOM_HC_MMIO_READ_WRITE;
1271 break;
1272 }
1273
1274 /*
1275 * On success advance EIP.
1276 */
1277 if (rc == VINF_SUCCESS)
1278 pCtxCore->rip += cbOp;
1279 else
1280 {
1281 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1282#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1283 switch (rc)
1284 {
1285 case VINF_IOM_HC_MMIO_READ:
1286 case VINF_IOM_HC_MMIO_READ_WRITE:
1287 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1288 break;
1289 case VINF_IOM_HC_MMIO_WRITE:
1290 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1291 break;
1292 }
1293#endif
1294 }
1295
1296 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1297 iomMmioReleaseRange(pVM, pRange);
1298 PDMCritSectLeave(pLock);
1299 return rc;
1300}
1301
1302/**
1303 * \#PF Handler callback for MMIO ranges.
1304 *
1305 * @returns VBox status code (appropriate for GC return).
1306 * @param pVM VM Handle.
1307 * @param uErrorCode CPU Error code.
1308 * @param pCtxCore Trap register frame.
1309 * @param pvFault The fault address (cr2).
1310 * @param GCPhysFault The GC physical address corresponding to pvFault.
1311 * @param pvUser Pointer to the MMIO ring-3 range entry.
1312 */
1313VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1314{
1315 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1316 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1317 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1318 return VBOXSTRICTRC_VAL(rcStrict);
1319}
1320
1321/**
1322 * Physical access handler for MMIO ranges.
1323 *
1324 * @returns VBox status code (appropriate for GC return).
1325 * @param pVM VM Handle.
1326 * @param uErrorCode CPU Error code.
1327 * @param pCtxCore Trap register frame.
1328 * @param GCPhysFault The GC physical address.
1329 */
1330VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1331{
1332 int rc2 = iomLock(pVM);
1333#ifndef IN_RING3
1334 if (rc2 == VERR_SEM_BUSY)
1335 return VINF_IOM_HC_MMIO_READ_WRITE;
1336#endif
1337 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMmioGetRange(pVM, GCPhysFault));
1338 iomUnlock(pVM);
1339 return VBOXSTRICTRC_VAL(rcStrict);
1340}
1341
1342
1343#ifdef IN_RING3
1344/**
1345 * \#PF Handler callback for MMIO ranges.
1346 *
1347 * @returns VINF_SUCCESS if the handler have carried out the operation.
1348 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1349 * @param pVM VM Handle.
1350 * @param GCPhys The physical address the guest is writing to.
1351 * @param pvPhys The HC mapping of that address.
1352 * @param pvBuf What the guest is reading/writing.
1353 * @param cbBuf How much it's reading/writing.
1354 * @param enmAccessType The access type.
1355 * @param pvUser Pointer to the MMIO range entry.
1356 */
1357DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
1358 PGMACCESSTYPE enmAccessType, void *pvUser)
1359{
1360 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1361 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1362
1363 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1364 AssertPtr(pRange);
1365
1366 /*
1367 * Validate the range.
1368 */
1369 int rc = iomLock(pVM);
1370 AssertRC(rc);
1371 Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
1372
1373 /*
1374 * Perform locking.
1375 */
1376 iomMmioRetainRange(pRange);
1377 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1378 PPDMCRITSECT pLock = pDevIns->CTX_SUFF(pCritSect);
1379 if (!pLock)
1380 pLock = &pVM->iom.s.EmtLock;
1381 else
1382 {
1383 iomUnlock(pVM);
1384 rc = PDMCritSectEnter(pLock, VINF_IOM_HC_MMIO_READ_WRITE);
1385 if (rc != VINF_SUCCESS)
1386 {
1387 iomMmioReleaseRange(pVM, pRange);
1388 return rc;
1389 }
1390 }
1391
1392 /*
1393 * Perform the access.
1394 */
1395 if (enmAccessType == PGMACCESSTYPE_READ)
1396 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1397 else
1398 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1399
1400 AssertRC(rc);
1401 iomMmioReleaseRange(pVM, pRange);
1402 PDMCritSectLeave(pLock);
1403 return rc;
1404}
1405#endif /* IN_RING3 */
1406
1407
1408/**
1409 * Reads a MMIO register.
1410 *
1411 * @returns VBox status code.
1412 *
1413 * @param pVM VM handle.
1414 * @param GCPhys The physical address to read.
1415 * @param pu32Value Where to store the value read.
1416 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1417 */
1418VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1419{
1420 /* Take the IOM lock before performing any MMIO. */
1421 int rc = iomLock(pVM);
1422#ifndef IN_RING3
1423 if (rc == VERR_SEM_BUSY)
1424 return VINF_IOM_HC_MMIO_WRITE;
1425#endif
1426 AssertRC(rc);
1427#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1428 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1429#endif
1430
1431 /*
1432 * Lookup the current context range node and statistics.
1433 */
1434 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1435 AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1436 if (!pRange)
1437 {
1438 iomUnlock(pVM);
1439 return VERR_INTERNAL_ERROR;
1440 }
1441#ifdef VBOX_WITH_STATISTICS
1442 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1443 if (!pStats)
1444 {
1445 iomUnlock(pVM);
1446# ifdef IN_RING3
1447 return VERR_NO_MEMORY;
1448# else
1449 return VINF_IOM_HC_MMIO_READ;
1450# endif
1451 }
1452 STAM_COUNTER_INC(&pStats->Accesses);
1453#endif /* VBOX_WITH_STATISTICS */
1454
1455 if (pRange->CTX_SUFF(pfnReadCallback))
1456 {
1457 /*
1458 * Perform locking.
1459 */
1460 iomMmioRetainRange(pRange);
1461 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1462 PPDMCRITSECT pLock = pDevIns->CTX_SUFF(pCritSect);
1463 if (!pLock)
1464 pLock = &pVM->iom.s.EmtLock;
1465 else
1466 {
1467 iomUnlock(pVM);
1468 rc = PDMCritSectEnter(pLock, VINF_IOM_HC_MMIO_WRITE);
1469 if (rc != VINF_SUCCESS)
1470 {
1471 iomMmioReleaseRange(pVM, pRange);
1472 return rc;
1473 }
1474 }
1475
1476 /*
1477 * Perform the read and deal with the result.
1478 */
1479 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1480 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, (unsigned)cbValue);
1481 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1482 switch (rc)
1483 {
1484 case VINF_SUCCESS:
1485 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1486 iomMmioReleaseRange(pVM, pRange);
1487 PDMCritSectLeave(pLock);
1488 return rc;
1489#ifndef IN_RING3
1490 case VINF_IOM_HC_MMIO_READ:
1491 case VINF_IOM_HC_MMIO_READ_WRITE:
1492 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1493#endif
1494 default:
1495 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1496 iomMmioReleaseRange(pVM, pRange);
1497 PDMCritSectLeave(pLock);
1498 return rc;
1499
1500 case VINF_IOM_MMIO_UNUSED_00:
1501 switch (cbValue)
1502 {
1503 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1504 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1505 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1506 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1507 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1508 }
1509 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1510 iomMmioReleaseRange(pVM, pRange);
1511 PDMCritSectLeave(pLock);
1512 return VINF_SUCCESS;
1513
1514 case VINF_IOM_MMIO_UNUSED_FF:
1515 switch (cbValue)
1516 {
1517 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1518 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1519 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1520 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1521 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1522 }
1523 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1524 iomMmioReleaseRange(pVM, pRange);
1525 PDMCritSectLeave(pLock);
1526 return VINF_SUCCESS;
1527 }
1528 /* not reached */
1529 }
1530#ifndef IN_RING3
1531 if (pRange->pfnReadCallbackR3)
1532 {
1533 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1534 iomUnlock(pVM);
1535 return VINF_IOM_HC_MMIO_READ;
1536 }
1537#endif
1538
1539 /*
1540 * Unassigned memory - this is actually not supposed t happen...
1541 */
1542 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1543 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1544 switch (cbValue)
1545 {
1546 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1547 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1548 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1549 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1550 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1551 }
1552 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1553 iomUnlock(pVM);
1554 return VINF_SUCCESS;
1555}
1556
1557
1558/**
1559 * Writes to a MMIO register.
1560 *
1561 * @returns VBox status code.
1562 *
1563 * @param pVM VM handle.
1564 * @param GCPhys The physical address to write to.
1565 * @param u32Value The value to write.
1566 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1567 */
1568VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1569{
1570 /* Take the IOM lock before performing any MMIO. */
1571 int rc = iomLock(pVM);
1572#ifndef IN_RING3
1573 if (rc == VERR_SEM_BUSY)
1574 return VINF_IOM_HC_MMIO_WRITE;
1575#endif
1576 AssertRC(rc);
1577#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1578 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
1579#endif
1580
1581 /*
1582 * Lookup the current context range node.
1583 */
1584 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1585 AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1586 if (!pRange)
1587 {
1588 iomUnlock(pVM);
1589 return VERR_INTERNAL_ERROR;
1590 }
1591#ifdef VBOX_WITH_STATISTICS
1592 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1593 if (!pStats)
1594 {
1595 iomUnlock(pVM);
1596# ifdef IN_RING3
1597 return VERR_NO_MEMORY;
1598# else
1599 return VINF_IOM_HC_MMIO_WRITE;
1600# endif
1601 }
1602 STAM_COUNTER_INC(&pStats->Accesses);
1603#endif /* VBOX_WITH_STATISTICS */
1604
1605 if (pRange->CTX_SUFF(pfnWriteCallback))
1606 {
1607 /*
1608 * Perform locking.
1609 */
1610 iomMmioRetainRange(pRange);
1611 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1612 PPDMCRITSECT pLock = pDevIns->CTX_SUFF(pCritSect);
1613 if (!pLock)
1614 pLock = &pVM->iom.s.EmtLock;
1615 else
1616 {
1617 iomUnlock(pVM);
1618 rc = PDMCritSectEnter(pLock, VINF_IOM_HC_MMIO_READ);
1619 if (rc != VINF_SUCCESS)
1620 {
1621 iomMmioReleaseRange(pVM, pRange);
1622 return rc;
1623 }
1624 }
1625
1626 /*
1627 * Perform the write.
1628 */
1629 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1630 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1631 GCPhys, &u32Value, (unsigned)cbValue);
1632 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1633#ifndef IN_RING3
1634 if ( rc == VINF_IOM_HC_MMIO_WRITE
1635 || rc == VINF_IOM_HC_MMIO_READ_WRITE)
1636 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1637#endif
1638 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
1639 iomMmioReleaseRange(pVM, pRange);
1640 PDMCritSectLeave(pLock);
1641 return rc;
1642 }
1643#ifndef IN_RING3
1644 if (pRange->pfnWriteCallbackR3)
1645 {
1646 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1647 iomUnlock(pVM);
1648 return VINF_IOM_HC_MMIO_WRITE;
1649 }
1650#endif
1651
1652 /*
1653 * No write handler, nothing to do.
1654 */
1655 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1656 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1657 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
1658 iomUnlock(pVM);
1659 return VINF_SUCCESS;
1660}
1661
1662/**
1663 * [REP*] INSB/INSW/INSD
1664 * ES:EDI,DX[,ECX]
1665 *
1666 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1667 *
1668 * @returns Strict VBox status code. Informational status codes other than the one documented
1669 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1670 * @retval VINF_SUCCESS Success.
1671 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1672 * status code must be passed on to EM.
1673 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1674 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1675 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1676 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1677 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1678 *
1679 * @param pVM The virtual machine.
1680 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1681 * @param uPort IO Port
1682 * @param uPrefix IO instruction prefix
1683 * @param cbTransfer Size of transfer unit
1684 */
1685VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1686{
1687 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
1688
1689 /*
1690 * We do not support REPNE or decrementing destination
1691 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
1692 */
1693 if ( (uPrefix & PREFIX_REPNE)
1694 || pRegFrame->eflags.Bits.u1DF)
1695 return VINF_EM_RAW_EMULATE_INSTR;
1696
1697 PVMCPU pVCpu = VMMGetCpu(pVM);
1698
1699 /*
1700 * Get bytes/words/dwords count to transfer.
1701 */
1702 RTGCUINTREG cTransfers = 1;
1703 if (uPrefix & PREFIX_REP)
1704 {
1705#ifndef IN_RC
1706 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
1707 && pRegFrame->rcx >= _4G)
1708 return VINF_EM_RAW_EMULATE_INSTR;
1709#endif
1710 cTransfers = pRegFrame->ecx;
1711
1712 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1713 cTransfers &= 0xffff;
1714
1715 if (!cTransfers)
1716 return VINF_SUCCESS;
1717 }
1718
1719 /* Convert destination address es:edi. */
1720 RTGCPTR GCPtrDst;
1721 int rc2 = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1722 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1723 &GCPtrDst);
1724 if (RT_FAILURE(rc2))
1725 {
1726 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
1727 return VINF_EM_RAW_EMULATE_INSTR;
1728 }
1729
1730 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
1731 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
1732
1733 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
1734 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1735 if (rc2 != VINF_SUCCESS)
1736 {
1737 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
1738 return VINF_EM_RAW_EMULATE_INSTR;
1739 }
1740
1741 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1742 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1743 if (cTransfers > 1)
1744 {
1745 /* If the device supports string transfers, ask it to do as
1746 * much as it wants. The rest is done with single-word transfers. */
1747 const RTGCUINTREG cTransfersOrg = cTransfers;
1748 rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
1749 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
1750 pRegFrame->rdi += (cTransfersOrg - cTransfers) * cbTransfer;
1751 }
1752
1753#ifdef IN_RC
1754 MMGCRamRegisterTrapHandler(pVM);
1755#endif
1756 while (cTransfers && rcStrict == VINF_SUCCESS)
1757 {
1758 uint32_t u32Value;
1759 rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
1760 if (!IOM_SUCCESS(rcStrict))
1761 break;
1762 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
1763 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1764 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
1765 pRegFrame->rdi += cbTransfer;
1766 cTransfers--;
1767 }
1768#ifdef IN_RC
1769 MMGCRamDeregisterTrapHandler(pVM);
1770#endif
1771
1772 /* Update ecx on exit. */
1773 if (uPrefix & PREFIX_REP)
1774 pRegFrame->ecx = cTransfers;
1775
1776 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1777 return rcStrict;
1778}
1779
1780
1781/**
1782 * [REP*] INSB/INSW/INSD
1783 * ES:EDI,DX[,ECX]
1784 *
1785 * @returns Strict VBox status code. Informational status codes other than the one documented
1786 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1787 * @retval VINF_SUCCESS Success.
1788 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1789 * status code must be passed on to EM.
1790 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1791 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1792 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1793 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1794 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1795 *
1796 * @param pVM The virtual machine.
1797 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1798 * @param pCpu Disassembler CPU state.
1799 */
1800VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1801{
1802 /*
1803 * Get port number directly from the register (no need to bother the
1804 * disassembler). And get the I/O register size from the opcode / prefix.
1805 */
1806 RTIOPORT Port = pRegFrame->edx & 0xffff;
1807 unsigned cb = 0;
1808 if (pCpu->pCurInstr->opcode == OP_INSB)
1809 cb = 1;
1810 else
1811 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1812
1813 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1814 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1815 {
1816 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1817 return rcStrict;
1818 }
1819
1820 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1821}
1822
1823
1824/**
1825 * [REP*] OUTSB/OUTSW/OUTSD
1826 * DS:ESI,DX[,ECX]
1827 *
1828 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1829 *
1830 * @returns Strict VBox status code. Informational status codes other than the one documented
1831 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1832 * @retval VINF_SUCCESS Success.
1833 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1834 * status code must be passed on to EM.
1835 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1836 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1837 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1838 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1839 *
1840 * @param pVM The virtual machine.
1841 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1842 * @param uPort IO Port
1843 * @param uPrefix IO instruction prefix
1844 * @param cbTransfer Size of transfer unit
1845 */
1846VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1847{
1848 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
1849
1850 /*
1851 * We do not support segment prefixes, REPNE or
1852 * decrementing source pointer.
1853 */
1854 if ( (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
1855 || pRegFrame->eflags.Bits.u1DF)
1856 return VINF_EM_RAW_EMULATE_INSTR;
1857
1858 PVMCPU pVCpu = VMMGetCpu(pVM);
1859
1860 /*
1861 * Get bytes/words/dwords count to transfer.
1862 */
1863 RTGCUINTREG cTransfers = 1;
1864 if (uPrefix & PREFIX_REP)
1865 {
1866#ifndef IN_RC
1867 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
1868 && pRegFrame->rcx >= _4G)
1869 return VINF_EM_RAW_EMULATE_INSTR;
1870#endif
1871 cTransfers = pRegFrame->ecx;
1872 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1873 cTransfers &= 0xffff;
1874
1875 if (!cTransfers)
1876 return VINF_SUCCESS;
1877 }
1878
1879 /* Convert source address ds:esi. */
1880 RTGCPTR GCPtrSrc;
1881 int rc2 = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
1882 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1883 &GCPtrSrc);
1884 if (RT_FAILURE(rc2))
1885 {
1886 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
1887 return VINF_EM_RAW_EMULATE_INSTR;
1888 }
1889
1890 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1891 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
1892 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
1893 (cpl == 3) ? X86_PTE_US : 0);
1894 if (rc2 != VINF_SUCCESS)
1895 {
1896 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
1897 return VINF_EM_RAW_EMULATE_INSTR;
1898 }
1899
1900 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1901 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1902 if (cTransfers > 1)
1903 {
1904 /*
1905 * If the device supports string transfers, ask it to do as
1906 * much as it wants. The rest is done with single-word transfers.
1907 */
1908 const RTGCUINTREG cTransfersOrg = cTransfers;
1909 rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
1910 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
1911 pRegFrame->rsi += (cTransfersOrg - cTransfers) * cbTransfer;
1912 }
1913
1914#ifdef IN_RC
1915 MMGCRamRegisterTrapHandler(pVM);
1916#endif
1917
1918 while (cTransfers && rcStrict == VINF_SUCCESS)
1919 {
1920 uint32_t u32Value = 0;
1921 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
1922 if (rcStrict != VINF_SUCCESS)
1923 break;
1924 rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
1925 if (!IOM_SUCCESS(rcStrict))
1926 break;
1927 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
1928 pRegFrame->rsi += cbTransfer;
1929 cTransfers--;
1930 }
1931
1932#ifdef IN_RC
1933 MMGCRamDeregisterTrapHandler(pVM);
1934#endif
1935
1936 /* Update ecx on exit. */
1937 if (uPrefix & PREFIX_REP)
1938 pRegFrame->ecx = cTransfers;
1939
1940 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1941 return rcStrict;
1942}
1943
1944
1945/**
1946 * [REP*] OUTSB/OUTSW/OUTSD
1947 * DS:ESI,DX[,ECX]
1948 *
1949 * @returns Strict VBox status code. Informational status codes other than the one documented
1950 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1951 * @retval VINF_SUCCESS Success.
1952 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1953 * status code must be passed on to EM.
1954 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1955 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1956 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1957 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1958 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1959 *
1960 * @param pVM The virtual machine.
1961 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1962 * @param pCpu Disassembler CPU state.
1963 */
1964VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1965{
1966 /*
1967 * Get port number from the first parameter.
1968 * And get the I/O register size from the opcode / prefix.
1969 */
1970 uint64_t Port = 0;
1971 unsigned cb = 0;
1972 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1973 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1974 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1975 cb = 1;
1976 else
1977 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1978
1979 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1980 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1981 {
1982 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1983 return rcStrict;
1984 }
1985
1986 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1987}
1988
1989#ifndef IN_RC
1990
1991/**
1992 * Mapping an MMIO2 page in place of an MMIO page for direct access.
1993 *
1994 * (This is a special optimization used by the VGA device.)
1995 *
1996 * @returns VBox status code.
1997 *
1998 * @param pVM The virtual machine.
1999 * @param GCPhys The address of the MMIO page to be changed.
2000 * @param GCPhysRemapped The address of the MMIO2 page.
2001 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2002 * for the time being.
2003 */
2004VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2005{
2006 /* Currently only called from the VGA device during MMIO. */
2007 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2008 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2009 PVMCPU pVCpu = VMMGetCpu(pVM);
2010
2011 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2012 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2013 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2014 && !HWACCMIsNestedPagingActive(pVM)))
2015 return VINF_SUCCESS; /* ignore */
2016
2017 PDMCritSectEnter(&pVM->iom.s.EmtLock, VINF_SUCCESS);
2018
2019 /*
2020 * Lookup the context range node the page belongs to.
2021 */
2022 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
2023 AssertMsgReturn(pRange,
2024 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2025
2026 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2027 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2028
2029 /*
2030 * Do the aliasing; page align the addresses since PGM is picky.
2031 */
2032 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2033 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2034
2035 int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2036
2037 PDMCritSectLeave(&pVM->iom.s.EmtLock);
2038 AssertRCReturn(rc, rc);
2039
2040 /*
2041 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2042 * can simply prefetch it.
2043 *
2044 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2045 */
2046#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2047# ifdef VBOX_STRICT
2048 uint64_t fFlags;
2049 RTHCPHYS HCPhys;
2050 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2051 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2052# endif
2053#endif
2054 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2055 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2056 return VINF_SUCCESS;
2057}
2058
2059
2060/**
2061 * Mapping a HC page in place of an MMIO page for direct access.
2062 *
2063 * (This is a special optimization used by the APIC in the VT-x case.)
2064 *
2065 * @returns VBox status code.
2066 *
2067 * @param pVM The virtual machine.
2068 * @param GCPhys The address of the MMIO page to be changed.
2069 * @param HCPhys The address of the host physical page.
2070 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2071 * for the time being.
2072 */
2073VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2074{
2075 /* Currently only called from VT-x code during a page fault. */
2076 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2077
2078 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2079 Assert(HWACCMIsEnabled(pVM));
2080
2081 PVMCPU pVCpu = VMMGetCpu(pVM);
2082
2083 /*
2084 * Lookup the context range node the page belongs to.
2085 */
2086#ifdef VBOX_STRICT
2087 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2088 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2089 AssertMsgReturn(pRange,
2090 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2091 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2092 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2093#endif
2094
2095 /*
2096 * Do the aliasing; page align the addresses since PGM is picky.
2097 */
2098 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2099 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2100
2101 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2102 AssertRCReturn(rc, rc);
2103
2104 /*
2105 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2106 * can simply prefetch it.
2107 *
2108 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2109 */
2110 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2111 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2112 return VINF_SUCCESS;
2113}
2114
2115
2116/**
2117 * Reset a previously modified MMIO region; restore the access flags.
2118 *
2119 * @returns VBox status code.
2120 *
2121 * @param pVM The virtual machine.
2122 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2123 */
2124VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2125{
2126 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2127
2128 PVMCPU pVCpu = VMMGetCpu(pVM);
2129
2130 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2131 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2132 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2133 && !HWACCMIsNestedPagingActive(pVM)))
2134 return VINF_SUCCESS; /* ignore */
2135
2136 /*
2137 * Lookup the context range node the page belongs to.
2138 */
2139#ifdef VBOX_STRICT
2140 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2141 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2142 AssertMsgReturn(pRange,
2143 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2144 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2145 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2146#endif
2147
2148 /*
2149 * Call PGM to do the job work.
2150 *
2151 * After the call, all the pages should be non-present... unless there is
2152 * a page pool flush pending (unlikely).
2153 */
2154 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2155 AssertRC(rc);
2156
2157#ifdef VBOX_STRICT
2158 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2159 {
2160 uint32_t cb = pRange->cb;
2161 GCPhys = pRange->GCPhys;
2162 while (cb)
2163 {
2164 uint64_t fFlags;
2165 RTHCPHYS HCPhys;
2166 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2167 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2168 cb -= PAGE_SIZE;
2169 GCPhys += PAGE_SIZE;
2170 }
2171 }
2172#endif
2173 return rc;
2174}
2175
2176#endif /* !IN_RC */
2177
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette