VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 35346

Last change on this file since 35346 was 35346, checked in by vboxsync, 14 years ago

VMM reorg: Moving the public include files from include/VBox to include/VBox/vmm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 72.6 KB
Line 
1/* $Id: IOMAllMMIO.cpp 35346 2010-12-27 16:13:13Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#include "IOMInternal.h"
32#include <VBox/vmm/vm.h>
33#include <VBox/vmm/vmm.h>
34#include <VBox/vmm/hwaccm.h>
35
36#include <VBox/dis.h>
37#include <VBox/disopcode.h>
38#include <VBox/vmm/pdmdev.h>
39#include <VBox/param.h>
40#include <VBox/err.h>
41#include <iprt/assert.h>
42#include <VBox/log.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45
46
47/*******************************************************************************
48* Global Variables *
49*******************************************************************************/
50
/**
 * Array for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 *
 * Indexed by the access size in bytes.  Entries for sizes that are not valid
 * power-of-two operand sizes (0, 3, 5, 6, 7) are ~0 and must never be used;
 * callers guarantee this via asserts on cb before using SIZE_2_SHIFT.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0,    /* 0 - invalid */
    0,     /* *1 == 2^0 */
    1,     /* *2 == 2^1 */
    ~0,    /* 3 - invalid */
    2,     /* *4 == 2^2 */
    ~0,    /* 5 - invalid */
    ~0,    /* 6 - invalid */
    ~0,    /* 7 - invalid */
    3      /* *8 == 2^3 */
};

/**
 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 * @note No bounds or validity checking; cb must be 1, 2, 4 or 8.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
71
72
/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 *
 * @returns VBox status code from the device write callback, or VINF_SUCCESS
 *          when no callback is registered in any context (the write is then
 *          silently dropped).
 * @param   pVM         The VM handle.
 * @param   pRange      The MMIO range being written to.
 * @param   GCPhysFault The guest-physical address of the write.
 * @param   pvData      The bytes to write.
 * @param   cb          The number of bytes to write.
 *
 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
 */
DECLINLINE(int) iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    /* pStats is only referenced by the STAM_* macros, which compile to
       nothing unless VBOX_WITH_STATISTICS is defined. */
    PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
    Assert(pStats);
#endif

    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
    int rc;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
        rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
    else
        rc = VINF_SUCCESS; /* No write callback at all: writes are ignored. */
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rc;
}
94
95
/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 *
 * @returns VBox status code.  The informational statuses
 *          VINF_IOM_MMIO_UNUSED_FF and VINF_IOM_MMIO_UNUSED_00 from the
 *          callback are converted to VINF_SUCCESS after filling the output
 *          buffer with the corresponding pattern (all-ones / all-zeros).
 * @param   pVM         The VM handle.
 * @param   pRange      The MMIO range being read.
 * @param   GCPhys      The guest-physical address of the read.
 * @param   pvValue     Where to store the bytes read.
 * @param   cbValue     The number of bytes to read; 1, 2, 4 or 8 (release
 *                      asserted in the pattern-fill switches below).
 */
DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    /* pStats is only referenced by the STAM_* macros, which compile to
       nothing unless VBOX_WITH_STATISTICS is defined. */
    PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
    Assert(pStats);
#endif

    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
    int rc;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
        rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
    else
        rc = VINF_IOM_MMIO_UNUSED_FF; /* No read callback: read as all-ones, like unused MMIO. */
    if (rc != VINF_SUCCESS)
    {
        switch (rc)
        {
            /* Device requested the "unused register" pattern: all bits set. */
            case VINF_IOM_MMIO_UNUSED_FF:
                switch (cbValue)
                {
                    case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
                    case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
                    case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
                    case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
                    default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
                }
                rc = VINF_SUCCESS;
                break;

            /* Device requested the all-zero pattern. */
            case VINF_IOM_MMIO_UNUSED_00:
                switch (cbValue)
                {
                    case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
                    case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
                    case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
                    case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
                    default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
                }
                rc = VINF_SUCCESS;
                break;

            /* Any other non-VINF_SUCCESS status is passed through unchanged. */
        }
    }
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rc;
}
145
146
147/**
148 * Internal - statistics only.
149 */
150DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
151{
152#ifdef VBOX_WITH_STATISTICS
153 switch (cb)
154 {
155 case 1:
156 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
157 break;
158 case 2:
159 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
160 break;
161 case 4:
162 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
163 break;
164 case 8:
165 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
166 break;
167 default:
168 /* No way. */
169 AssertMsgFailed(("Invalid data length %d\n", cb));
170 break;
171 }
172#else
173 NOREF(pVM); NOREF(cb);
174#endif
175}
176
177
178/**
179 * MOV reg, mem (read)
180 * MOVZX reg, mem (read)
181 * MOVSX reg, mem (read)
182 *
183 * @returns VBox status code.
184 *
185 * @param pVM The virtual machine.
186 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
187 * @param pCpu Disassembler CPU state.
188 * @param pRange Pointer MMIO range.
189 * @param GCPhysFault The GC physical address corresponding to pvFault.
190 */
191static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
192{
193 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
194
195 /*
196 * Get the data size from parameter 2,
197 * and call the handler function to get the data.
198 */
199 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
200 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
201
202 uint64_t u64Data = 0;
203 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
204 if (rc == VINF_SUCCESS)
205 {
206 /*
207 * Do sign extension for MOVSX.
208 */
209 /** @todo checkup MOVSX implementation! */
210 if (pCpu->pCurInstr->opcode == OP_MOVSX)
211 {
212 if (cb == 1)
213 {
214 /* DWORD <- BYTE */
215 int64_t iData = (int8_t)u64Data;
216 u64Data = (uint64_t)iData;
217 }
218 else
219 {
220 /* DWORD <- WORD */
221 int64_t iData = (int16_t)u64Data;
222 u64Data = (uint64_t)iData;
223 }
224 }
225
226 /*
227 * Store the result to register (parameter 1).
228 */
229 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
230 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
231 }
232
233 if (rc == VINF_SUCCESS)
234 iomMMIOStatLength(pVM, cb);
235 return rc;
236}
237
238
239/**
240 * MOV mem, reg|imm (write)
241 *
242 * @returns VBox status code.
243 *
244 * @param pVM The virtual machine.
245 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
246 * @param pCpu Disassembler CPU state.
247 * @param pRange Pointer MMIO range.
248 * @param GCPhysFault The GC physical address corresponding to pvFault.
249 */
250static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
251{
252 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
253
254 /*
255 * Get data to write from second parameter,
256 * and call the callback to write it.
257 */
258 unsigned cb = 0;
259 uint64_t u64Data = 0;
260 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
261 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
262
263 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
264 if (rc == VINF_SUCCESS)
265 iomMMIOStatLength(pVM, cb);
266 return rc;
267}
268
269
/**
 * Wrapper for reading virtual memory.
 *
 * @returns VBox status code.
 * @param   pVCpu   The virtual CPU handle.
 * @param   pDest   Where to store the bytes read.
 * @param   GCSrc   The guest virtual address to read from.
 * @param   cb      The number of bytes to read.
 */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    /* Raw-mode context: direct read without trap handling. */
    return MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
#endif
}
281
282
/**
 * Wrapper for writing virtual memory.
 *
 * @returns VBox status code.
 * @param   pVCpu       The virtual CPU handle.
 * @param   pCtxCore    The register frame (only used in R0/RC; ignored in R3).
 * @param   GCPtrDst    The guest virtual address to write to.
 * @param   pvSrc       The bytes to write.
 * @param   cb          The number of bytes to write.
 */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code. Some thought needs to be spent on theoretical concurrency issues as
     *        as well since we're not behind the pgm lock and handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    /* R0/RC: interpreted write that skips access handlers, no trap raised. */
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
#endif
}
299
300
#ifdef IOM_WITH_MOVS_SUPPORT
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   fWriteAccess    Set if the faulting access was a write to the MMIO
 *                          range ([Mem] -> [MMIO]); clear for a read from it.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   ppStat      Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
        return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->prefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* Huge 64-bit REP counts go to the recompiler. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        /* In 16-bit mode only CX is used for the repeat count. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    /* EFLAGS.DF selects ascending or descending copy direction. */
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_HC_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            /* NOTE(review): u32Data is only 32 bits wide while the assertion
               above allows cb up to 8; an 8-byte transfer would overflow it.
               Confirm cb is limited to 4 on this path (MOVSB/W/D only). */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->prefix & PREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_HC_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_HC_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMMIOGetRange(&pVM->iom.s, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            /* Destination write callback exists only in ring-3: defer. */
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
                return VINF_IOM_HC_MMIO_READ_WRITE;

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->prefix & PREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */
537
538
539/**
540 * [REP] STOSB
541 * [REP] STOSW
542 * [REP] STOSD
543 *
544 * Restricted implementation.
545 *
546 *
547 * @returns VBox status code.
548 *
549 * @param pVM The virtual machine.
550 * @param pRegFrame Trap register frame.
551 * @param GCPhysFault The GC physical address corresponding to pvFault.
552 * @param pCpu Disassembler CPU state.
553 * @param pRange Pointer MMIO range.
554 */
555static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
556{
557 /*
558 * We do not support segment prefixes or REPNE..
559 */
560 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
561 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
562
563 /*
564 * Get bytes/words/dwords count to copy.
565 */
566 uint32_t cTransfers = 1;
567 if (pCpu->prefix & PREFIX_REP)
568 {
569#ifndef IN_RC
570 if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM), pRegFrame)
571 && pRegFrame->rcx >= _4G)
572 return VINF_EM_RAW_EMULATE_INSTR;
573#endif
574
575 cTransfers = pRegFrame->ecx;
576 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
577 cTransfers &= 0xffff;
578
579 if (!cTransfers)
580 return VINF_SUCCESS;
581 }
582
583/** @todo r=bird: bounds checks! */
584
585 /*
586 * Get data size.
587 */
588 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
589 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
590 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
591
592#ifdef VBOX_WITH_STATISTICS
593 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
594 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
595#endif
596
597
598 RTGCPHYS Phys = GCPhysFault;
599 uint32_t u32Data = pRegFrame->eax;
600 int rc;
601 if (pRange->CTX_SUFF(pfnFillCallback))
602 {
603 /*
604 * Use the fill callback.
605 */
606 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
607 if (offIncrement > 0)
608 {
609 /* addr++ variant. */
610 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys, u32Data, cb, cTransfers);
611 if (rc == VINF_SUCCESS)
612 {
613 /* Update registers. */
614 pRegFrame->rdi += cTransfers << SIZE_2_SHIFT(cb);
615 if (pCpu->prefix & PREFIX_REP)
616 pRegFrame->ecx = 0;
617 }
618 }
619 else
620 {
621 /* addr-- variant. */
622 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)), u32Data, cb, cTransfers);
623 if (rc == VINF_SUCCESS)
624 {
625 /* Update registers. */
626 pRegFrame->rdi -= cTransfers << SIZE_2_SHIFT(cb);
627 if (pCpu->prefix & PREFIX_REP)
628 pRegFrame->ecx = 0;
629 }
630 }
631 }
632 else
633 {
634 /*
635 * Use the write callback.
636 */
637 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
638
639 /* fill loop. */
640 do
641 {
642 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
643 if (rc != VINF_SUCCESS)
644 break;
645
646 Phys += offIncrement;
647 pRegFrame->rdi += offIncrement;
648 cTransfers--;
649 } while (cTransfers);
650
651 /* Update ecx on exit. */
652 if (pCpu->prefix & PREFIX_REP)
653 pRegFrame->ecx = cTransfers;
654 }
655
656 /*
657 * Work statistics and return.
658 */
659 if (rc == VINF_SUCCESS)
660 iomMMIOStatLength(pVM, cb);
661 return rc;
662}
663
664
665/**
666 * [REP] LODSB
667 * [REP] LODSW
668 * [REP] LODSD
669 *
670 * Restricted implementation.
671 *
672 *
673 * @returns VBox status code.
674 *
675 * @param pVM The virtual machine.
676 * @param pRegFrame Trap register frame.
677 * @param GCPhysFault The GC physical address corresponding to pvFault.
678 * @param pCpu Disassembler CPU state.
679 * @param pRange Pointer MMIO range.
680 */
681static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
682{
683 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
684
685 /*
686 * We do not support segment prefixes or REP*.
687 */
688 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
689 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
690
691 /*
692 * Get data size.
693 */
694 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
695 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
696 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
697
698 /*
699 * Perform read.
700 */
701 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
702 if (rc == VINF_SUCCESS)
703 pRegFrame->rsi += offIncrement;
704
705 /*
706 * Work statistics and return.
707 */
708 if (rc == VINF_SUCCESS)
709 iomMMIOStatLength(pVM, cb);
710 return rc;
711}
712
713
714/**
715 * CMP [MMIO], reg|imm
716 * CMP reg|imm, [MMIO]
717 *
718 * Restricted implementation.
719 *
720 *
721 * @returns VBox status code.
722 *
723 * @param pVM The virtual machine.
724 * @param pRegFrame Trap register frame.
725 * @param GCPhysFault The GC physical address corresponding to pvFault.
726 * @param pCpu Disassembler CPU state.
727 * @param pRange Pointer MMIO range.
728 */
729static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
730{
731 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
732
733 /*
734 * Get the operands.
735 */
736 unsigned cb = 0;
737 uint64_t uData1 = 0;
738 uint64_t uData2 = 0;
739 int rc;
740 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
741 /* cmp reg, [MMIO]. */
742 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
743 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
744 /* cmp [MMIO], reg|imm. */
745 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
746 else
747 {
748 AssertMsgFailed(("Disassember CMP problem..\n"));
749 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
750 }
751
752 if (rc == VINF_SUCCESS)
753 {
754#if HC_ARCH_BITS == 32
755 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
756 if (cb > 4)
757 return VINF_IOM_HC_MMIO_READ_WRITE;
758#endif
759 /* Emulate CMP and update guest flags. */
760 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
761 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
762 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
763 iomMMIOStatLength(pVM, cb);
764 }
765
766 return rc;
767}
768
769
/**
 * AND [MMIO], reg|imm
 * AND reg, [MMIO]
 * OR [MMIO], reg|imm
 * OR reg, [MMIO]
 * (XOR forms likewise; the actual operation is supplied via pfnEmulate.)
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   pfnEmulate  Instruction emulation function (e.g. EMEmulateAnd).
 */
static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
{
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    bool fAndWrite;    /* Set when the MMIO location is the destination (read-modify-write). */
    int rc;

#ifdef LOG_ENABLED
    /* Instruction name for the LogFlow below; only built in logging builds. */
    const char *pszInstr;

    if (pCpu->pCurInstr->opcode == OP_XOR)
        pszInstr = "Xor";
    else if (pCpu->pCurInstr->opcode == OP_OR)
        pszInstr = "Or";
    else if (pCpu->pCurInstr->opcode == OP_AND)
        pszInstr = "And";
    else
        pszInstr = "OrXorAnd??";
#endif

    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_HC_MMIO_READ_WRITE;
#endif
        /* and reg, [MMIO]. */
        Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
        fAndWrite = false;
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_HC_MMIO_READ_WRITE;
#endif
        /* and [MMIO], reg|imm. */
        fAndWrite = true;
        /* A read-modify-write on MMIO needs BOTH callbacks usable in this
           context; otherwise defer to ring-3. */
        if (    (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
            &&  (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
            rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        else
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
    }
    else
    {
        AssertMsgFailed(("Disassember AND problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
        /* Emulate AND and update guest flags. */
        uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);

        LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));

        if (fAndWrite)
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
        else
        {
            /* Store result to register. */
            bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
            AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
        }
        if (rc == VINF_SUCCESS)
        {
            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
            iomMMIOStatLength(pVM, cb);
        }
    }

    return rc;
}
869
870
/**
 * TEST [MMIO], reg|imm
 * TEST reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;

    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
        /* and test, [MMIO]. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
        /* test [MMIO], reg|imm. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
    }
    else
    {
        AssertMsgFailed(("Disassember TEST problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        /* NOTE(review): unlike iomInterpretOrXorAnd, this check runs after the
           MMIO read was already performed, so an 8-byte TEST repeats the read
           in ring-3 -- verify reads on this path are side-effect free, or
           hoist the check before the read. */
        if (cb > 4)
            return VINF_IOM_HC_MMIO_READ_WRITE;
#endif

        /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
        uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}
928
929
930/**
931 * BT [MMIO], reg|imm
932 *
933 * Restricted implementation.
934 *
935 *
936 * @returns VBox status code.
937 *
938 * @param pVM The virtual machine.
939 * @param pRegFrame Trap register frame.
940 * @param GCPhysFault The GC physical address corresponding to pvFault.
941 * @param pCpu Disassembler CPU state.
942 * @param pRange Pointer MMIO range.
943 */
944static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
945{
946 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
947
948 uint64_t uBit = 0;
949 uint64_t uData = 0;
950 unsigned cbIgnored;
951
952 if (!iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cbIgnored))
953 {
954 AssertMsgFailed(("Disassember BT problem..\n"));
955 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
956 }
957 /* The size of the memory operand only matters here. */
958 unsigned cbData = DISGetParamSize(pCpu, &pCpu->param1);
959
960 /* bt [MMIO], reg|imm. */
961 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
962 if (rc == VINF_SUCCESS)
963 {
964 /* Find the bit inside the faulting address */
965 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
966 iomMMIOStatLength(pVM, cbData);
967 }
968
969 return rc;
970}
971
/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.
 *
 * NOTE(review): implemented as a separate read followed by a write, so it is
 * not atomic the way a locked XCHG is -- confirm callers serialize MMIO
 * access (e.g. via the IOM lock) before relying on this for SMP guests.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_HC_MMIO_READ_WRITE;

    int rc;
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassember XCHG problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}
1046
1047
1048/**
1049 * \#PF Handler callback for MMIO ranges.
1050 *
1051 * @returns VBox status code (appropriate for GC return).
1052 * @param pVM VM Handle.
1053 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1054 * any error code (the EPT misconfig hack).
1055 * @param pCtxCore Trap register frame.
1056 * @param GCPhysFault The GC physical address corresponding to pvFault.
1057 * @param pvUser Pointer to the MMIO ring-3 range entry.
1058 */
static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
{
    /* Take the IOM lock before performing any MMIO. */
    int rc = iomLock(pVM);
#ifndef IN_RING3
    /* In R0/RC a busy lock means we cannot block; defer the whole access to ring-3. */
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_HC_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
         GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    /* pvUser is the range entry registered with PGM for this physical page. */
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
    /** @todo implement per-device locks for MMIO access. It can replace the IOM
     *        lock for most of the code, provided that we retake the lock while
     *        deregistering PIOMMMIORANGE to deal with remapping/access races
     *        (unlikely, but an SMP guest shouldn't cause us to crash). */
    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
     */
    PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
    if (!pStats)
    {
# ifdef IN_RING3
        iomUnlock(pVM);
        return VERR_NO_MEMORY;
# else
        /* Statistics allocation must happen in ring-3; bounce the access there. */
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomUnlock(pVM);
        return VINF_IOM_HC_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case, so
     * do the simple test first and the try deal with uErrorCode being N/A.
     */
    /* Defer when the needed callback only exists in ring-3.  uErrorCode==UINT32_MAX
       means we don't know the direction (EPT misconfig hack), so either missing
       R0/RC callback with an R3 counterpart forces the deferral. */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomUnlock(pVM);
        return VINF_IOM_HC_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Disassemble the instruction and interpret it.
     */
    PVMCPU        pVCpu = VMMGetCpu(pVM);
    PDISCPUSTATE  pDis  = &pVCpu->iom.s.DisState;
    unsigned      cbOp;
    rc = EMInterpretDisasOne(pVM, pVCpu, pCtxCore, pDis, &cbOp);
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        iomUnlock(pVM);
        return rc;
    }
    /* Dispatch on the decoded opcode; each case delegates to a dedicated interpreter. */
    switch (pDis->pCurInstr->opcode)
    {
        case OP_MOV:
        case OP_MOVZX:
        case OP_MOVSX:
        {
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
            AssertMsg(uErrorCode == UINT32_MAX || DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->param1.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags), pDis->param2.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param2.flags), uErrorCode));
            /* Without a valid error code (EPT misconfig), infer the direction from
               whether the destination operand is a memory reference. */
            if (uErrorCode != UINT32_MAX    /* EPT+MMIO optimization */
                ? uErrorCode & X86_TRAP_PF_RW
                : DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags))
                rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
            else
                rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
            break;
        }


#ifdef IOM_WITH_MOVS_SUPPORT
        case OP_MOVSB:
        case OP_MOVSWD:
        {
            /* MOVS needs the access direction; without an error code we must go to ring-3. */
            if (uErrorCode == UINT32_MAX)
                return VINF_IOM_HC_MMIO_READ_WRITE;
            STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
            PSTAMPROFILE pStat = NULL;
            rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
            STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
            break;
        }
#endif

        case OP_STOSB:
        case OP_STOSWD:
            Assert(uErrorCode & X86_TRAP_PF_RW);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
            rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
            break;

        case OP_LODSB:
        case OP_LODSWD:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
            rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
            break;

        case OP_CMP:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
            rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
            break;

        case OP_AND:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
            break;

        case OP_OR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
            break;

        case OP_XOR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
            break;

        case OP_TEST:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
            rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
            break;

        case OP_BT:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
            rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
            break;

        case OP_XCHG:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
            rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
            break;


        /*
         * The instruction isn't supported. Hand it on to ring-3.
         */
        default:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
            break;
    }

    /*
     * On success advance EIP.
     */
    if (rc == VINF_SUCCESS)
        pCtxCore->rip += cbOp;
    else
    {
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
        switch (rc)
        {
            case VINF_IOM_HC_MMIO_READ:
            case VINF_IOM_HC_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
                break;
            case VINF_IOM_HC_MMIO_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
                break;
        }
#endif
    }

    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    iomUnlock(pVM);
    return rc;
}
1272
1273/**
1274 * \#PF Handler callback for MMIO ranges.
1275 *
1276 * @returns VBox status code (appropriate for GC return).
1277 * @param pVM VM Handle.
1278 * @param uErrorCode CPU Error code.
1279 * @param pCtxCore Trap register frame.
1280 * @param pvFault The fault address (cr2).
1281 * @param GCPhysFault The GC physical address corresponding to pvFault.
1282 * @param pvUser Pointer to the MMIO ring-3 range entry.
1283 */
1284VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1285{
1286 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1287 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1288 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1289 return VBOXSTRICTRC_VAL(rcStrict);
1290}
1291
1292/**
1293 * Physical access handler for MMIO ranges.
1294 *
1295 * @returns VBox status code (appropriate for GC return).
1296 * @param pVM VM Handle.
1297 * @param uErrorCode CPU Error code.
1298 * @param pCtxCore Trap register frame.
1299 * @param GCPhysFault The GC physical address.
1300 */
VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
{
    /* Take the IOM lock so the range lookup below is stable; note that
       iomMMIOHandler takes the lock again, so the IOM lock must support
       recursive entry by the same EMT. */
    int rc2 = iomLock(pVM);
#ifndef IN_RING3
    if (rc2 == VERR_SEM_BUSY)
        return VINF_IOM_HC_MMIO_READ_WRITE;
#endif
    /* Unlike the #PF path, no range entry was passed in; look it up by address. */
    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
    iomUnlock(pVM);
    return VBOXSTRICTRC_VAL(rcStrict);
}
1312
1313#ifdef IN_RING3
1314/**
1315 * \#PF Handler callback for MMIO ranges.
1316 *
1317 * @returns VINF_SUCCESS if the handler have carried out the operation.
1318 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1319 * @param pVM VM Handle.
1320 * @param GCPhys The physical address the guest is writing to.
1321 * @param pvPhys The HC mapping of that address.
1322 * @param pvBuf What the guest is reading/writing.
1323 * @param cbBuf How much it's reading/writing.
1324 * @param enmAccessType The access type.
1325 * @param pvUser Pointer to the MMIO range entry.
1326 */
DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    /* pvUser is the MMIO range entry registered with PGM; pvPhys is unused here. */
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);

    /* Take the IOM lock before performing any MMIO. */
    int rc = iomLock(pVM);
    AssertRC(rc);

    /* Only naturally sized accesses are expected from PGM. */
    AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));

    Assert(pRange);
    Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
    /** @todo implement per-device locks for MMIO access. It can replace the IOM
     *        lock for most of the code, provided that we retake the lock while
     *        deregistering PIOMMMIORANGE to deal with remapping/access races
     *        (unlikely, but an SMP guest shouldn't cause us to crash). */
    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

    /* Dispatch to the read or write worker based on the access type. */
    if (enmAccessType == PGMACCESSTYPE_READ)
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
    else
        rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);

    AssertRC(rc);
    iomUnlock(pVM);
    return rc;
}
1355#endif /* IN_RING3 */
1356
1357/**
1358 * Reads a MMIO register.
1359 *
1360 * @returns VBox status code.
1361 *
1362 * @param pVM VM handle.
1363 * @param GCPhys The physical address to read.
1364 * @param pu32Value Where to store the value read.
1365 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1366 */
1367VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1368{
1369 /* Take the IOM lock before performing any MMIO. */
1370 int rc = iomLock(pVM);
1371#ifndef IN_RING3
1372 if (rc == VERR_SEM_BUSY)
1373 return VINF_IOM_HC_MMIO_WRITE;
1374#endif
1375 AssertRC(rc);
1376
1377 /*
1378 * Lookup the current context range node and statistics.
1379 */
1380 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1381 AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1382 if (!pRange)
1383 {
1384 iomUnlock(pVM);
1385 return VERR_INTERNAL_ERROR;
1386 }
1387 /** @todo implement per-device locks for MMIO access. */
1388 Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1389#ifdef VBOX_WITH_STATISTICS
1390 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1391 if (!pStats)
1392 {
1393 iomUnlock(pVM);
1394# ifdef IN_RING3
1395 return VERR_NO_MEMORY;
1396# else
1397 return VINF_IOM_HC_MMIO_READ;
1398# endif
1399 }
1400 STAM_COUNTER_INC(&pStats->Accesses);
1401#endif /* VBOX_WITH_STATISTICS */
1402
1403 if (pRange->CTX_SUFF(pfnReadCallback))
1404 {
1405 /*
1406 * Perform the read and deal with the result.
1407 */
1408 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1409 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, (unsigned)cbValue);
1410 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1411 switch (rc)
1412 {
1413 case VINF_SUCCESS:
1414 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1415 iomUnlock(pVM);
1416 return rc;
1417#ifndef IN_RING3
1418 case VINF_IOM_HC_MMIO_READ:
1419 case VINF_IOM_HC_MMIO_READ_WRITE:
1420 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1421#endif
1422 default:
1423 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1424 iomUnlock(pVM);
1425 return rc;
1426
1427 case VINF_IOM_MMIO_UNUSED_00:
1428 switch (cbValue)
1429 {
1430 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1431 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1432 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1433 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1434 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1435 }
1436 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1437 iomUnlock(pVM);
1438 return VINF_SUCCESS;
1439
1440 case VINF_IOM_MMIO_UNUSED_FF:
1441 switch (cbValue)
1442 {
1443 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1444 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1445 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1446 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1447 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1448 }
1449 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1450 iomUnlock(pVM);
1451 return VINF_SUCCESS;
1452 }
1453 }
1454#ifndef IN_RING3
1455 if (pRange->pfnReadCallbackR3)
1456 {
1457 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1458 iomUnlock(pVM);
1459 return VINF_IOM_HC_MMIO_READ;
1460 }
1461#endif
1462
1463 /*
1464 * Lookup the ring-3 range.
1465 */
1466 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1467 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1468 /* Unassigned memory; this is actually not supposed to happen. */
1469 switch (cbValue)
1470 {
1471 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1472 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1473 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1474 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1475 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1476 }
1477 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1478 iomUnlock(pVM);
1479 return VINF_SUCCESS;
1480}
1481
1482
1483/**
1484 * Writes to a MMIO register.
1485 *
1486 * @returns VBox status code.
1487 *
1488 * @param pVM VM handle.
1489 * @param GCPhys The physical address to write to.
1490 * @param u32Value The value to write.
1491 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1492 */
VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
{
    /* Take the IOM lock before performing any MMIO. */
    int rc = iomLock(pVM);
#ifndef IN_RING3
    /* Cannot block in R0/RC; let ring-3 redo the write. */
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_HC_MMIO_WRITE;
#endif
    AssertRC(rc);

    /*
     * Lookup the current context range node.
     */
    PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
    AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
    if (!pRange)
    {
        iomUnlock(pVM);
        return VERR_INTERNAL_ERROR;
    }
    /** @todo implement per-device locks for MMIO access. */
    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
#ifdef VBOX_WITH_STATISTICS
    PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
    if (!pStats)
    {
        iomUnlock(pVM);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        /* Stats allocation requires ring-3; bounce the write there. */
        return VINF_IOM_HC_MMIO_WRITE;
# endif
    }
    STAM_COUNTER_INC(&pStats->Accesses);
#endif /* VBOX_WITH_STATISTICS */

    /*
     * Perform the write if there's a write handler. R0/GC may have
     * to defer it to ring-3.
     */
    if (pRange->CTX_SUFF(pfnWriteCallback))
    {
        STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
        rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, &u32Value, (unsigned)cbValue);
        STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
#ifndef IN_RING3
        if (    rc == VINF_IOM_HC_MMIO_WRITE
            ||  rc == VINF_IOM_HC_MMIO_READ_WRITE)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
#endif
        Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
        iomUnlock(pVM);
        return rc;
    }
#ifndef IN_RING3
    /* No R0/RC callback, but a ring-3 one exists: defer the write to ring-3. */
    if (pRange->pfnWriteCallbackR3)
    {
        STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        iomUnlock(pVM);
        return VINF_IOM_HC_MMIO_WRITE;
    }
#endif

    /*
     * No write handler, nothing to do.
     */
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
    iomUnlock(pVM);
    return VINF_SUCCESS;
}
1565
1566/**
1567 * [REP*] INSB/INSW/INSD
1568 * ES:EDI,DX[,ECX]
1569 *
1570 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1571 *
1572 * @returns Strict VBox status code. Informational status codes other than the one documented
1573 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1574 * @retval VINF_SUCCESS Success.
1575 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1576 * status code must be passed on to EM.
1577 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1578 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1579 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1580 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1581 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1582 *
1583 * @param pVM The virtual machine.
1584 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1585 * @param uPort IO Port
1586 * @param uPrefix IO instruction prefix
1587 * @param cbTransfer Size of transfer unit
1588 */
VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);

    /*
     * We do not support REPNE or decrementing destination
     * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
     */
    if (   (uPrefix & PREFIX_REPNE)
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* 64-bit counts >= 4G are punted to the recompiler rather than handled here. */
        if (   CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            && pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->ecx;

        /* In 16-bit code the count register is CX, so mask to 16 bits. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Convert destination address es:edi. */
    RTGCPTR GCPtrDst;
    int rc2 = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrDst);
    if (RT_FAILURE(rc2))
    {
        Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
                          X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
    if (rc2 != VINF_SUCCESS)
    {
        Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /* If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers. */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        /* Advance RDI by however many units the device consumed. */
        pRegFrame->rdi += (cTransfersOrg - cTransfers) * cbTransfer;
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);
#endif
    /* Transfer the remainder one unit at a time: port read, then guest RAM write. */
    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value;
        rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
        Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
        GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
        pRegFrame->rdi += cbTransfer;
        cTransfers--;
    }
#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update ecx on exit. */
    if (uPrefix & PREFIX_REP)
        pRegFrame->ecx = cTransfers;

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
1683
1684
1685/**
1686 * [REP*] INSB/INSW/INSD
1687 * ES:EDI,DX[,ECX]
1688 *
1689 * @returns Strict VBox status code. Informational status codes other than the one documented
1690 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1691 * @retval VINF_SUCCESS Success.
1692 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1693 * status code must be passed on to EM.
1694 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1695 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1696 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1697 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1698 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1699 *
1700 * @param pVM The virtual machine.
1701 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1702 * @param pCpu Disassembler CPU state.
1703 */
1704VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1705{
1706 /*
1707 * Get port number directly from the register (no need to bother the
1708 * disassembler). And get the I/O register size from the opcode / prefix.
1709 */
1710 RTIOPORT Port = pRegFrame->edx & 0xffff;
1711 unsigned cb = 0;
1712 if (pCpu->pCurInstr->opcode == OP_INSB)
1713 cb = 1;
1714 else
1715 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1716
1717 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1718 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1719 {
1720 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1721 return rcStrict;
1722 }
1723
1724 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1725}
1726
1727
1728/**
1729 * [REP*] OUTSB/OUTSW/OUTSD
1730 * DS:ESI,DX[,ECX]
1731 *
1732 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1733 *
1734 * @returns Strict VBox status code. Informational status codes other than the one documented
1735 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1736 * @retval VINF_SUCCESS Success.
1737 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1738 * status code must be passed on to EM.
1739 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1740 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1741 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1742 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1743 *
1744 * @param pVM The virtual machine.
1745 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1746 * @param uPort IO Port
1747 * @param uPrefix IO instruction prefix
1748 * @param cbTransfer Size of transfer unit
1749 */
VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);

    /*
     * We do not support segment prefixes, REPNE or
     * decrementing source pointer.
     */
    if (   (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* 64-bit counts >= 4G are punted to the recompiler rather than handled here. */
        if (   CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            && pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->ecx;
        /* In 16-bit code the count register is CX, so mask to 16 bits. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Convert source address ds:esi. */
    RTGCPTR GCPtrSrc;
    int rc2 = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrSrc);
    if (RT_FAILURE(rc2))
    {
        Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we currently can't recover properly from traps inside this instruction */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
                          (cpl == 3) ? X86_PTE_US : 0);
    if (rc2 != VINF_SUCCESS)
    {
        Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /*
         * If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers.
         */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        /* Advance RSI by however many units the device consumed. */
        pRegFrame->rsi += (cTransfersOrg - cTransfers) * cbTransfer;
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);
#endif

    /* Transfer the remainder one unit at a time: guest RAM read, then port write. */
    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value = 0;
        rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
        if (rcStrict != VINF_SUCCESS)
            break;
        rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
        pRegFrame->rsi += cbTransfer;
        cTransfers--;
    }

#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update ecx on exit. */
    if (uPrefix & PREFIX_REP)
        pRegFrame->ecx = cTransfers;

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
1847
1848
1849/**
1850 * [REP*] OUTSB/OUTSW/OUTSD
1851 * DS:ESI,DX[,ECX]
1852 *
1853 * @returns Strict VBox status code. Informational status codes other than the one documented
1854 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1855 * @retval VINF_SUCCESS Success.
1856 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1857 * status code must be passed on to EM.
1858 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1859 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1860 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1861 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1862 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1863 *
1864 * @param pVM The virtual machine.
1865 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1866 * @param pCpu Disassembler CPU state.
1867 */
1868VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1869{
1870 /*
1871 * Get port number from the first parameter.
1872 * And get the I/O register size from the opcode / prefix.
1873 */
1874 uint64_t Port = 0;
1875 unsigned cb = 0;
1876 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1877 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1878 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1879 cb = 1;
1880 else
1881 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1882
1883 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1884 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1885 {
1886 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1887 return rcStrict;
1888 }
1889
1890 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1891}
1892
1893
1894#ifndef IN_RC
1895/**
1896 * Mapping an MMIO2 page in place of an MMIO page for direct access.
1897 *
1898 * (This is a special optimization used by the VGA device.)
1899 *
1900 * @returns VBox status code.
1901 *
1902 * @param pVM The virtual machine.
1903 * @param GCPhys The address of the MMIO page to be changed.
1904 * @param GCPhysRemapped The address of the MMIO2 page.
1905 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1906 * for the time being.
1907 */
1908VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
1909{
1910 /* Currently only called from the VGA device during MMIO. */
1911 Assert(IOMIsLockOwner(pVM));
1912 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
1913
1914 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1915
1916 PVMCPU pVCpu = VMMGetCpu(pVM);
1917
1918 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1919 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1920 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
1921 && !HWACCMIsNestedPagingActive(pVM)))
1922 return VINF_SUCCESS; /* ignore */
1923
1924 /*
1925 * Lookup the context range node the page belongs to.
1926 */
1927 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1928 AssertMsgReturn(pRange,
1929 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
1930
1931 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1932 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1933
1934 /*
1935 * Do the aliasing; page align the addresses since PGM is picky.
1936 */
1937 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1938 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1939
1940 int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
1941 AssertRCReturn(rc, rc);
1942
1943 /*
1944 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
1945 * can simply prefetch it.
1946 *
1947 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
1948 */
1949#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
1950# ifdef VBOX_STRICT
1951 uint64_t fFlags;
1952 RTHCPHYS HCPhys;
1953 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1954 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1955# endif
1956#endif
1957 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
1958 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1959 return VINF_SUCCESS;
1960}
1961
1962/**
1963 * Mapping a HC page in place of an MMIO page for direct access.
1964 *
1965 * (This is a special optimization used by the APIC in the VT-x case.)
1966 *
1967 * @returns VBox status code.
1968 *
1969 * @param pVM The virtual machine.
1970 * @param GCPhys The address of the MMIO page to be changed.
1971 * @param HCPhys The address of the host physical page.
1972 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1973 * for the time being.
1974 */
1975VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
1976{
1977 /* Currently only called from VT-x code during a page fault. */
1978 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
1979
1980 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1981 Assert(HWACCMIsEnabled(pVM));
1982
1983 PVMCPU pVCpu = VMMGetCpu(pVM);
1984
1985 /*
1986 * Lookup the context range node the page belongs to.
1987 */
1988#ifdef VBOX_STRICT
1989 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
1990 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(&pVM->iom.s, GCPhys);
1991 AssertMsgReturn(pRange,
1992 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
1993 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1994 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1995#endif
1996
1997 /*
1998 * Do the aliasing; page align the addresses since PGM is picky.
1999 */
2000 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2001 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2002
2003 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2004 AssertRCReturn(rc, rc);
2005
2006 /*
2007 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2008 * can simply prefetch it.
2009 *
2010 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2011 */
2012 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2013 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2014 return VINF_SUCCESS;
2015}
2016
2017/**
2018 * Reset a previously modified MMIO region; restore the access flags.
2019 *
2020 * @returns VBox status code.
2021 *
2022 * @param pVM The virtual machine.
2023 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2024 */
2025VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2026{
2027 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2028
2029 PVMCPU pVCpu = VMMGetCpu(pVM);
2030
2031 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2032 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2033 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2034 && !HWACCMIsNestedPagingActive(pVM)))
2035 return VINF_SUCCESS; /* ignore */
2036
2037 /*
2038 * Lookup the context range node the page belongs to.
2039 */
2040#ifdef VBOX_STRICT
2041 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2042 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(&pVM->iom.s, GCPhys);
2043 AssertMsgReturn(pRange,
2044 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2045 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2046 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2047#endif
2048
2049 /*
2050 * Call PGM to do the job work.
2051 *
2052 * After the call, all the pages should be non-present... unless there is
2053 * a page pool flush pending (unlikely).
2054 */
2055 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2056 AssertRC(rc);
2057
2058#ifdef VBOX_STRICT
2059 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2060 {
2061 uint32_t cb = pRange->cb;
2062 GCPhys = pRange->GCPhys;
2063 while (cb)
2064 {
2065 uint64_t fFlags;
2066 RTHCPHYS HCPhys;
2067 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2068 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2069 cb -= PAGE_SIZE;
2070 GCPhys += PAGE_SIZE;
2071 }
2072 }
2073#endif
2074 return rc;
2075}
2076#endif /* !IN_RC */
2077
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette