VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 29450

Last change on this file since 29450 was 29436, checked in by vboxsync, 15 years ago

IOM: Profile all callbacks; don't bother counting them, as we can derive that from the profiling. Adjusted the layout so that it is easier to navigate in the GUI, adding a total count as a top-level branch.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 71.3 KB
Line 
1/* $Id: IOMAllMMIO.cpp 29436 2010-05-12 20:57:57Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/iom.h>
24#include <VBox/cpum.h>
25#include <VBox/pgm.h>
26#include <VBox/selm.h>
27#include <VBox/mm.h>
28#include <VBox/em.h>
29#include <VBox/pgm.h>
30#include <VBox/trpm.h>
31#include "IOMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/vmm.h>
34#include <VBox/hwaccm.h>
35
36#include <VBox/dis.h>
37#include <VBox/disopcode.h>
38#include <VBox/pdmdev.h>
39#include <VBox/param.h>
40#include <VBox/err.h>
41#include <iprt/assert.h>
42#include <VBox/log.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45
46
47/*******************************************************************************
48* Global Variables *
49*******************************************************************************/
50
/**
 * Array for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 *
 * Indexed by the operand size in bytes.  The entries for the invalid sizes
 * (0, 3, 5, 6 and 7) are ~0 so that accidental use as a shift count is
 * obviously wrong rather than silently plausible.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0, /* 0 - invalid */
    0,  /* *1 == 2^0 */
    1,  /* *2 == 2^1 */
    ~0, /* 3 - invalid */
    2,  /* *4 == 2^2 */
    ~0, /* 5 - invalid */
    ~0, /* 6 - invalid */
    ~0, /* 7 - invalid */
    3   /* *8 == 2^3 */
};

/**
 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 * @note No bounds checking; cb must be a valid operand size (1, 2, 4 or 8).
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
71
72
73/**
74 * Wrapper which does the write and updates range statistics when such are enabled.
75 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
76 */
77DECLINLINE(int) iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
78{
79#ifdef VBOX_WITH_STATISTICS
80 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
81 Assert(pStats);
82#endif
83
84 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
85 int rc;
86 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
87 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
88 else
89 rc = VINF_SUCCESS;
90 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
91 STAM_COUNTER_INC(&pStats->Accesses);
92 return rc;
93}
94
95
96/**
97 * Wrapper which does the read and updates range statistics when such are enabled.
98 */
99DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
100{
101#ifdef VBOX_WITH_STATISTICS
102 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
103 Assert(pStats);
104#endif
105
106 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
107 int rc;
108 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
109 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
110 else
111 rc = VINF_IOM_MMIO_UNUSED_FF;
112 if (rc != VINF_SUCCESS)
113 {
114 switch (rc)
115 {
116 case VINF_IOM_MMIO_UNUSED_FF:
117 switch (cbValue)
118 {
119 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
120 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
121 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
122 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
123 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
124 }
125 rc = VINF_SUCCESS;
126 break;
127
128 case VINF_IOM_MMIO_UNUSED_00:
129 switch (cbValue)
130 {
131 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
132 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
133 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
134 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
135 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
136 }
137 rc = VINF_SUCCESS;
138 break;
139 }
140 }
141 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
142 STAM_COUNTER_INC(&pStats->Accesses);
143 return rc;
144}
145
146
147/**
148 * Internal - statistics only.
149 */
150DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
151{
152#ifdef VBOX_WITH_STATISTICS
153 switch (cb)
154 {
155 case 1:
156 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
157 break;
158 case 2:
159 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
160 break;
161 case 4:
162 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
163 break;
164 case 8:
165 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
166 break;
167 default:
168 /* No way. */
169 AssertMsgFailed(("Invalid data length %d\n", cb));
170 break;
171 }
172#else
173 NOREF(pVM); NOREF(cb);
174#endif
175}
176
177
178/**
179 * MOV reg, mem (read)
180 * MOVZX reg, mem (read)
181 * MOVSX reg, mem (read)
182 *
183 * @returns VBox status code.
184 *
185 * @param pVM The virtual machine.
186 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
187 * @param pCpu Disassembler CPU state.
188 * @param pRange Pointer MMIO range.
189 * @param GCPhysFault The GC physical address corresponding to pvFault.
190 */
191static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
192{
193 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
194
195 /*
196 * Get the data size from parameter 2,
197 * and call the handler function to get the data.
198 */
199 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
200 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
201
202 uint64_t u64Data = 0;
203 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
204 if (rc == VINF_SUCCESS)
205 {
206 /*
207 * Do sign extension for MOVSX.
208 */
209 /** @todo checkup MOVSX implementation! */
210 if (pCpu->pCurInstr->opcode == OP_MOVSX)
211 {
212 if (cb == 1)
213 {
214 /* DWORD <- BYTE */
215 int64_t iData = (int8_t)u64Data;
216 u64Data = (uint64_t)iData;
217 }
218 else
219 {
220 /* DWORD <- WORD */
221 int64_t iData = (int16_t)u64Data;
222 u64Data = (uint64_t)iData;
223 }
224 }
225
226 /*
227 * Store the result to register (parameter 1).
228 */
229 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
230 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
231 }
232
233 if (rc == VINF_SUCCESS)
234 iomMMIOStatLength(pVM, cb);
235 return rc;
236}
237
238
239/**
240 * MOV mem, reg|imm (write)
241 *
242 * @returns VBox status code.
243 *
244 * @param pVM The virtual machine.
245 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
246 * @param pCpu Disassembler CPU state.
247 * @param pRange Pointer MMIO range.
248 * @param GCPhysFault The GC physical address corresponding to pvFault.
249 */
250static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
251{
252 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
253
254 /*
255 * Get data to write from second parameter,
256 * and call the callback to write it.
257 */
258 unsigned cb = 0;
259 uint64_t u64Data = 0;
260 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
261 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
262
263 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
264 if (rc == VINF_SUCCESS)
265 iomMMIOStatLength(pVM, cb);
266 return rc;
267}
268
269
/** Wrapper for reading virtual memory.
 *
 * In raw-mode context this uses the no-trap MM reader; in ring-0/ring-3 it
 * goes through PGMPhysReadGCPtr.
 */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    return MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
#endif
}
281
282
283/** Wrapper for writing virtual memory. */
284DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
285{
286 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
287 * raw mode code. Some thought needs to be spent on theoretical concurrency issues as
288 * as well since we're not behind the pgm lock and handler may change between calls.
289 * MMGCRamWriteNoTrapHandler may also trap if the page isn't shadowed, or was kicked
290 * out from both the shadow pt (SMP or our changes) and TLB.
291 *
292 * Currently MMGCRamWriteNoTrapHandler may also fail when it hits a write access handler.
293 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr OTOH may mess up the state
294 * of some shadowed structure in R0. */
295#ifdef IN_RC
296 NOREF(pCtxCore);
297 return MMGCRamWriteNoTrapHandler((void *)(uintptr_t)GCPtrDst, pvSrc, cb);
298#elif IN_RING0
299 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
300#else
301 NOREF(pCtxCore);
302 return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
303#endif
304}
305
306
307#ifdef IOM_WITH_MOVS_SUPPORT
308/**
309 * [REP] MOVSB
310 * [REP] MOVSW
311 * [REP] MOVSD
312 *
313 * Restricted implementation.
314 *
315 *
316 * @returns VBox status code.
317 *
318 * @param pVM The virtual machine.
319 * @param uErrorCode CPU Error code.
320 * @param pRegFrame Trap register frame.
321 * @param GCPhysFault The GC physical address corresponding to pvFault.
322 * @param pCpu Disassembler CPU state.
323 * @param pRange Pointer MMIO range.
324 * @param ppStat Which sub-sample to attribute this call to.
325 */
326static int iomInterpretMOVS(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
327{
328 /*
329 * We do not support segment prefixes or REPNE.
330 */
331 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
332 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
333
334 PVMCPU pVCpu = VMMGetCpu(pVM);
335
336 /*
337 * Get bytes/words/dwords/qword count to copy.
338 */
339 uint32_t cTransfers = 1;
340 if (pCpu->prefix & PREFIX_REP)
341 {
342#ifndef IN_RC
343 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
344 && pRegFrame->rcx >= _4G)
345 return VINF_EM_RAW_EMULATE_INSTR;
346#endif
347
348 cTransfers = pRegFrame->ecx;
349 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
350 cTransfers &= 0xffff;
351
352 if (!cTransfers)
353 return VINF_SUCCESS;
354 }
355
356 /* Get the current privilege level. */
357 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
358
359 /*
360 * Get data size.
361 */
362 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
363 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
364 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
365
366#ifdef VBOX_WITH_STATISTICS
367 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
368 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
369#endif
370
371/** @todo re-evaluate on page boundraries. */
372
373 RTGCPHYS Phys = GCPhysFault;
374 int rc;
375 if (uErrorCode & X86_TRAP_PF_RW)
376 {
377 /*
378 * Write operation: [Mem] -> [MMIO]
379 * ds:esi (Virt Src) -> es:edi (Phys Dst)
380 */
381 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
382
383 /* Check callback. */
384 if (!pRange->CTX_SUFF(pfnWriteCallback))
385 return VINF_IOM_HC_MMIO_WRITE;
386
387 /* Convert source address ds:esi. */
388 RTGCUINTPTR pu8Virt;
389 rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
390 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
391 (PRTGCPTR)&pu8Virt);
392 if (RT_SUCCESS(rc))
393 {
394
395 /* Access verification first; we currently can't recover properly from traps inside this instruction */
396 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
397 if (rc != VINF_SUCCESS)
398 {
399 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
400 return VINF_EM_RAW_EMULATE_INSTR;
401 }
402
403#ifdef IN_RC
404 MMGCRamRegisterTrapHandler(pVM);
405#endif
406
407 /* copy loop. */
408 while (cTransfers)
409 {
410 uint32_t u32Data = 0;
411 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
412 if (rc != VINF_SUCCESS)
413 break;
414 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
415 if (rc != VINF_SUCCESS)
416 break;
417
418 pu8Virt += offIncrement;
419 Phys += offIncrement;
420 pRegFrame->rsi += offIncrement;
421 pRegFrame->rdi += offIncrement;
422 cTransfers--;
423 }
424#ifdef IN_RC
425 MMGCRamDeregisterTrapHandler(pVM);
426#endif
427 /* Update ecx. */
428 if (pCpu->prefix & PREFIX_REP)
429 pRegFrame->ecx = cTransfers;
430 }
431 else
432 rc = VINF_IOM_HC_MMIO_READ_WRITE;
433 }
434 else
435 {
436 /*
437 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
438 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
439 */
440 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
441
442 /* Check callback. */
443 if (!pRange->CTX_SUFF(pfnReadCallback))
444 return VINF_IOM_HC_MMIO_READ;
445
446 /* Convert destination address. */
447 RTGCUINTPTR pu8Virt;
448 rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
449 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
450 (RTGCPTR *)&pu8Virt);
451 if (RT_FAILURE(rc))
452 return VINF_IOM_HC_MMIO_READ;
453
454 /* Check if destination address is MMIO. */
455 PIOMMMIORANGE pMMIODst;
456 RTGCPHYS PhysDst;
457 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
458 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
459 if ( RT_SUCCESS(rc)
460 && (pMMIODst = iomMMIOGetRange(&pVM->iom.s, PhysDst)))
461 {
462 /** @todo implement per-device locks for MMIO access. */
463 Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
464
465 /*
466 * Extra: [MMIO] -> [MMIO]
467 */
468 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
469 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
470 return VINF_IOM_HC_MMIO_READ_WRITE;
471
472 /* copy loop. */
473 while (cTransfers)
474 {
475 uint32_t u32Data;
476 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
477 if (rc != VINF_SUCCESS)
478 break;
479 rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
480 if (rc != VINF_SUCCESS)
481 break;
482
483 Phys += offIncrement;
484 PhysDst += offIncrement;
485 pRegFrame->rsi += offIncrement;
486 pRegFrame->rdi += offIncrement;
487 cTransfers--;
488 }
489 }
490 else
491 {
492 /*
493 * Normal: [MMIO] -> [Mem]
494 */
495 /* Access verification first; we currently can't recover properly from traps inside this instruction */
496 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
497 if (rc != VINF_SUCCESS)
498 {
499 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
500 return VINF_EM_RAW_EMULATE_INSTR;
501 }
502
503 /* copy loop. */
504#ifdef IN_RC
505 MMGCRamRegisterTrapHandler(pVM);
506#endif
507 while (cTransfers)
508 {
509 uint32_t u32Data;
510 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
511 if (rc != VINF_SUCCESS)
512 break;
513 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
514 if (rc != VINF_SUCCESS)
515 {
516 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
517 break;
518 }
519
520 pu8Virt += offIncrement;
521 Phys += offIncrement;
522 pRegFrame->rsi += offIncrement;
523 pRegFrame->rdi += offIncrement;
524 cTransfers--;
525 }
526#ifdef IN_RC
527 MMGCRamDeregisterTrapHandler(pVM);
528#endif
529 }
530
531 /* Update ecx on exit. */
532 if (pCpu->prefix & PREFIX_REP)
533 pRegFrame->ecx = cTransfers;
534 }
535
536 /* work statistics. */
537 if (rc == VINF_SUCCESS)
538 iomMMIOStatLength(pVM, cb);
539 NOREF(ppStat);
540 return rc;
541}
542#endif /* IOM_WITH_MOVS_SUPPORT */
543
544
545/**
546 * [REP] STOSB
547 * [REP] STOSW
548 * [REP] STOSD
549 *
550 * Restricted implementation.
551 *
552 *
553 * @returns VBox status code.
554 *
555 * @param pVM The virtual machine.
556 * @param pRegFrame Trap register frame.
557 * @param GCPhysFault The GC physical address corresponding to pvFault.
558 * @param pCpu Disassembler CPU state.
559 * @param pRange Pointer MMIO range.
560 */
561static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
562{
563 /*
564 * We do not support segment prefixes or REPNE..
565 */
566 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
567 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
568
569 /*
570 * Get bytes/words/dwords count to copy.
571 */
572 uint32_t cTransfers = 1;
573 if (pCpu->prefix & PREFIX_REP)
574 {
575#ifndef IN_RC
576 if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM), pRegFrame)
577 && pRegFrame->rcx >= _4G)
578 return VINF_EM_RAW_EMULATE_INSTR;
579#endif
580
581 cTransfers = pRegFrame->ecx;
582 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
583 cTransfers &= 0xffff;
584
585 if (!cTransfers)
586 return VINF_SUCCESS;
587 }
588
589/** @todo r=bird: bounds checks! */
590
591 /*
592 * Get data size.
593 */
594 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
595 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
596 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
597
598#ifdef VBOX_WITH_STATISTICS
599 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
600 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
601#endif
602
603
604 RTGCPHYS Phys = GCPhysFault;
605 uint32_t u32Data = pRegFrame->eax;
606 int rc;
607 if (pRange->CTX_SUFF(pfnFillCallback))
608 {
609 /*
610 * Use the fill callback.
611 */
612 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
613 if (offIncrement > 0)
614 {
615 /* addr++ variant. */
616 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys, u32Data, cb, cTransfers);
617 if (rc == VINF_SUCCESS)
618 {
619 /* Update registers. */
620 pRegFrame->rdi += cTransfers << SIZE_2_SHIFT(cb);
621 if (pCpu->prefix & PREFIX_REP)
622 pRegFrame->ecx = 0;
623 }
624 }
625 else
626 {
627 /* addr-- variant. */
628 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), (Phys - (cTransfers - 1)) << SIZE_2_SHIFT(cb), u32Data, cb, cTransfers);
629 if (rc == VINF_SUCCESS)
630 {
631 /* Update registers. */
632 pRegFrame->rdi -= cTransfers << SIZE_2_SHIFT(cb);
633 if (pCpu->prefix & PREFIX_REP)
634 pRegFrame->ecx = 0;
635 }
636 }
637 }
638 else
639 {
640 /*
641 * Use the write callback.
642 */
643 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
644
645 /* fill loop. */
646 do
647 {
648 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
649 if (rc != VINF_SUCCESS)
650 break;
651
652 Phys += offIncrement;
653 pRegFrame->rdi += offIncrement;
654 cTransfers--;
655 } while (cTransfers);
656
657 /* Update ecx on exit. */
658 if (pCpu->prefix & PREFIX_REP)
659 pRegFrame->ecx = cTransfers;
660 }
661
662 /*
663 * Work statistics and return.
664 */
665 if (rc == VINF_SUCCESS)
666 iomMMIOStatLength(pVM, cb);
667 return rc;
668}
669
670
671/**
672 * [REP] LODSB
673 * [REP] LODSW
674 * [REP] LODSD
675 *
676 * Restricted implementation.
677 *
678 *
679 * @returns VBox status code.
680 *
681 * @param pVM The virtual machine.
682 * @param pRegFrame Trap register frame.
683 * @param GCPhysFault The GC physical address corresponding to pvFault.
684 * @param pCpu Disassembler CPU state.
685 * @param pRange Pointer MMIO range.
686 */
687static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
688{
689 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
690
691 /*
692 * We do not support segment prefixes or REP*.
693 */
694 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
695 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
696
697 /*
698 * Get data size.
699 */
700 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
701 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
702 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
703
704 /*
705 * Perform read.
706 */
707 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
708 if (rc == VINF_SUCCESS)
709 pRegFrame->rsi += offIncrement;
710
711 /*
712 * Work statistics and return.
713 */
714 if (rc == VINF_SUCCESS)
715 iomMMIOStatLength(pVM, cb);
716 return rc;
717}
718
719
720/**
721 * CMP [MMIO], reg|imm
722 * CMP reg|imm, [MMIO]
723 *
724 * Restricted implementation.
725 *
726 *
727 * @returns VBox status code.
728 *
729 * @param pVM The virtual machine.
730 * @param pRegFrame Trap register frame.
731 * @param GCPhysFault The GC physical address corresponding to pvFault.
732 * @param pCpu Disassembler CPU state.
733 * @param pRange Pointer MMIO range.
734 */
735static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
736{
737 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
738
739 /*
740 * Get the operands.
741 */
742 unsigned cb = 0;
743 uint64_t uData1 = 0;
744 uint64_t uData2 = 0;
745 int rc;
746 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
747 /* cmp reg, [MMIO]. */
748 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
749 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
750 /* cmp [MMIO], reg|imm. */
751 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
752 else
753 {
754 AssertMsgFailed(("Disassember CMP problem..\n"));
755 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
756 }
757
758 if (rc == VINF_SUCCESS)
759 {
760 /* Emulate CMP and update guest flags. */
761 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
762 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
763 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
764 iomMMIOStatLength(pVM, cb);
765 }
766
767 return rc;
768}
769
770
/**
 * AND [MMIO], reg|imm
 * AND reg, [MMIO]
 * OR [MMIO], reg|imm
 * OR reg, [MMIO]
 *
 * Restricted implementation.  The actual arithmetic is delegated to
 * pfnEmulate (EMEmulateAnd/Or/Xor), which also computes the result flags.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 * @param   pfnEmulate  Instruction emulation function.
 */
static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
{
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    bool fAndWrite;             /* set on every path that doesn't return early */
    int rc;

#ifdef LOG_ENABLED
    /* Instruction name for the flow log only. */
    const char *pszInstr;

    if (pCpu->pCurInstr->opcode == OP_XOR)
        pszInstr = "Xor";
    else if (pCpu->pCurInstr->opcode == OP_OR)
        pszInstr = "Or";
    else if (pCpu->pCurInstr->opcode == OP_AND)
        pszInstr = "And";
    else
        pszInstr = "OrXorAnd??";
#endif

    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
        /* and reg, [MMIO]: destination is a register, so only a read is needed. */
        Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
        fAndWrite = false;
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
        /* and [MMIO], reg|imm: read-modify-write, so both callbacks must be
           usable in this context; otherwise defer to ring-3. */
        fAndWrite = true;
        if (    (pRange->CTX_SUFF(pfnReadCallback)  || !pRange->pfnReadCallbackR3)
            &&  (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
            rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        else
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
    }
    else
    {
        AssertMsgFailed(("Disassember AND problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
        /* Emulate AND and update guest flags.  NOTE(review): uData1 is passed
           as uint32_t*; presumably pfnEmulate handles cb==8 via the cb
           parameter — confirm against the EMEmulate* implementations. */
        uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);

        LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));

        if (fAndWrite)
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
        else
        {
            /* Store result to register. */
            bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
            AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
        }
        if (rc == VINF_SUCCESS)
        {
            /* Update guest's eflags (arithmetic flags only) and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
            iomMMIOStatLength(pVM, cb);
        }
    }

    return rc;
}
860
861
862/**
863 * TEST [MMIO], reg|imm
864 * TEST reg, [MMIO]
865 *
866 * Restricted implementation.
867 *
868 *
869 * @returns VBox status code.
870 *
871 * @param pVM The virtual machine.
872 * @param pRegFrame Trap register frame.
873 * @param GCPhysFault The GC physical address corresponding to pvFault.
874 * @param pCpu Disassembler CPU state.
875 * @param pRange Pointer MMIO range.
876 */
877static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
878{
879 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
880
881 unsigned cb = 0;
882 uint64_t uData1 = 0;
883 uint64_t uData2 = 0;
884 int rc;
885
886 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
887 {
888 /* and test, [MMIO]. */
889 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
890 }
891 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
892 {
893 /* test [MMIO], reg|imm. */
894 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
895 }
896 else
897 {
898 AssertMsgFailed(("Disassember TEST problem..\n"));
899 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
900 }
901
902 if (rc == VINF_SUCCESS)
903 {
904 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
905 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
906 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
907 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
908 iomMMIOStatLength(pVM, cb);
909 }
910
911 return rc;
912}
913
914
/**
 * BT [MMIO], reg|imm
 *
 * Restricted implementation: reads the memory operand and copies the
 * addressed bit into CF.  No write-back (this is BT, not BTS/BTR/BTC).
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 */
static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    uint64_t uBit  = 0;         /* bit index from the reg/imm operand */
    uint64_t uData = 0;         /* value read from MMIO */
    unsigned cbIgnored;

    if (!iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cbIgnored))
    {
        AssertMsgFailed(("Disassember BT problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    /* The size of the memory operand only matters here. */
    unsigned cbData = DISGetParamSize(pCpu, &pCpu->param1);

    /* bt [MMIO], reg|imm. */
    int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
    if (rc == VINF_SUCCESS)
    {
        /* Find the bit inside the faulting address.  u1CF is a 1-bit field,
           so only the low bit of the shift result is kept.
           NOTE(review): a register bit offset >= 8*cbData makes this shift
           undefined in C; presumably the faulting address already accounts
           for out-of-range offsets — confirm against the #PF delivery. */
        pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
        iomMMIOStatLength(pVM, cbData);
    }

    return rc;
}
956
/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.  The exchange is performed as a separate MMIO
 * read followed by an MMIO write.
 * NOTE(review): this is not atomic with respect to other VCPUs the way a
 * real locked XCHG is — presumably acceptable for MMIO; confirm.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this.
       If either callback is only available in ring-3, defer the whole thing. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_HC_MMIO_READ_WRITE;

    int rc;
    unsigned cb     = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassember XCHG problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}
1031
1032
1033/**
1034 * \#PF Handler callback for MMIO ranges.
1035 *
1036 * @returns VBox status code (appropriate for GC return).
1037 * @param pVM VM Handle.
1038 * @param uErrorCode CPU Error code.
1039 * @param pCtxCore Trap register frame.
1040 * @param GCPhysFault The GC physical address corresponding to pvFault.
1041 * @param pvUser Pointer to the MMIO ring-3 range entry.
1042 */
1043int iomMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
1044{
1045 /* Take the IOM lock before performing any MMIO. */
1046 int rc = iomLock(pVM);
1047#ifndef IN_RING3
1048 if (rc == VERR_SEM_BUSY)
1049 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1050#endif
1051 AssertRC(rc);
1052
1053 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1054 Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
1055 GCPhysFault, (uint32_t)uErrorCode, (RTGCPTR)pCtxCore->rip));
1056
1057 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1058 Assert(pRange);
1059 Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1060 /** @todo implement per-device locks for MMIO access. It can replace the IOM
1061 * lock for most of the code, provided that we retake the lock while
1062 * deregistering PIOMMMIORANGE to deal with remapping/access races
1063 * (unlikely, but an SMP guest shouldn't cause us to crash). */
1064 Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1065
1066#ifdef VBOX_WITH_STATISTICS
1067 /*
1068 * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
1069 */
1070 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
1071 if (!pStats)
1072 {
1073# ifdef IN_RING3
1074 iomUnlock(pVM);
1075 return VERR_NO_MEMORY;
1076# else
1077 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1078 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1079 iomUnlock(pVM);
1080 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1081# endif
1082 }
1083#endif
1084
1085#ifndef IN_RING3
1086 /*
1087 * Should we defer the request right away?
1088 */
1089 if (uErrorCode & X86_TRAP_PF_RW
1090 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1091 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1092 {
1093 if (uErrorCode & X86_TRAP_PF_RW)
1094 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1095 else
1096 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1097
1098 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1099 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1100 iomUnlock(pVM);
1101 return (uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ);
1102 }
1103#endif /* !IN_RING3 */
1104
1105 /*
1106 * Disassemble the instruction and interpret it.
1107 */
1108 PVMCPU pVCpu = VMMGetCpu(pVM);
1109 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1110 unsigned cbOp;
1111 rc = EMInterpretDisasOne(pVM, pVCpu, pCtxCore, pDis, &cbOp);
1112 AssertRC(rc);
1113 if (RT_FAILURE(rc))
1114 {
1115 iomUnlock(pVM);
1116 return rc;
1117 }
1118 switch (pDis->pCurInstr->opcode)
1119 {
1120 case OP_MOV:
1121 case OP_MOVZX:
1122 case OP_MOVSX:
1123 {
1124 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1125 if (uErrorCode & X86_TRAP_PF_RW)
1126 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1127 else
1128 rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1129 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1130 break;
1131 }
1132
1133
1134#ifdef IOM_WITH_MOVS_SUPPORT
1135 case OP_MOVSB:
1136 case OP_MOVSWD:
1137 {
1138 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1139 PSTAMPROFILE pStat = NULL;
1140 rc = iomInterpretMOVS(pVM, uErrorCode, pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1141 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1142 break;
1143 }
1144#endif
1145
1146 case OP_STOSB:
1147 case OP_STOSWD:
1148 Assert(uErrorCode & X86_TRAP_PF_RW);
1149 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1150 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1151 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1152 break;
1153
1154 case OP_LODSB:
1155 case OP_LODSWD:
1156 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1157 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1158 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1159 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1160 break;
1161
1162 case OP_CMP:
1163 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1164 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1165 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1166 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1167 break;
1168
1169 case OP_AND:
1170 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1171 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1172 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1173 break;
1174
1175 case OP_OR:
1176 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1177 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1178 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1179 break;
1180
1181 case OP_XOR:
1182 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1183 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1184 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1185 break;
1186
1187 case OP_TEST:
1188 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1189 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1190 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1191 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1192 break;
1193
1194 case OP_BT:
1195 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1196 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1197 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1198 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1199 break;
1200
1201 case OP_XCHG:
1202 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1203 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1204 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1205 break;
1206
1207
1208 /*
1209 * The instruction isn't supported. Hand it on to ring-3.
1210 */
1211 default:
1212 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1213 rc = (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1214 break;
1215 }
1216
1217 /*
1218 * On success advance EIP.
1219 */
1220 if (rc == VINF_SUCCESS)
1221 pCtxCore->rip += cbOp;
1222 else
1223 {
1224 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1225#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1226 switch (rc)
1227 {
1228 case VINF_IOM_HC_MMIO_READ:
1229 case VINF_IOM_HC_MMIO_READ_WRITE:
1230 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1231 break;
1232 case VINF_IOM_HC_MMIO_WRITE:
1233 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1234 break;
1235 }
1236#endif
1237 }
1238
1239 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1240 iomUnlock(pVM);
1241 return rc;
1242}
1243
1244/**
1245 * \#PF Handler callback for MMIO ranges.
1246 *
1247 * @returns VBox status code (appropriate for GC return).
1248 * @param pVM VM Handle.
1249 * @param uErrorCode CPU Error code.
1250 * @param pCtxCore Trap register frame.
1251 * @param pvFault The fault address (cr2).
1252 * @param GCPhysFault The GC physical address corresponding to pvFault.
1253 * @param pvUser Pointer to the MMIO ring-3 range entry.
1254 */
1255VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1256{
1257 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1258 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1259 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, uErrorCode, pCtxCore, GCPhysFault, pvUser);
1260 return VBOXSTRICTRC_VAL(rcStrict);
1261}
1262
1263/**
1264 * Physical access handler for MMIO ranges.
1265 *
1266 * @returns VBox status code (appropriate for GC return).
1267 * @param pVM VM Handle.
1268 * @param uErrorCode CPU Error code.
1269 * @param pCtxCore Trap register frame.
1270 * @param GCPhysFault The GC physical address.
1271 */
1272VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1273{
1274 int rc2 = iomLock(pVM);
1275#ifndef IN_RING3
1276 if (rc2 == VERR_SEM_BUSY)
1277 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1278#endif
1279 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, uErrorCode, pCtxCore, GCPhysFault, iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1280 iomUnlock(pVM);
1281 return VBOXSTRICTRC_VAL(rcStrict);
1282}
1283
#ifdef IN_RING3
/**
 * Ring-3 access handler callback for MMIO ranges.
 *
 * Dispatches the access to the device's read or write path while holding the
 * IOM lock.
 *
 * @returns VINF_SUCCESS if the handler have carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhysFault     The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          Pointer to the MMIO range entry.
 */
DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;

    /* Serialize all MMIO through the IOM lock. */
    int rc = iomLock(pVM);
    AssertRC(rc);

    /* Only power-of-two sizes up to a qword are expected from PGM. */
    AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
    Assert(pRange);
    Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
    /** @todo implement per-device locks for MMIO access. It can replace the IOM
     *        lock for most of the code, provided that we retake the lock while
     *        deregistering PIOMMMIORANGE to deal with remapping/access races
     *        (unlikely, but an SMP guest shouldn't cause us to crash). */
    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

    rc = enmAccessType == PGMACCESSTYPE_READ
       ? iomMMIODoRead( pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf)
       : iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
    AssertRC(rc);

    iomUnlock(pVM);
    return rc;
}
#endif /* IN_RING3 */
1327
1328/**
1329 * Reads a MMIO register.
1330 *
1331 * @returns VBox status code.
1332 *
1333 * @param pVM VM handle.
1334 * @param GCPhys The physical address to read.
1335 * @param pu32Value Where to store the value read.
1336 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1337 */
1338VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1339{
1340 /* Take the IOM lock before performing any MMIO. */
1341 int rc = iomLock(pVM);
1342#ifndef IN_RING3
1343 if (rc == VERR_SEM_BUSY)
1344 return VINF_IOM_HC_MMIO_WRITE;
1345#endif
1346 AssertRC(rc);
1347
1348 /*
1349 * Lookup the current context range node and statistics.
1350 */
1351 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1352 AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1353 if (!pRange)
1354 {
1355 iomUnlock(pVM);
1356 return VERR_INTERNAL_ERROR;
1357 }
1358 /** @todo implement per-device locks for MMIO access. */
1359 Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1360#ifdef VBOX_WITH_STATISTICS
1361 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1362 if (!pStats)
1363 {
1364 iomUnlock(pVM);
1365# ifdef IN_RING3
1366 return VERR_NO_MEMORY;
1367# else
1368 return VINF_IOM_HC_MMIO_READ;
1369# endif
1370 }
1371 STAM_COUNTER_INC(&pStats->Accesses);
1372#endif /* VBOX_WITH_STATISTICS */
1373
1374 if (pRange->CTX_SUFF(pfnReadCallback))
1375 {
1376 /*
1377 * Perform the read and deal with the result.
1378 */
1379 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1380 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, (unsigned)cbValue);
1381 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1382 switch (rc)
1383 {
1384 case VINF_SUCCESS:
1385 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1386 iomUnlock(pVM);
1387 return rc;
1388#ifndef IN_RING3
1389 case VINF_IOM_HC_MMIO_READ:
1390 case VINF_IOM_HC_MMIO_READ_WRITE:
1391 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1392#endif
1393 default:
1394 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1395 iomUnlock(pVM);
1396 return rc;
1397
1398 case VINF_IOM_MMIO_UNUSED_00:
1399 switch (cbValue)
1400 {
1401 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1402 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1403 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1404 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1405 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1406 }
1407 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1408 iomUnlock(pVM);
1409 return VINF_SUCCESS;
1410
1411 case VINF_IOM_MMIO_UNUSED_FF:
1412 switch (cbValue)
1413 {
1414 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1415 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1416 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1417 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1418 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1419 }
1420 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1421 iomUnlock(pVM);
1422 return VINF_SUCCESS;
1423 }
1424 }
1425#ifndef IN_RING3
1426 if (pRange->pfnReadCallbackR3)
1427 {
1428 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1429 iomUnlock(pVM);
1430 return VINF_IOM_HC_MMIO_READ;
1431 }
1432#endif
1433
1434 /*
1435 * Lookup the ring-3 range.
1436 */
1437 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1438 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1439 /* Unassigned memory; this is actually not supposed to happen. */
1440 switch (cbValue)
1441 {
1442 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1443 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1444 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1445 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1446 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1447 }
1448 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1449 iomUnlock(pVM);
1450 return VINF_SUCCESS;
1451}
1452
1453
1454/**
1455 * Writes to a MMIO register.
1456 *
1457 * @returns VBox status code.
1458 *
1459 * @param pVM VM handle.
1460 * @param GCPhys The physical address to write to.
1461 * @param u32Value The value to write.
1462 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1463 */
1464VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1465{
1466 /* Take the IOM lock before performing any MMIO. */
1467 int rc = iomLock(pVM);
1468#ifndef IN_RING3
1469 if (rc == VERR_SEM_BUSY)
1470 return VINF_IOM_HC_MMIO_WRITE;
1471#endif
1472 AssertRC(rc);
1473
1474 /*
1475 * Lookup the current context range node.
1476 */
1477 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1478 AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1479 if (!pRange)
1480 {
1481 iomUnlock(pVM);
1482 return VERR_INTERNAL_ERROR;
1483 }
1484 /** @todo implement per-device locks for MMIO access. */
1485 Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1486#ifdef VBOX_WITH_STATISTICS
1487 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1488 if (!pStats)
1489 {
1490 iomUnlock(pVM);
1491# ifdef IN_RING3
1492 return VERR_NO_MEMORY;
1493# else
1494 return VINF_IOM_HC_MMIO_WRITE;
1495# endif
1496 }
1497 STAM_COUNTER_INC(&pStats->Accesses);
1498#endif /* VBOX_WITH_STATISTICS */
1499
1500 /*
1501 * Perform the write if there's a write handler. R0/GC may have
1502 * to defer it to ring-3.
1503 */
1504 if (pRange->CTX_SUFF(pfnWriteCallback))
1505 {
1506 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1507 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, &u32Value, (unsigned)cbValue);
1508 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1509#ifndef IN_RING3
1510 if ( rc == VINF_IOM_HC_MMIO_WRITE
1511 || rc == VINF_IOM_HC_MMIO_READ_WRITE)
1512 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1513#endif
1514 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
1515 iomUnlock(pVM);
1516 return rc;
1517 }
1518#ifndef IN_RING3
1519 if (pRange->pfnWriteCallbackR3)
1520 {
1521 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1522 iomUnlock(pVM);
1523 return VINF_IOM_HC_MMIO_WRITE;
1524 }
1525#endif
1526
1527 /*
1528 * No write handler, nothing to do.
1529 */
1530 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1531 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1532 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
1533 iomUnlock(pVM);
1534 return VINF_SUCCESS;
1535}
1536
1537/**
1538 * [REP*] INSB/INSW/INSD
1539 * ES:EDI,DX[,ECX]
1540 *
1541 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1542 *
1543 * @returns Strict VBox status code. Informational status codes other than the one documented
1544 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1545 * @retval VINF_SUCCESS Success.
1546 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1547 * status code must be passed on to EM.
1548 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1549 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1550 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1551 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1552 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1553 *
1554 * @param pVM The virtual machine.
1555 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1556 * @param uPort IO Port
1557 * @param uPrefix IO instruction prefix
1558 * @param cbTransfer Size of transfer unit
1559 */
1560VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1561{
1562 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
1563
1564 /*
1565 * We do not support REPNE or decrementing destination
1566 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
1567 */
1568 if ( (uPrefix & PREFIX_REPNE)
1569 || pRegFrame->eflags.Bits.u1DF)
1570 return VINF_EM_RAW_EMULATE_INSTR;
1571
1572 PVMCPU pVCpu = VMMGetCpu(pVM);
1573
1574 /*
1575 * Get bytes/words/dwords count to transfer.
1576 */
1577 RTGCUINTREG cTransfers = 1;
1578 if (uPrefix & PREFIX_REP)
1579 {
1580#ifndef IN_RC
1581 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
1582 && pRegFrame->rcx >= _4G)
1583 return VINF_EM_RAW_EMULATE_INSTR;
1584#endif
1585 cTransfers = pRegFrame->ecx;
1586
1587 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1588 cTransfers &= 0xffff;
1589
1590 if (!cTransfers)
1591 return VINF_SUCCESS;
1592 }
1593
1594 /* Convert destination address es:edi. */
1595 RTGCPTR GCPtrDst;
1596 int rc2 = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1597 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1598 &GCPtrDst);
1599 if (RT_FAILURE(rc2))
1600 {
1601 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
1602 return VINF_EM_RAW_EMULATE_INSTR;
1603 }
1604
1605 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
1606 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
1607
1608 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
1609 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1610 if (rc2 != VINF_SUCCESS)
1611 {
1612 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
1613 return VINF_EM_RAW_EMULATE_INSTR;
1614 }
1615
1616 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1617 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1618 if (cTransfers > 1)
1619 {
1620 /* If the device supports string transfers, ask it to do as
1621 * much as it wants. The rest is done with single-word transfers. */
1622 const RTGCUINTREG cTransfersOrg = cTransfers;
1623 rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
1624 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
1625 pRegFrame->rdi += (cTransfersOrg - cTransfers) * cbTransfer;
1626 }
1627
1628#ifdef IN_RC
1629 MMGCRamRegisterTrapHandler(pVM);
1630#endif
1631 while (cTransfers && rcStrict == VINF_SUCCESS)
1632 {
1633 uint32_t u32Value;
1634 rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
1635 if (!IOM_SUCCESS(rcStrict))
1636 break;
1637 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
1638 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1639 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
1640 pRegFrame->rdi += cbTransfer;
1641 cTransfers--;
1642 }
1643#ifdef IN_RC
1644 MMGCRamDeregisterTrapHandler(pVM);
1645#endif
1646
1647 /* Update ecx on exit. */
1648 if (uPrefix & PREFIX_REP)
1649 pRegFrame->ecx = cTransfers;
1650
1651 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1652 return rcStrict;
1653}
1654
1655
1656/**
1657 * [REP*] INSB/INSW/INSD
1658 * ES:EDI,DX[,ECX]
1659 *
1660 * @returns Strict VBox status code. Informational status codes other than the one documented
1661 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1662 * @retval VINF_SUCCESS Success.
1663 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1664 * status code must be passed on to EM.
1665 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1666 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1667 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1668 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1669 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1670 *
1671 * @param pVM The virtual machine.
1672 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1673 * @param pCpu Disassembler CPU state.
1674 */
1675VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1676{
1677 /*
1678 * Get port number directly from the register (no need to bother the
1679 * disassembler). And get the I/O register size from the opcode / prefix.
1680 */
1681 RTIOPORT Port = pRegFrame->edx & 0xffff;
1682 unsigned cb = 0;
1683 if (pCpu->pCurInstr->opcode == OP_INSB)
1684 cb = 1;
1685 else
1686 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1687
1688 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1689 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1690 {
1691 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1692 return rcStrict;
1693 }
1694
1695 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1696}
1697
1698
1699/**
1700 * [REP*] OUTSB/OUTSW/OUTSD
1701 * DS:ESI,DX[,ECX]
1702 *
1703 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1704 *
1705 * @returns Strict VBox status code. Informational status codes other than the one documented
1706 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1707 * @retval VINF_SUCCESS Success.
1708 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1709 * status code must be passed on to EM.
1710 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1711 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1712 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1713 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1714 *
1715 * @param pVM The virtual machine.
1716 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1717 * @param uPort IO Port
1718 * @param uPrefix IO instruction prefix
1719 * @param cbTransfer Size of transfer unit
1720 */
1721VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1722{
1723 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
1724
1725 /*
1726 * We do not support segment prefixes, REPNE or
1727 * decrementing source pointer.
1728 */
1729 if ( (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
1730 || pRegFrame->eflags.Bits.u1DF)
1731 return VINF_EM_RAW_EMULATE_INSTR;
1732
1733 PVMCPU pVCpu = VMMGetCpu(pVM);
1734
1735 /*
1736 * Get bytes/words/dwords count to transfer.
1737 */
1738 RTGCUINTREG cTransfers = 1;
1739 if (uPrefix & PREFIX_REP)
1740 {
1741#ifndef IN_RC
1742 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
1743 && pRegFrame->rcx >= _4G)
1744 return VINF_EM_RAW_EMULATE_INSTR;
1745#endif
1746 cTransfers = pRegFrame->ecx;
1747 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1748 cTransfers &= 0xffff;
1749
1750 if (!cTransfers)
1751 return VINF_SUCCESS;
1752 }
1753
1754 /* Convert source address ds:esi. */
1755 RTGCPTR GCPtrSrc;
1756 int rc2 = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
1757 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1758 &GCPtrSrc);
1759 if (RT_FAILURE(rc2))
1760 {
1761 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
1762 return VINF_EM_RAW_EMULATE_INSTR;
1763 }
1764
1765 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1766 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
1767 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
1768 (cpl == 3) ? X86_PTE_US : 0);
1769 if (rc2 != VINF_SUCCESS)
1770 {
1771 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
1772 return VINF_EM_RAW_EMULATE_INSTR;
1773 }
1774
1775 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1776 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1777 if (cTransfers > 1)
1778 {
1779 /*
1780 * If the device supports string transfers, ask it to do as
1781 * much as it wants. The rest is done with single-word transfers.
1782 */
1783 const RTGCUINTREG cTransfersOrg = cTransfers;
1784 rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
1785 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
1786 pRegFrame->rsi += (cTransfersOrg - cTransfers) * cbTransfer;
1787 }
1788
1789#ifdef IN_RC
1790 MMGCRamRegisterTrapHandler(pVM);
1791#endif
1792
1793 while (cTransfers && rcStrict == VINF_SUCCESS)
1794 {
1795 uint32_t u32Value = 0;
1796 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
1797 if (rcStrict != VINF_SUCCESS)
1798 break;
1799 rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
1800 if (!IOM_SUCCESS(rcStrict))
1801 break;
1802 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
1803 pRegFrame->rsi += cbTransfer;
1804 cTransfers--;
1805 }
1806
1807#ifdef IN_RC
1808 MMGCRamDeregisterTrapHandler(pVM);
1809#endif
1810
1811 /* Update ecx on exit. */
1812 if (uPrefix & PREFIX_REP)
1813 pRegFrame->ecx = cTransfers;
1814
1815 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1816 return rcStrict;
1817}
1818
1819
1820/**
1821 * [REP*] OUTSB/OUTSW/OUTSD
1822 * DS:ESI,DX[,ECX]
1823 *
1824 * @returns Strict VBox status code. Informational status codes other than the one documented
1825 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1826 * @retval VINF_SUCCESS Success.
1827 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1828 * status code must be passed on to EM.
1829 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1830 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1831 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1832 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1833 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1834 *
1835 * @param pVM The virtual machine.
1836 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1837 * @param pCpu Disassembler CPU state.
1838 */
1839VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1840{
1841 /*
1842 * Get port number from the first parameter.
1843 * And get the I/O register size from the opcode / prefix.
1844 */
1845 uint64_t Port = 0;
1846 unsigned cb = 0;
1847 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1848 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1849 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1850 cb = 1;
1851 else
1852 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1853
1854 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1855 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1856 {
1857 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1858 return rcStrict;
1859 }
1860
1861 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1862}
1863
1864
1865#ifndef IN_RC
1866/**
1867 * Mapping an MMIO2 page in place of an MMIO page for direct access.
1868 *
1869 * (This is a special optimization used by the VGA device.)
1870 *
1871 * @returns VBox status code.
1872 *
1873 * @param pVM The virtual machine.
1874 * @param GCPhys The address of the MMIO page to be changed.
1875 * @param GCPhysRemapped The address of the MMIO2 page.
1876 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1877 * for the time being.
1878 */
1879VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
1880{
1881 /* Currently only called from the VGA device during MMIO. */
1882 Assert(IOMIsLockOwner(pVM));
1883 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
1884
1885 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1886
1887 PVMCPU pVCpu = VMMGetCpu(pVM);
1888
1889 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1890 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1891 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
1892 && !HWACCMIsNestedPagingActive(pVM)))
1893 return VINF_SUCCESS; /* ignore */
1894
1895 /*
1896 * Lookup the context range node the page belongs to.
1897 */
1898 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1899 AssertMsgReturn(pRange,
1900 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
1901
1902 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1903 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1904
1905 /*
1906 * Do the aliasing; page align the addresses since PGM is picky.
1907 */
1908 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1909 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1910
1911 int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
1912 AssertRCReturn(rc, rc);
1913
1914 /*
1915 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
1916 * can simply prefetch it.
1917 *
1918 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
1919 */
1920#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
1921# ifdef VBOX_STRICT
1922 uint64_t fFlags;
1923 RTHCPHYS HCPhys;
1924 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1925 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1926# endif
1927#endif
1928 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
1929 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1930 return VINF_SUCCESS;
1931}
1932
1933/**
1934 * Mapping a HC page in place of an MMIO page for direct access.
1935 *
1936 * (This is a special optimization used by the APIC in the VT-x case.)
1937 *
1938 * @returns VBox status code.
1939 *
1940 * @param pVM The virtual machine.
1941 * @param GCPhys The address of the MMIO page to be changed.
1942 * @param HCPhys The address of the host physical page.
1943 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1944 * for the time being.
1945 */
1946VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
1947{
1948 /* Currently only called from VT-x code during a page fault. */
1949 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
1950
1951 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1952 Assert(HWACCMIsEnabled(pVM));
1953
1954 PVMCPU pVCpu = VMMGetCpu(pVM);
1955
1956 /*
1957 * Lookup the context range node the page belongs to.
1958 */
1959#ifdef VBOX_STRICT
1960 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
1961 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(&pVM->iom.s, GCPhys);
1962 AssertMsgReturn(pRange,
1963 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
1964 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1965 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1966#endif
1967
1968 /*
1969 * Do the aliasing; page align the addresses since PGM is picky.
1970 */
1971 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1972 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
1973
1974 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
1975 AssertRCReturn(rc, rc);
1976
1977 /*
1978 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
1979 * can simply prefetch it.
1980 *
1981 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
1982 */
1983 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
1984 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1985 return VINF_SUCCESS;
1986}
1987
1988/**
1989 * Reset a previously modified MMIO region; restore the access flags.
1990 *
1991 * @returns VBox status code.
1992 *
1993 * @param pVM The virtual machine.
1994 * @param GCPhys Physical address that's part of the MMIO region to be reset.
1995 */
1996VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
1997{
1998 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
1999
2000 PVMCPU pVCpu = VMMGetCpu(pVM);
2001
2002 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2003 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2004 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2005 && !HWACCMIsNestedPagingActive(pVM)))
2006 return VINF_SUCCESS; /* ignore */
2007
2008 /*
2009 * Lookup the context range node the page belongs to.
2010 */
2011#ifdef VBOX_STRICT
2012 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2013 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(&pVM->iom.s, GCPhys);
2014 AssertMsgReturn(pRange,
2015 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2016 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2017 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2018#endif
2019
2020 /*
2021 * Call PGM to do the job work.
2022 *
2023 * After the call, all the pages should be non-present... unless there is
2024 * a page pool flush pending (unlikely).
2025 */
2026 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2027 AssertRC(rc);
2028
2029#ifdef VBOX_STRICT
2030 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2031 {
2032 uint32_t cb = pRange->cb;
2033 GCPhys = pRange->GCPhys;
2034 while (cb)
2035 {
2036 uint64_t fFlags;
2037 RTHCPHYS HCPhys;
2038 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2039 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2040 cb -= PAGE_SIZE;
2041 GCPhys += PAGE_SIZE;
2042 }
2043 }
2044#endif
2045 return rc;
2046}
2047#endif /* !IN_RC */
2048
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette