1/* $Id: IOMAllMMIO.cpp 18666 2009-04-02 23:10:12Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_IOM
27#include <VBox/iom.h>
28#include <VBox/cpum.h>
29#include <VBox/pgm.h>
30#include <VBox/selm.h>
31#include <VBox/mm.h>
32#include <VBox/em.h>
33#include <VBox/pgm.h>
34#include <VBox/trpm.h>
35#include "IOMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/hwaccm.h>
38
39#include <VBox/dis.h>
40#include <VBox/disopcode.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48
49/*******************************************************************************
50* Global Variables *
51*******************************************************************************/
52
53/**
54 * Array for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
55 */
56static const unsigned g_aSize2Shift[] =
57{
58 ~0, /* 0 - invalid */
59 0, /* *1 == 2^0 */
60 1, /* *2 == 2^1 */
61 ~0, /* 3 - invalid */
62 2, /* *4 == 2^2 */
63 ~0, /* 5 - invalid */
64 ~0, /* 6 - invalid */
65 ~0, /* 7 - invalid */
66 3 /* *8 == 2^3 */
67};
68
69/**
70 * Macro for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
71 */
72#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
73
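/*
 * Illustrative use of the table and macro above (not part of the original
 * sources): SIZE_2_SHIFT(4) yields 2, so a REP transfer count is turned into
 * a byte count with a shift instead of a multiplication, e.g.
 *     cbBytes = cTransfers << SIZE_2_SHIFT(cb);   equivalent to cTransfers * cb
 * Indexing with an unsupported size (0, 3, 5, 6 or 7) returns ~0, which is
 * never a valid shift count; the callers rely on the disassembler only
 * producing 1, 2, 4 or 8 byte operands.
 */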
74
75/**
76 * Wrapper which does the write and updates range statistics when such are enabled.
77 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
78 */
79DECLINLINE(int) iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
80{
81#ifdef VBOX_WITH_STATISTICS
82 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
83 Assert(pStats);
84#endif
85
86 int rc;
87 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
88 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /* @todo fix const!! */
89 else
90 rc = VINF_SUCCESS;
91 if (rc != VINF_IOM_HC_MMIO_WRITE)
92 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
93 return rc;
94}
95
96
97/**
98 * Wrapper which does the read and updates range statistics when such are enabled.
99 */
100DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
101{
102#ifdef VBOX_WITH_STATISTICS
103 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
104 Assert(pStats);
105#endif
106
107 int rc;
108 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
109 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
110 else
111 rc = VINF_IOM_MMIO_UNUSED_FF;
112 if (rc != VINF_SUCCESS)
113 {
114 switch (rc)
115 {
116 case VINF_IOM_MMIO_UNUSED_FF:
117 switch (cbValue)
118 {
119 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
120 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
121 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
122 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
123 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
124 }
125 rc = VINF_SUCCESS;
126 break;
127
128 case VINF_IOM_MMIO_UNUSED_00:
129 switch (cbValue)
130 {
131 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
132 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
133 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
134 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
135 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
136 }
137 rc = VINF_SUCCESS;
138 break;
139 }
140 if (rc != VINF_IOM_HC_MMIO_READ)
141 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
142 }
143 return rc;
144}
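/*
 * Sketch of a device read callback cooperating with the logic above.  The
 * device is hypothetical (myDevMMIORead and its register layout are made up),
 * shown only to illustrate the VINF_IOM_MMIO_UNUSED_* convention:
 *
 *     static DECLCALLBACK(int) myDevMMIORead(PPDMDEVINS pDevIns, void *pvUser,
 *                                            RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
 *     {
 *         if (GCPhysAddr & 0x80)                  // unimplemented register window
 *             return VINF_IOM_MMIO_UNUSED_FF;     // iomMMIODoRead fills in 0xff..ff
 *         *(uint32_t *)pv = 0x12345678;           // ordinary register read
 *         return VINF_SUCCESS;
 *     }
 */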
145
146
147/**
148 * Internal - statistics only.
149 */
150DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
151{
152#ifdef VBOX_WITH_STATISTICS
153 switch (cb)
154 {
155 case 1:
156 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
157 break;
158 case 2:
159 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
160 break;
161 case 4:
162 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
163 break;
164 case 8:
165 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
166 break;
167 default:
168 /* Should never happen; only 1, 2, 4 and 8 byte accesses reach this point. */
169 AssertMsgFailed(("Invalid data length %d\n", cb));
170 break;
171 }
172#else
173 NOREF(pVM); NOREF(cb);
174#endif
175}
176
177
178/**
179 * MOV reg, mem (read)
180 * MOVZX reg, mem (read)
181 * MOVSX reg, mem (read)
182 *
183 * @returns VBox status code.
184 *
185 * @param pVM The virtual machine.
186 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
187 * @param pCpu Disassembler CPU state.
188 * @param pRange Pointer to the MMIO range.
189 * @param GCPhysFault The GC physical address corresponding to pvFault.
190 */
191static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
192{
193 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
194
195 /*
196 * Get the data size from parameter 2,
197 * and call the handler function to get the data.
198 */
199 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
200 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
201
202 uint64_t u64Data = 0;
203 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
204 if (rc == VINF_SUCCESS)
205 {
206 /*
207 * Do sign extension for MOVSX.
208 */
209 /** @todo check the MOVSX implementation! */
210 if (pCpu->pCurInstr->opcode == OP_MOVSX)
211 {
212 if (cb == 1)
213 {
214 /* DWORD <- BYTE */
215 int64_t iData = (int8_t)u64Data;
216 u64Data = (uint64_t)iData;
217 }
218 else
219 {
220 /* DWORD <- WORD */
221 int64_t iData = (int16_t)u64Data;
222 u64Data = (uint64_t)iData;
223 }
224 }
225
226 /*
227 * Store the result to register (parameter 1).
228 */
229 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
230 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
231 }
232
233 if (rc == VINF_SUCCESS)
234 iomMMIOStatLength(pVM, cb);
235 return rc;
236}
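/*
 * Example of the sign extension above (illustration only): a MOVSX that reads
 * the byte 0x80 from MMIO produces
 *     (int8_t)0x80 == -128   ->   u64Data == UINT64_C(0xffffffffffffff80)
 * whereas a MOVZX of the same byte leaves u64Data == 0x80.
 */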
237
238
239/**
240 * MOV mem, reg|imm (write)
241 *
242 * @returns VBox status code.
243 *
244 * @param pVM The virtual machine.
245 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
246 * @param pCpu Disassembler CPU state.
247 * @param pRange Pointer to the MMIO range.
248 * @param GCPhysFault The GC physical address corresponding to pvFault.
249 */
250static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
251{
252 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
253
254 /*
255 * Get data to write from second parameter,
256 * and call the callback to write it.
257 */
258 unsigned cb = 0;
259 uint64_t u64Data = 0;
260 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
261 AssertMsg(fRc, ("Failed to get reg/imm data to write!\n")); NOREF(fRc);
262
263 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
264 if (rc == VINF_SUCCESS)
265 iomMMIOStatLength(pVM, cb);
266 return rc;
267}
268
269
270/** Wrapper for reading virtual memory. */
271DECLINLINE(int) iomRamRead(PVM pVM, void *pDest, RTGCPTR GCSrc, uint32_t cb)
272{
273 /* Note: This will fail in R0 or RC if it hits an access handler. That
274 isn't a problem though since the operation can be restarted in REM. */
275#ifdef IN_RC
276 return MMGCRamReadNoTrapHandler(pDest, (void *)GCSrc, cb);
277#else
278 return PGMPhysReadGCPtr(pVM, pDest, GCSrc, cb);
279#endif
280}
281
282
283/** Wrapper for writing virtual memory. */
284DECLINLINE(int) iomRamWrite(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
285{
286 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
287 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
288 * as well since we're not behind the pgm lock and the handler may change between calls.
289 * MMGCRamWriteNoTrapHandler may also trap if the page isn't shadowed, or was kicked
290 * out from both the shadow pt (SMP or our changes) and TLB.
291 *
292 * Currently MMGCRamWriteNoTrapHandler may also fail when it hits a write access handler.
293 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr OTOH may mess up the state
294 * of some shadowed structure in R0. */
295#ifdef IN_RC
296 NOREF(pCtxCore);
297 return MMGCRamWriteNoTrapHandler((void *)GCPtrDst, pvSrc, cb);
298#elif defined(IN_RING0)
299 return PGMPhysInterpretedWriteNoHandlers(pVM, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
300#else
301 NOREF(pCtxCore);
302 return PGMPhysWriteGCPtr(pVM, GCPtrDst, pvSrc, cb);
303#endif
304}
305
306
307#ifdef IOM_WITH_MOVS_SUPPORT
308/**
309 * [REP] MOVSB
310 * [REP] MOVSW
311 * [REP] MOVSD
312 *
313 * Restricted implementation.
314 *
315 *
316 * @returns VBox status code.
317 *
318 * @param pVM The virtual machine.
319 * @param uErrorCode CPU Error code.
320 * @param pRegFrame Trap register frame.
321 * @param GCPhysFault The GC physical address corresponding to pvFault.
322 * @param pCpu Disassembler CPU state.
323 * @param pRange Pointer to the MMIO range.
324 * @param ppStat Which sub-sample to attribute this call to.
325 */
326static int iomInterpretMOVS(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
327{
328 /*
329 * We do not support segment prefixes or REPNE.
330 */
331 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
332 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
333
334
335 /*
336 * Get bytes/words/dwords/qword count to copy.
337 */
338 uint32_t cTransfers = 1;
339 if (pCpu->prefix & PREFIX_REP)
340 {
341#ifndef IN_RC
342 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
343 && pRegFrame->rcx >= _4G)
344 return VINF_EM_RAW_EMULATE_INSTR;
345#endif
346
347 cTransfers = pRegFrame->ecx;
348 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
349 cTransfers &= 0xffff;
350
351 if (!cTransfers)
352 return VINF_SUCCESS;
353 }
354
355 /* Get the current privilege level. */
356 uint32_t cpl = CPUMGetGuestCPL(pVM, pRegFrame);
357
358 /*
359 * Get data size.
360 */
361 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
362 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
363 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
364
365#ifdef VBOX_WITH_STATISTICS
366 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
367 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
368#endif
369
370/** @todo re-evaluate on page boundaries. */
371
372 RTGCPHYS Phys = GCPhysFault;
373 int rc;
374 if (uErrorCode & X86_TRAP_PF_RW)
375 {
376 /*
377 * Write operation: [Mem] -> [MMIO]
378 * ds:esi (Virt Src) -> es:edi (Phys Dst)
379 */
380 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
381
382 /* Check callback. */
383 if (!pRange->CTX_SUFF(pfnWriteCallback))
384 return VINF_IOM_HC_MMIO_WRITE;
385
386 /* Convert source address ds:esi. */
387 RTGCUINTPTR pu8Virt;
388 rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
389 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
390 (PRTGCPTR)&pu8Virt);
391 if (RT_SUCCESS(rc))
392 {
393
394 /* Access verification first; we currently can't recover properly from traps inside this instruction */
395 rc = PGMVerifyAccess(pVM, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
396 if (rc != VINF_SUCCESS)
397 {
398 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
399 return VINF_EM_RAW_EMULATE_INSTR;
400 }
401
402#ifdef IN_RC
403 MMGCRamRegisterTrapHandler(pVM);
404#endif
405
406 /* copy loop. */
407 while (cTransfers)
408 {
409 uint32_t u32Data = 0;
410 rc = iomRamRead(pVM, &u32Data, (RTGCPTR)pu8Virt, cb);
411 if (rc != VINF_SUCCESS)
412 break;
413 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
414 if (rc != VINF_SUCCESS)
415 break;
416
417 pu8Virt += offIncrement;
418 Phys += offIncrement;
419 pRegFrame->rsi += offIncrement;
420 pRegFrame->rdi += offIncrement;
421 cTransfers--;
422 }
423#ifdef IN_RC
424 MMGCRamDeregisterTrapHandler(pVM);
425#endif
426 /* Update ecx. */
427 if (pCpu->prefix & PREFIX_REP)
428 pRegFrame->ecx = cTransfers;
429 }
430 else
431 rc = VINF_IOM_HC_MMIO_READ_WRITE;
432 }
433 else
434 {
435 /*
436 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
437 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
438 */
439 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
440
441 /* Check callback. */
442 if (!pRange->CTX_SUFF(pfnReadCallback))
443 return VINF_IOM_HC_MMIO_READ;
444
445 /* Convert destination address. */
446 RTGCUINTPTR pu8Virt;
447 rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
448 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
449 (RTGCPTR *)&pu8Virt);
450 if (RT_FAILURE(rc))
451 return VINF_IOM_HC_MMIO_READ;
452
453 /* Check if destination address is MMIO. */
454 PIOMMMIORANGE pMMIODst;
455 RTGCPHYS PhysDst;
456 rc = PGMGstGetPage(pVM, (RTGCPTR)pu8Virt, NULL, &PhysDst);
457 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
458 if ( RT_SUCCESS(rc)
459 && (pMMIODst = iomMMIOGetRange(&pVM->iom.s, PhysDst)))
460 {
461 /*
462 * Extra: [MMIO] -> [MMIO]
463 */
464 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
465 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
466 return VINF_IOM_HC_MMIO_READ_WRITE;
467
468 /* copy loop. */
469 while (cTransfers)
470 {
471 uint32_t u32Data;
472 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
473 if (rc != VINF_SUCCESS)
474 break;
475 rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
476 if (rc != VINF_SUCCESS)
477 break;
478
479 Phys += offIncrement;
480 PhysDst += offIncrement;
481 pRegFrame->rsi += offIncrement;
482 pRegFrame->rdi += offIncrement;
483 cTransfers--;
484 }
485 }
486 else
487 {
488 /*
489 * Normal: [MMIO] -> [Mem]
490 */
491 /* Access verification first; we currently can't recover properly from traps inside this instruction */
492 rc = PGMVerifyAccess(pVM, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
493 if (rc != VINF_SUCCESS)
494 {
495 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
496 return VINF_EM_RAW_EMULATE_INSTR;
497 }
498
499 /* copy loop. */
500#ifdef IN_RC
501 MMGCRamRegisterTrapHandler(pVM);
502#endif
503 while (cTransfers)
504 {
505 uint32_t u32Data;
506 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
507 if (rc != VINF_SUCCESS)
508 break;
509 rc = iomRamWrite(pVM, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
510 if (rc != VINF_SUCCESS)
511 {
512 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
513 break;
514 }
515
516 pu8Virt += offIncrement;
517 Phys += offIncrement;
518 pRegFrame->rsi += offIncrement;
519 pRegFrame->rdi += offIncrement;
520 cTransfers--;
521 }
522#ifdef IN_RC
523 MMGCRamDeregisterTrapHandler(pVM);
524#endif
525 }
526
527 /* Update ecx on exit. */
528 if (pCpu->prefix & PREFIX_REP)
529 pRegFrame->ecx = cTransfers;
530 }
531
532 /* work statistics. */
533 if (rc == VINF_SUCCESS)
534 iomMMIOStatLength(pVM, cb);
535 NOREF(ppStat);
536 return rc;
537}
538#endif /* IOM_WITH_MOVS_SUPPORT */
539
540
541/**
542 * [REP] STOSB
543 * [REP] STOSW
544 * [REP] STOSD
545 *
546 * Restricted implementation.
547 *
548 *
549 * @returns VBox status code.
550 *
551 * @param pVM The virtual machine.
552 * @param pRegFrame Trap register frame.
553 * @param GCPhysFault The GC physical address corresponding to pvFault.
554 * @param pCpu Disassembler CPU state.
555 * @param pRange Pointer to the MMIO range.
556 */
557static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
558{
559 /*
560 * We do not support segment prefixes or REPNE.
561 */
562 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
563 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
564
565 /*
566 * Get bytes/words/dwords count to copy.
567 */
568 uint32_t cTransfers = 1;
569 if (pCpu->prefix & PREFIX_REP)
570 {
571#ifndef IN_RC
572 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
573 && pRegFrame->rcx >= _4G)
574 return VINF_EM_RAW_EMULATE_INSTR;
575#endif
576
577 cTransfers = pRegFrame->ecx;
578 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
579 cTransfers &= 0xffff;
580
581 if (!cTransfers)
582 return VINF_SUCCESS;
583 }
584
585/** @todo r=bird: bounds checks! */
586
587 /*
588 * Get data size.
589 */
590 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
591 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
592 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
593
594#ifdef VBOX_WITH_STATISTICS
595 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
596 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
597#endif
598
599
600 RTGCPHYS Phys = GCPhysFault;
601 uint32_t u32Data = pRegFrame->eax;
602 int rc;
603 if (pRange->CTX_SUFF(pfnFillCallback))
604 {
605 /*
606 * Use the fill callback.
607 */
608 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
609 if (offIncrement > 0)
610 {
611 /* addr++ variant. */
612 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys, u32Data, cb, cTransfers);
613 if (rc == VINF_SUCCESS)
614 {
615 /* Update registers. */
616 pRegFrame->rdi += cTransfers << SIZE_2_SHIFT(cb);
617 if (pCpu->prefix & PREFIX_REP)
618 pRegFrame->ecx = 0;
619 }
620 }
621 else
622 {
623 /* addr-- variant. */
624 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)), u32Data, cb, cTransfers);
625 if (rc == VINF_SUCCESS)
626 {
627 /* Update registers. */
628 pRegFrame->rdi -= cTransfers << SIZE_2_SHIFT(cb);
629 if (pCpu->prefix & PREFIX_REP)
630 pRegFrame->ecx = 0;
631 }
632 }
633 }
634 else
635 {
636 /*
637 * Use the write callback.
638 */
639 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
640
641 /* fill loop. */
642 do
643 {
644 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
645 if (rc != VINF_SUCCESS)
646 break;
647
648 Phys += offIncrement;
649 pRegFrame->rdi += offIncrement;
650 cTransfers--;
651 } while (cTransfers);
652
653 /* Update ecx on exit. */
654 if (pCpu->prefix & PREFIX_REP)
655 pRegFrame->ecx = cTransfers;
656 }
657
658 /*
659 * Work statistics and return.
660 */
661 if (rc == VINF_SUCCESS)
662 iomMMIOStatLength(pVM, cb);
663 return rc;
664}
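/*
 * Worked example for the fill-callback addressing above (illustration only):
 * with EFLAGS.DF set, cb = 4 and cTransfers = 3, a fault at Phys = 0x1008
 * covers the stores to 0x1008, 0x1004 and 0x1000, so the lowest address
 * handed to pfnFillCallback is
 *     Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)) = 0x1008 - 8 = 0x1000.
 */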
665
666
667/**
668 * [REP] LODSB
669 * [REP] LODSW
670 * [REP] LODSD
671 *
672 * Restricted implementation.
673 *
674 *
675 * @returns VBox status code.
676 *
677 * @param pVM The virtual machine.
678 * @param pRegFrame Trap register frame.
679 * @param GCPhysFault The GC physical address corresponding to pvFault.
680 * @param pCpu Disassembler CPU state.
681 * @param pRange Pointer to the MMIO range.
682 */
683static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
684{
685 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
686
687 /*
688 * We do not support segment prefixes or REP*.
689 */
690 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
691 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
692
693 /*
694 * Get data size.
695 */
696 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
697 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
698 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
699
700 /*
701 * Perform read.
702 */
703 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
704 if (rc == VINF_SUCCESS)
705 pRegFrame->rsi += offIncrement;
706
707 /*
708 * Work statistics and return.
709 */
710 if (rc == VINF_SUCCESS)
711 iomMMIOStatLength(pVM, cb);
712 return rc;
713}
714
715
716/**
717 * CMP [MMIO], reg|imm
718 * CMP reg|imm, [MMIO]
719 *
720 * Restricted implementation.
721 *
722 *
723 * @returns VBox status code.
724 *
725 * @param pVM The virtual machine.
726 * @param pRegFrame Trap register frame.
727 * @param GCPhysFault The GC physical address corresponding to pvFault.
728 * @param pCpu Disassembler CPU state.
729 * @param pRange Pointer to the MMIO range.
730 */
731static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
732{
733 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
734
735 /*
736 * Get the operands.
737 */
738 unsigned cb = 0;
739 uint64_t uData1 = 0;
740 uint64_t uData2 = 0;
741 int rc;
742 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
743 /* cmp reg, [MMIO]. */
744 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
745 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
746 /* cmp [MMIO], reg|imm. */
747 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
748 else
749 {
750 AssertMsgFailed(("Disassembler CMP problem.\n"));
751 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
752 }
753
754 if (rc == VINF_SUCCESS)
755 {
756 /* Emulate CMP and update guest flags. */
757 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
758 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
759 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
760 iomMMIOStatLength(pVM, cb);
761 }
762
763 return rc;
764}
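/*
 * Example of the EFLAGS merge above (illustration only): only the arithmetic
 * flags produced by EMEmulateCmp (CF, PF, AF, ZF, SF, OF) are copied into the
 * guest context; all other EFLAGS bits, e.g. IF and the reserved bits, are
 * preserved from pRegFrame->eflags.
 */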
765
766
767/**
768 * AND [MMIO], reg|imm
769 * AND reg, [MMIO]
770 * OR [MMIO], reg|imm
771 * OR reg, [MMIO]
772 *
773 * Restricted implementation.
774 *
775 *
776 * @returns VBox status code.
777 *
778 * @param pVM The virtual machine.
779 * @param pRegFrame Trap register frame.
780 * @param GCPhysFault The GC physical address corresponding to pvFault.
781 * @param pCpu Disassembler CPU state.
782 * @param pRange Pointer to the MMIO range.
783 * @param pfnEmulate Instruction emulation function.
784 */
785static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
786{
787 unsigned cb = 0;
788 uint64_t uData1 = 0;
789 uint64_t uData2 = 0;
790 bool fAndWrite;
791 int rc;
792
793#ifdef LOG_ENABLED
794 const char *pszInstr;
795
796 if (pCpu->pCurInstr->opcode == OP_XOR)
797 pszInstr = "Xor";
798 else if (pCpu->pCurInstr->opcode == OP_OR)
799 pszInstr = "Or";
800 else if (pCpu->pCurInstr->opcode == OP_AND)
801 pszInstr = "And";
802 else
803 pszInstr = "OrXorAnd??";
804#endif
805
806 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
807 {
808 /* and reg, [MMIO]. */
809 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
810 fAndWrite = false;
811 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
812 }
813 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
814 {
815 /* and [MMIO], reg|imm. */
816 fAndWrite = true;
817 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
818 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
819 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
820 else
821 rc = VINF_IOM_HC_MMIO_READ_WRITE;
822 }
823 else
824 {
825 AssertMsgFailed(("Disassembler AND problem.\n"));
826 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
827 }
828
829 if (rc == VINF_SUCCESS)
830 {
831 /* Emulate AND and update guest flags. */
832 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
833
834 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
835
836 if (fAndWrite)
837 /* Store result to MMIO. */
838 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
839 else
840 {
841 /* Store result to register. */
842 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
843 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
844 }
845 if (rc == VINF_SUCCESS)
846 {
847 /* Update guest's eflags and finish. */
848 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
849 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
850 iomMMIOStatLength(pVM, cb);
851 }
852 }
853
854 return rc;
855}
856
857
858/**
859 * TEST [MMIO], reg|imm
860 * TEST reg, [MMIO]
861 *
862 * Restricted implementation.
863 *
864 *
865 * @returns VBox status code.
866 *
867 * @param pVM The virtual machine.
868 * @param pRegFrame Trap register frame.
869 * @param GCPhysFault The GC physical address corresponding to pvFault.
870 * @param pCpu Disassembler CPU state.
871 * @param pRange Pointer to the MMIO range.
872 */
873static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
874{
875 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
876
877 unsigned cb = 0;
878 uint64_t uData1 = 0;
879 uint64_t uData2 = 0;
880 int rc;
881
882 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
883 {
884 /* test reg, [MMIO]. */
885 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
886 }
887 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
888 {
889 /* test [MMIO], reg|imm. */
890 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
891 }
892 else
893 {
894 AssertMsgFailed(("Disassembler TEST problem.\n"));
895 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
896 }
897
898 if (rc == VINF_SUCCESS)
899 {
900 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
901 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
902 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
903 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
904 iomMMIOStatLength(pVM, cb);
905 }
906
907 return rc;
908}
909
910
911/**
912 * BT [MMIO], reg|imm
913 *
914 * Restricted implementation.
915 *
916 *
917 * @returns VBox status code.
918 *
919 * @param pVM The virtual machine.
920 * @param pRegFrame Trap register frame.
921 * @param GCPhysFault The GC physical address corresponding to pvFault.
922 * @param pCpu Disassembler CPU state.
923 * @param pRange Pointer to the MMIO range.
924 */
925static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
926{
927 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
928
929 uint64_t uBit = 0;
930 uint64_t uData1 = 0;
931 unsigned cb = 0;
932 int rc;
933
934 if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cb))
935 {
936 /* bt [MMIO], reg|imm. */
937 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
938 }
939 else
940 {
941 AssertMsgFailed(("Disassembler BT problem.\n"));
942 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
943 }
944
945 if (rc == VINF_SUCCESS)
946 {
947 /* The size of the memory operand only matters here. */
948 cb = DISGetParamSize(pCpu, &pCpu->param1);
949
950 /* Find the bit inside the faulting address */
951 uBit &= (cb*8 - 1);
952
953 pRegFrame->eflags.Bits.u1CF = (uData1 >> uBit);
954 iomMMIOStatLength(pVM, cb);
955 }
956
957 return rc;
958}
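/*
 * Example for the restricted BT handling above (illustration only): with a
 * dword operand cb is 4, so a bit offset of 35 is masked to 35 & 31 = 3 and
 * CF is loaded from bit 3 of the dword read from MMIO.  The full BT semantics
 * of addressing bits outside the faulting operand are not emulated here.
 */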
959
960/**
961 * XCHG [MMIO], reg
962 * XCHG reg, [MMIO]
963 *
964 * Restricted implementation.
965 *
966 *
967 * @returns VBox status code.
968 *
969 * @param pVM The virtual machine.
970 * @param pRegFrame Trap register frame.
971 * @param GCPhysFault The GC physical address corresponding to pvFault.
972 * @param pCpu Disassembler CPU state.
973 * @param pRange Pointer to the MMIO range.
974 */
975static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
976{
977 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
978 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
979 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
980 return VINF_IOM_HC_MMIO_READ_WRITE;
981
982 int rc;
983 unsigned cb = 0;
984 uint64_t uData1 = 0;
985 uint64_t uData2 = 0;
986 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
987 {
988 /* xchg reg, [MMIO]. */
989 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
990 if (rc == VINF_SUCCESS)
991 {
992 /* Store result to MMIO. */
993 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
994
995 if (rc == VINF_SUCCESS)
996 {
997 /* Store result to register. */
998 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
999 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1000 }
1001 else
1002 Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1003 }
1004 else
1005 Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1006 }
1007 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
1008 {
1009 /* xchg [MMIO], reg. */
1010 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1011 if (rc == VINF_SUCCESS)
1012 {
1013 /* Store result to MMIO. */
1014 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
1015 if (rc == VINF_SUCCESS)
1016 {
1017 /* Store result to register. */
1018 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
1019 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1020 }
1021 else
1022 Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1023 }
1024 else
1025 Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1026 }
1027 else
1028 {
1029 AssertMsgFailed(("Disassembler XCHG problem.\n"));
1030 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1031 }
1032 return rc;
1033}
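/*
 * Note on the XCHG emulation above: the exchange is carried out as a separate
 * device read followed by a device write, so the implicit LOCK semantics of
 * XCHG on real hardware are not reproduced with respect to other accesses
 * that may hit the device in between the two callbacks.
 */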
1034
1035
1036/**
1037 * \#PF Handler callback for MMIO ranges.
1038 *
1039 * @returns VBox status code (appropriate for GC return).
1040 * @param pVM VM Handle.
1041 * @param uErrorCode CPU Error code.
1042 * @param pCtxCore Trap register frame.
1043 * @param pvFault The fault address (cr2).
1044 * @param GCPhysFault The GC physical address corresponding to pvFault.
1045 * @param pvUser Pointer to the MMIO ring-3 range entry.
1046 */
1047VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1048{
1049 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1050 Log(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1051 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1052
1053 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1054 Assert(pRange);
1055 Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1056
1057#ifdef VBOX_WITH_STATISTICS
1058 /*
1059 * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
1060 */
1061 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
1062 if (!pStats)
1063 {
1064# ifdef IN_RING3
1065 return VERR_NO_MEMORY;
1066# else
1067 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1068 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1069 return uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1070# endif
1071 }
1072#endif
1073
1074#ifndef IN_RING3
1075 /*
1076 * Should we defer the request right away?
1077 */
1078 if (uErrorCode & X86_TRAP_PF_RW
1079 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1080 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1081 {
1082# ifdef VBOX_WITH_STATISTICS
1083 if (uErrorCode & X86_TRAP_PF_RW)
1084 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1085 else
1086 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1087# endif
1088
1089 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1090 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1091 return uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1092 }
1093#endif /* !IN_RING3 */
1094
1095 /*
1096 * Disassemble the instruction and interpret it.
1097 */
1098 DISCPUSTATE Cpu;
1099 unsigned cbOp;
1100 int rc = EMInterpretDisasOne(pVM, pCtxCore, &Cpu, &cbOp);
1101 AssertRCReturn(rc, rc);
1102 switch (Cpu.pCurInstr->opcode)
1103 {
1104 case OP_MOV:
1105 case OP_MOVZX:
1106 case OP_MOVSX:
1107 {
1108 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1109 if (uErrorCode & X86_TRAP_PF_RW)
1110 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, &Cpu, pRange, GCPhysFault);
1111 else
1112 rc = iomInterpretMOVxXRead(pVM, pCtxCore, &Cpu, pRange, GCPhysFault);
1113 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1114 break;
1115 }
1116
1117
1118#ifdef IOM_WITH_MOVS_SUPPORT
1119 case OP_MOVSB:
1120 case OP_MOVSWD:
1121 {
1122 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1123 PSTAMPROFILE pStat = NULL;
1124 rc = iomInterpretMOVS(pVM, uErrorCode, pCtxCore, GCPhysFault, &Cpu, pRange, &pStat);
1125 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1126 break;
1127 }
1128#endif
1129
1130 case OP_STOSB:
1131 case OP_STOSWD:
1132 Assert(uErrorCode & X86_TRAP_PF_RW);
1133 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1134 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1135 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1136 break;
1137
1138 case OP_LODSB:
1139 case OP_LODSWD:
1140 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1141 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1142 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1143 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1144 break;
1145
1146 case OP_CMP:
1147 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1148 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1149 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1150 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1151 break;
1152
1153 case OP_AND:
1154 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1155 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateAnd);
1156 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1157 break;
1158
1159 case OP_OR:
1160 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1161 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateOr);
1162 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1163 break;
1164
1165 case OP_XOR:
1166 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1167 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateXor);
1168 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1169 break;
1170
1171 case OP_TEST:
1172 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1173 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1174 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1175 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1176 break;
1177
1178 case OP_BT:
1179 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1180 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1181 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1182 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1183 break;
1184
1185 case OP_XCHG:
1186 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1187 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1188 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1189 break;
1190
1191
1192 /*
1193 * The instruction isn't supported. Hand it on to ring-3.
1194 */
1195 default:
1196 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1197 rc = (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1198 break;
1199 }
1200
1201 /*
1202 * On success advance EIP.
1203 */
1204 if (rc == VINF_SUCCESS)
1205 pCtxCore->rip += cbOp;
1206 else
1207 {
1208 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1209#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1210 switch (rc)
1211 {
1212 case VINF_IOM_HC_MMIO_READ:
1213 case VINF_IOM_HC_MMIO_READ_WRITE:
1214 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1215 break;
1216 case VINF_IOM_HC_MMIO_WRITE:
1217 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1218 break;
1219 }
1220#endif
1221 }
1222
1223 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1224 return rc;
1225}
1226
1227
1228#ifdef IN_RING3
1229/**
1230 * Physical access handler callback for MMIO ranges.
1231 *
1232 * @returns VINF_SUCCESS if the handler has carried out the operation.
1233 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1234 * @param pVM VM Handle.
1235 * @param GCPhysFault The physical address the guest is reading from or writing to.
1236 * @param pvPhys The HC mapping of that address.
1237 * @param pvBuf What the guest is reading/writing.
1238 * @param cbBuf How much it's reading/writing.
1239 * @param enmAccessType The access type.
1240 * @param pvUser Pointer to the MMIO range entry.
1241 */
1242DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1243{
1244 int rc;
1245 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1246 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1247
1248 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1249
1250 Assert(pRange);
1251 Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1252
1253 if (enmAccessType == PGMACCESSTYPE_READ)
1254 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1255 else
1256 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1257
1258 AssertRC(rc);
1259 return rc;
1260}
1261#endif /* IN_RING3 */
1262
1263
1264/**
1265 * Reads a MMIO register.
1266 *
1267 * @returns VBox status code.
1268 *
1269 * @param pVM VM handle.
1270 * @param GCPhys The physical address to read.
1271 * @param pu32Value Where to store the value read.
1272 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1273 */
1274VMMDECL(int) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1275{
1276 /*
1277 * Lookup the current context range node and statistics.
1278 */
1279 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1280 AssertMsgReturn(pRange,
1281 ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue),
1282 VERR_INTERNAL_ERROR);
1283#ifdef VBOX_WITH_STATISTICS
1284 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1285 if (!pStats)
1286# ifdef IN_RING3
1287 return VERR_NO_MEMORY;
1288# else
1289 return VINF_IOM_HC_MMIO_READ;
1290# endif
1291#endif /* VBOX_WITH_STATISTICS */
1292 if (pRange->CTX_SUFF(pfnReadCallback))
1293 {
1294 /*
1295 * Perform the read and deal with the result.
1296 */
1297#ifdef VBOX_WITH_STATISTICS
1298 STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1299#endif
1300 int rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, (unsigned)cbValue);
1301#ifdef VBOX_WITH_STATISTICS
1302 STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1303 if (rc != VINF_IOM_HC_MMIO_READ)
1304 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1305#endif
1306 switch (rc)
1307 {
1308 case VINF_SUCCESS:
1309 default:
1310 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1311 return rc;
1312
1313 case VINF_IOM_MMIO_UNUSED_00:
1314 switch (cbValue)
1315 {
1316 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1317 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1318 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1319 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1320 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1321 }
1322 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1323 return VINF_SUCCESS;
1324
1325 case VINF_IOM_MMIO_UNUSED_FF:
1326 switch (cbValue)
1327 {
1328 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1329 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1330 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1331 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1332 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1333 }
1334 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1335 return VINF_SUCCESS;
1336 }
1337 }
1338#ifndef IN_RING3
1339 if (pRange->pfnReadCallbackR3)
1340 {
1341 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1342 return VINF_IOM_HC_MMIO_READ;
1343 }
1344#endif
1345
1346 /*
1347 * No read handler is registered for this range in any context.
1348 */
1349#ifdef VBOX_WITH_STATISTICS
1350 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1351#endif
1352 /* Unassigned memory; this is actually not supposed to happen. */
1353 switch (cbValue)
1354 {
1355 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1356 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1357 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1358 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1359 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1360 }
1361 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1362 return VINF_SUCCESS;
1363}
1364
1365
1366/**
1367 * Writes to a MMIO register.
1368 *
1369 * @returns VBox status code.
1370 *
1371 * @param pVM VM handle.
1372 * @param GCPhys The physical address to write to.
1373 * @param u32Value The value to write.
1374 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1375 */
1376VMMDECL(int) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1377{
1378 /*
1379 * Lookup the current context range node.
1380 */
1381 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1382 AssertMsgReturn(pRange,
1383 ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue),
1384 VERR_INTERNAL_ERROR);
1385#ifdef VBOX_WITH_STATISTICS
1386 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1387 if (!pStats)
1388# ifdef IN_RING3
1389 return VERR_NO_MEMORY;
1390# else
1391 return VINF_IOM_HC_MMIO_WRITE;
1392# endif
1393#endif /* VBOX_WITH_STATISTICS */
1394
1395 /*
1396 * Perform the write if there's a write handler. R0/GC may have
1397 * to defer it to ring-3.
1398 */
1399 if (pRange->CTX_SUFF(pfnWriteCallback))
1400 {
1401#ifdef VBOX_WITH_STATISTICS
1402 STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1403#endif
1404 int rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, &u32Value, (unsigned)cbValue);
1405#ifdef VBOX_WITH_STATISTICS
1406 STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1407 if (rc != VINF_IOM_HC_MMIO_WRITE)
1408 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
1409#endif
1410 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
1411 return rc;
1412 }
1413#ifndef IN_RING3
1414 if (pRange->pfnWriteCallbackR3)
1415 {
1416 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1417 return VINF_IOM_HC_MMIO_WRITE;
1418 }
1419#endif
1420
1421 /*
1422 * No write handler, nothing to do.
1423 */
1424#ifdef VBOX_WITH_STATISTICS
1425 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
1426#endif
1427 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
1428 return VINF_SUCCESS;
1429}
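/*
 * Typical use of the two APIs above (illustration only; the surrounding code
 * and GCPhysReg are hypothetical):
 *     uint32_t u32;
 *     int rc = IOMMMIORead(pVM, GCPhysReg, &u32, sizeof(u32));
 *     if (rc == VINF_SUCCESS)
 *         rc = IOMMMIOWrite(pVM, GCPhysReg, u32 | 1, sizeof(u32));
 * In R0/RC either call may instead return VINF_IOM_HC_MMIO_READ or
 * VINF_IOM_HC_MMIO_WRITE, asking the caller to redo the access in ring-3.
 */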
1430
1431
1432/**
1433 * [REP*] INSB/INSW/INSD
1434 * ES:EDI,DX[,ECX]
1435 *
1436 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1437 *
1438 * @returns Strict VBox status code. Informational status codes other than the one documented
1439 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1440 * @retval VINF_SUCCESS Success.
1441 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1442 * status code must be passed on to EM.
1443 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1444 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1445 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1446 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1447 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1448 *
1449 * @param pVM The virtual machine.
1450 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1451 * @param uPort IO Port
1452 * @param uPrefix IO instruction prefix
1453 * @param cbTransfer Size of transfer unit
1454 */
1455VMMDECL(int) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1456{
1457#ifdef VBOX_WITH_STATISTICS
1458 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
1459#endif
1460
1461 /*
1462 * We do not support REPNE or decrementing destination
1463 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
1464 */
1465 if ( (uPrefix & PREFIX_REPNE)
1466 || pRegFrame->eflags.Bits.u1DF)
1467 return VINF_EM_RAW_EMULATE_INSTR;
1468
1469 /*
1470 * Get bytes/words/dwords count to transfer.
1471 */
1472 RTGCUINTREG cTransfers = 1;
1473 if (uPrefix & PREFIX_REP)
1474 {
1475#ifndef IN_RC
1476 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
1477 && pRegFrame->rcx >= _4G)
1478 return VINF_EM_RAW_EMULATE_INSTR;
1479#endif
1480 cTransfers = pRegFrame->ecx;
1481
1482 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1483 cTransfers &= 0xffff;
1484
1485 if (!cTransfers)
1486 return VINF_SUCCESS;
1487 }
1488
1489 /* Convert destination address es:edi. */
1490 RTGCPTR GCPtrDst;
1491 int rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1492 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1493 &GCPtrDst);
1494 if (RT_FAILURE(rc))
1495 {
1496 Log(("INS destination address conversion failed -> fallback, rc=%d\n", rc));
1497 return VINF_EM_RAW_EMULATE_INSTR;
1498 }
1499
1500 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
1501 uint32_t cpl = CPUMGetGuestCPL(pVM, pRegFrame);
1502
1503 rc = PGMVerifyAccess(pVM, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
1504 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1505 if (rc != VINF_SUCCESS)
1506 {
1507 Log(("INS will generate a trap -> fallback, rc=%d\n", rc));
1508 return VINF_EM_RAW_EMULATE_INSTR;
1509 }
1510
1511 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1512 if (cTransfers > 1)
1513 {
1514 /* If the device supports string transfers, ask it to do as
1515 * much as it wants. The rest is done with single-word transfers. */
1516 const RTGCUINTREG cTransfersOrg = cTransfers;
1517 rc = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
1518 AssertRC(rc); Assert(cTransfers <= cTransfersOrg);
1519 pRegFrame->rdi += (cTransfersOrg - cTransfers) * cbTransfer;
1520 }
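 /*
  * Example (illustration only): with ECX = 10 and a device whose string
  * handler consumes 8 units in IOMIOPortReadString, cTransfers comes back as
  * 2 and EDI has already been advanced by 8 * cbTransfer above; the loop
  * below finishes the remaining 2 units one port read at a time.
  */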
1521
1522#ifdef IN_RC
1523 MMGCRamRegisterTrapHandler(pVM);
1524#endif
1525
1526 while (cTransfers && rc == VINF_SUCCESS)
1527 {
1528 uint32_t u32Value;
1529 rc = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
1530 if (!IOM_SUCCESS(rc))
1531 break;
1532 int rc2 = iomRamWrite(pVM, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
1533 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1534 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
1535 pRegFrame->rdi += cbTransfer;
1536 cTransfers--;
1537 }
1538#ifdef IN_RC
1539 MMGCRamDeregisterTrapHandler(pVM);
1540#endif
1541
1542 /* Update ecx on exit. */
1543 if (uPrefix & PREFIX_REP)
1544 pRegFrame->ecx = cTransfers;
1545
1546 AssertMsg(rc == VINF_SUCCESS || rc == VINF_IOM_HC_IOPORT_READ || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) || RT_FAILURE(rc), ("%Rrc\n", rc));
1547 return rc;
1548}
1549
1550
1551/**
1552 * [REP*] INSB/INSW/INSD
1553 * ES:EDI,DX[,ECX]
1554 *
1555 * @returns Strict VBox status code. Informational status codes other than the one documented
1556 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1557 * @retval VINF_SUCCESS Success.
1558 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1559 * status code must be passed on to EM.
1560 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1561 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1562 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1563 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1564 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1565 *
1566 * @param pVM The virtual machine.
1567 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1568 * @param pCpu Disassembler CPU state.
1569 */
1570VMMDECL(int) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1571{
1572 /*
1573 * Get port number directly from the register (no need to bother the
1574 * disassembler). And get the I/O register size from the opcode / prefix.
1575 */
1576 RTIOPORT Port = pRegFrame->edx & 0xffff;
1577 unsigned cb = 0;
1578 if (pCpu->pCurInstr->opcode == OP_INSB)
1579 cb = 1;
1580 else
1581 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32-bit and 64-bit mode */
1582
1583 int rc = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1584 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1585 {
1586 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rc), ("%Rrc\n", rc));
1587 return rc;
1588 }
1589
1590 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1591}
1592
1593
1594/**
1595 * [REP*] OUTSB/OUTSW/OUTSD
1596 * DS:ESI,DX[,ECX]
1597 *
1598 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1599 *
1600 * @returns Strict VBox status code. Informational status codes other than the one documented
1601 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1602 * @retval VINF_SUCCESS Success.
1603 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1604 * status code must be passed on to EM.
1605 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1606 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1607 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1608 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1609 *
1610 * @param pVM The virtual machine.
1611 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1612 * @param uPort IO Port
1613 * @param uPrefix IO instruction prefix
1614 * @param cbTransfer Size of transfer unit
1615 */
1616VMMDECL(int) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1617{
1618#ifdef VBOX_WITH_STATISTICS
1619 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
1620#endif
1621
1622 /*
1623 * We do not support segment prefixes, REPNE or
1624 * decrementing source pointer.
1625 */
1626 if ( (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
1627 || pRegFrame->eflags.Bits.u1DF)
1628 return VINF_EM_RAW_EMULATE_INSTR;
1629
1630 /*
1631 * Get bytes/words/dwords count to transfer.
1632 */
1633 RTGCUINTREG cTransfers = 1;
1634 if (uPrefix & PREFIX_REP)
1635 {
1636#ifndef IN_RC
1637 if ( CPUMIsGuestIn64BitCode(pVM, pRegFrame)
1638 && pRegFrame->rcx >= _4G)
1639 return VINF_EM_RAW_EMULATE_INSTR;
1640#endif
1641 cTransfers = pRegFrame->ecx;
1642 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1643 cTransfers &= 0xffff;
1644
1645 if (!cTransfers)
1646 return VINF_SUCCESS;
1647 }
1648
1649 /* Convert source address ds:esi. */
1650 RTGCPTR GCPtrSrc;
1651 int rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
1652 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1653 &GCPtrSrc);
1654 if (RT_FAILURE(rc))
1655 {
1656 Log(("OUTS source address conversion failed -> fallback, rc=%Rrc\n", rc));
1657 return VINF_EM_RAW_EMULATE_INSTR;
1658 }
1659
1660 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1661 uint32_t cpl = CPUMGetGuestCPL(pVM, pRegFrame);
1662 rc = PGMVerifyAccess(pVM, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
1663 (cpl == 3) ? X86_PTE_US : 0);
1664 if (rc != VINF_SUCCESS)
1665 {
1666 Log(("OUTS will generate a trap -> fallback, rc=%Rrc\n", rc));
1667 return VINF_EM_RAW_EMULATE_INSTR;
1668 }
1669
1670 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1671 if (cTransfers > 1)
1672 {
1673 /*
1674 * If the device supports string transfers, ask it to do as
1675 * much as it wants. The rest is done with single-word transfers.
1676 */
1677 const RTGCUINTREG cTransfersOrg = cTransfers;
1678 rc = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
1679 AssertRC(rc); Assert(cTransfers <= cTransfersOrg);
1680 pRegFrame->rsi += (cTransfersOrg - cTransfers) * cbTransfer;
1681 }
1682
1683#ifdef IN_RC
1684 MMGCRamRegisterTrapHandler(pVM);
1685#endif
1686
1687 while (cTransfers && rc == VINF_SUCCESS)
1688 {
1689 uint32_t u32Value;
1690 rc = iomRamRead(pVM, &u32Value, GCPtrSrc, cbTransfer);
1691 if (rc != VINF_SUCCESS)
1692 break;
1693 rc = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
1694 if (!IOM_SUCCESS(rc))
1695 break;
1696 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbTransfer);
1697 pRegFrame->rsi += cbTransfer;
1698 cTransfers--;
1699 }
1700
1701#ifdef IN_RC
1702 MMGCRamDeregisterTrapHandler(pVM);
1703#endif
1704
1705 /* Update ecx on exit. */
1706 if (uPrefix & PREFIX_REP)
1707 pRegFrame->ecx = cTransfers;
1708
1709 AssertMsg(rc == VINF_SUCCESS || rc == VINF_IOM_HC_IOPORT_WRITE || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) || RT_FAILURE(rc), ("%Rrc\n", rc));
1710 return rc;
1711}
1712
1713
1714/**
1715 * [REP*] OUTSB/OUTSW/OUTSD
1716 * DS:ESI,DX[,ECX]
1717 *
1718 * @returns Strict VBox status code. Informational status codes other than the one documented
1719 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1720 * @retval VINF_SUCCESS Success.
1721 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1722 * status code must be passed on to EM.
1723 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1724 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1725 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1726 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1727 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1728 *
1729 * @param pVM The virtual machine.
1730 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1731 * @param pCpu Disassembler CPU state.
1732 */
1733VMMDECL(int) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1734{
1735 /*
1736 * Get port number from the first parameter.
1737 * And get the I/O register size from the opcode / prefix.
1738 */
1739 uint64_t Port = 0;
1740 unsigned cb = 0;
1741 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1742 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1743 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1744 cb = 1;
1745 else
1746 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32-bit and 64-bit mode */
1747
1748 int rc = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1749 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1750 {
1751 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rc), ("%Rrc\n", rc));
1752 return rc;
1753 }
1754
1755 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1756}
1757
1758
1759#ifndef IN_RC
1760/**
1761 * Mapping an MMIO2 page in place of an MMIO page for direct access.
1762 *
1763 * (This is a special optimization used by the VGA device.)
1764 *
1765 * @returns VBox status code.
1766 *
1767 * @param pVM The virtual machine.
1768 * @param GCPhys The address of the MMIO page to be changed.
1769 * @param GCPhysRemapped The address of the MMIO2 page.
1770 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1771 * for the time being.
1772 */
1773VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
1774{
1775 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
1776
1777 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1778
1779 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1780 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1781 || ( CPUMIsGuestInPagedProtectedMode(pVM)
1782 && !HWACCMIsNestedPagingActive(pVM)))
1783 return VINF_SUCCESS; /* ignore */
1784
1785 /*
1786 * Lookup the context range node the page belongs to.
1787 */
1788 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1789 AssertMsgReturn(pRange,
1790 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
1791 VERR_IOM_MMIO_RANGE_NOT_FOUND);
1792 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1793 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1794
1795 /*
1796 * Do the aliasing; page align the addresses since PGM is picky.
1797 */
1798 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1799 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1800
1801 int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
1802 AssertRCReturn(rc, rc);
1803
1804 /*
1805 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
1806 * can simply prefetch it.
1807 *
1808 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
1809 */
1810#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
1811# ifdef VBOX_STRICT
1812 uint64_t fFlags;
1813 RTHCPHYS HCPhys;
1814 rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1815 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1816# endif
1817#endif
1818 rc = PGMPrefetchPage(pVM, (RTGCPTR)GCPhys);
1819 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1820 return VINF_SUCCESS;
1821}
1822
1823
1824/**
1825 * Reset a previously modified MMIO region; restore the access flags.
1826 *
1827 * @returns VBox status code.
1828 *
1829 * @param pVM The virtual machine.
1830 * @param GCPhys Physical address that's part of the MMIO region to be reset.
1831 */
1832VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
1833{
1834 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
1835
1836 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1837 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1838 || ( CPUMIsGuestInPagedProtectedMode(pVM)
1839 && !HWACCMIsNestedPagingActive(pVM)))
1840 return VINF_SUCCESS; /* ignore */
1841
1842 /*
1843 * Lookup the context range node the page belongs to.
1844 */
1845 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1846 AssertMsgReturn(pRange,
1847 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
1848 VERR_IOM_MMIO_RANGE_NOT_FOUND);
1849
1850 /*
1851 * Call PGM to do the work.
1852 *
1853 * After the call, all the pages should be non-present... unless there is
1854 * a page pool flush pending (unlikely).
1855 */
1856 int rc = PGMHandlerPhysicalReset(pVM, pRange->GCPhys);
1857 AssertRC(rc);
1858
1859#ifdef VBOX_STRICT
1860 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
1861 {
1862 uint32_t cb = pRange->cb;
1863 GCPhys = pRange->GCPhys;
1864 while (cb)
1865 {
1866 uint64_t fFlags;
1867 RTHCPHYS HCPhys;
1868 rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1869 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1870 cb -= PAGE_SIZE;
1871 GCPhys += PAGE_SIZE;
1872 }
1873 }
1874#endif
1875 return rc;
1876}
1877#endif /* !IN_RC */
1878