VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 29287

Last change on this file since 29287 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 70.9 KB
Line 
1/* $Id: IOMAllMMIO.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/iom.h>
24#include <VBox/cpum.h>
25#include <VBox/pgm.h>
26#include <VBox/selm.h>
27#include <VBox/mm.h>
28#include <VBox/em.h>
29#include <VBox/pgm.h>
30#include <VBox/trpm.h>
31#include "IOMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/vmm.h>
34#include <VBox/hwaccm.h>
35
36#include <VBox/dis.h>
37#include <VBox/disopcode.h>
38#include <VBox/pdmdev.h>
39#include <VBox/param.h>
40#include <VBox/err.h>
41#include <iprt/assert.h>
42#include <VBox/log.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45
46
47/*******************************************************************************
48* Global Variables *
49*******************************************************************************/
50
/**
 * Lookup table translating an operand size in bytes (1/2/4/8) into the
 * corresponding power-of-two bit shift; entries for invalid sizes are ~0.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0U,    /* 0 - invalid */
    0,      /* 1 byte  == 2^0 */
    1,      /* 2 bytes == 2^1 */
    ~0U,    /* 3 - invalid */
    2,      /* 4 bytes == 2^2 */
    ~0U,    /* 5 - invalid */
    ~0U,    /* 6 - invalid */
    ~0U,    /* 7 - invalid */
    3       /* 8 bytes == 2^3 */
};

/**
 * Translates an operand size in bytes (1/2/4/8) into a bit shift count.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
71
72
73/**
74 * Wrapper which does the write and updates range statistics when such are enabled.
75 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
76 */
77DECLINLINE(int) iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
78{
79#ifdef VBOX_WITH_STATISTICS
80 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
81 Assert(pStats);
82#endif
83
84 int rc;
85 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
86 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /* @todo fix const!! */
87 else
88 rc = VINF_SUCCESS;
89 if (rc != VINF_IOM_HC_MMIO_WRITE)
90 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
91 return rc;
92}
93
94
95/**
96 * Wrapper which does the read and updates range statistics when such are enabled.
97 */
98DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
99{
100#ifdef VBOX_WITH_STATISTICS
101 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
102 Assert(pStats);
103#endif
104
105 int rc;
106 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
107 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
108 else
109 rc = VINF_IOM_MMIO_UNUSED_FF;
110 if (rc != VINF_SUCCESS)
111 {
112 switch (rc)
113 {
114 case VINF_IOM_MMIO_UNUSED_FF:
115 switch (cbValue)
116 {
117 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
118 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
119 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
120 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
121 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
122 }
123 rc = VINF_SUCCESS;
124 break;
125
126 case VINF_IOM_MMIO_UNUSED_00:
127 switch (cbValue)
128 {
129 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
130 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
131 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
132 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
133 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
134 }
135 rc = VINF_SUCCESS;
136 break;
137 }
138 }
139 if (rc != VINF_IOM_HC_MMIO_READ)
140 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
141 return rc;
142}
143
144
145/**
146 * Internal - statistics only.
147 */
148DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
149{
150#ifdef VBOX_WITH_STATISTICS
151 switch (cb)
152 {
153 case 1:
154 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
155 break;
156 case 2:
157 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
158 break;
159 case 4:
160 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
161 break;
162 case 8:
163 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
164 break;
165 default:
166 /* No way. */
167 AssertMsgFailed(("Invalid data length %d\n", cb));
168 break;
169 }
170#else
171 NOREF(pVM); NOREF(cb);
172#endif
173}
174
175
176/**
177 * MOV reg, mem (read)
178 * MOVZX reg, mem (read)
179 * MOVSX reg, mem (read)
180 *
181 * @returns VBox status code.
182 *
183 * @param pVM The virtual machine.
184 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
185 * @param pCpu Disassembler CPU state.
186 * @param pRange Pointer MMIO range.
187 * @param GCPhysFault The GC physical address corresponding to pvFault.
188 */
189static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
190{
191 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
192
193 /*
194 * Get the data size from parameter 2,
195 * and call the handler function to get the data.
196 */
197 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
198 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
199
200 uint64_t u64Data = 0;
201 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
202 if (rc == VINF_SUCCESS)
203 {
204 /*
205 * Do sign extension for MOVSX.
206 */
207 /** @todo checkup MOVSX implementation! */
208 if (pCpu->pCurInstr->opcode == OP_MOVSX)
209 {
210 if (cb == 1)
211 {
212 /* DWORD <- BYTE */
213 int64_t iData = (int8_t)u64Data;
214 u64Data = (uint64_t)iData;
215 }
216 else
217 {
218 /* DWORD <- WORD */
219 int64_t iData = (int16_t)u64Data;
220 u64Data = (uint64_t)iData;
221 }
222 }
223
224 /*
225 * Store the result to register (parameter 1).
226 */
227 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
228 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
229 }
230
231 if (rc == VINF_SUCCESS)
232 iomMMIOStatLength(pVM, cb);
233 return rc;
234}
235
236
237/**
238 * MOV mem, reg|imm (write)
239 *
240 * @returns VBox status code.
241 *
242 * @param pVM The virtual machine.
243 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
244 * @param pCpu Disassembler CPU state.
245 * @param pRange Pointer MMIO range.
246 * @param GCPhysFault The GC physical address corresponding to pvFault.
247 */
248static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
249{
250 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
251
252 /*
253 * Get data to write from second parameter,
254 * and call the callback to write it.
255 */
256 unsigned cb = 0;
257 uint64_t u64Data = 0;
258 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
259 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
260
261 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
262 if (rc == VINF_SUCCESS)
263 iomMMIOStatLength(pVM, cb);
264 return rc;
265}
266
267
/** Wrapper for reading guest virtual memory.
 *
 * In raw-mode context the no-trap MM helper is used; in ring-0 and ring-3 the
 * read goes through the PGM page-table walker.
 *
 * @returns VBox status code.
 * @param   pVCpu   The virtual CPU handle.
 * @param   pDest   Where to store the bytes read.
 * @param   GCSrc   Guest virtual source address.
 * @param   cb      Number of bytes to read.
 */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    return MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
#endif
}
279
280
281/** Wrapper for writing virtual memory. */
282DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
283{
284 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
285 * raw mode code. Some thought needs to be spent on theoretical concurrency issues as
286 * as well since we're not behind the pgm lock and handler may change between calls.
287 * MMGCRamWriteNoTrapHandler may also trap if the page isn't shadowed, or was kicked
288 * out from both the shadow pt (SMP or our changes) and TLB.
289 *
290 * Currently MMGCRamWriteNoTrapHandler may also fail when it hits a write access handler.
291 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr OTOH may mess up the state
292 * of some shadowed structure in R0. */
293#ifdef IN_RC
294 NOREF(pCtxCore);
295 return MMGCRamWriteNoTrapHandler((void *)(uintptr_t)GCPtrDst, pvSrc, cb);
296#elif IN_RING0
297 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
298#else
299 NOREF(pCtxCore);
300 return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
301#endif
302}
303
304
#ifdef IOM_WITH_MOVS_SUPPORT
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 * Handles three shapes: [mem] -> [MMIO] (page fault on a write), and for
 * faulting reads either [MMIO] -> [mem] or the [MMIO] -> [MMIO] special case.
 * Whenever a precondition is not met the caller is told to retry in another
 * context (HC/REM) via the VINF_IOM_HC_MMIO_* / VINF_EM_RAW_EMULATE_INSTR
 * statuses.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 * @param   ppStat      Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
        return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->prefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* Huge 64-bit REP counts are punted to full instruction emulation. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        /* In 16-bit mode only CX (not ECX) holds the count. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.  The direction flag (DF) decides whether the pointers
     * walk up or down.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    /* NOTE(review): the copy loops below buffer each unit in a uint32_t, yet
       cb may be up to 8 per this assertion — confirm MOVSQ can't reach here. */
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    /* Track the largest MOVS burst seen (bytes = units << size-shift). */
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundraries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (uErrorCode & X86_TRAP_PF_RW)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_HC_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop: read one unit from guest memory, write it to MMIO,
               then advance all cursors by +/-cb. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt += offIncrement;
                Phys += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->prefix & PREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_HC_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_HC_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMMIOGetRange(&pVM->iom.s, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            /* Destination write callback only exists in R3 -> defer. */
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
                return VINF_IOM_HC_MMIO_READ_WRITE;

            /* copy loop: MMIO read on the source range, MMIO write on the
               destination range. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                Phys += offIncrement;
                PhysDst += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop: MMIO read, then write into guest memory. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt += offIncrement;
                Phys += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->prefix & PREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */
541
542
543/**
544 * [REP] STOSB
545 * [REP] STOSW
546 * [REP] STOSD
547 *
548 * Restricted implementation.
549 *
550 *
551 * @returns VBox status code.
552 *
553 * @param pVM The virtual machine.
554 * @param pRegFrame Trap register frame.
555 * @param GCPhysFault The GC physical address corresponding to pvFault.
556 * @param pCpu Disassembler CPU state.
557 * @param pRange Pointer MMIO range.
558 */
559static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
560{
561 /*
562 * We do not support segment prefixes or REPNE..
563 */
564 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
565 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
566
567 /*
568 * Get bytes/words/dwords count to copy.
569 */
570 uint32_t cTransfers = 1;
571 if (pCpu->prefix & PREFIX_REP)
572 {
573#ifndef IN_RC
574 if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM), pRegFrame)
575 && pRegFrame->rcx >= _4G)
576 return VINF_EM_RAW_EMULATE_INSTR;
577#endif
578
579 cTransfers = pRegFrame->ecx;
580 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
581 cTransfers &= 0xffff;
582
583 if (!cTransfers)
584 return VINF_SUCCESS;
585 }
586
587/** @todo r=bird: bounds checks! */
588
589 /*
590 * Get data size.
591 */
592 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
593 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
594 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
595
596#ifdef VBOX_WITH_STATISTICS
597 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
598 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
599#endif
600
601
602 RTGCPHYS Phys = GCPhysFault;
603 uint32_t u32Data = pRegFrame->eax;
604 int rc;
605 if (pRange->CTX_SUFF(pfnFillCallback))
606 {
607 /*
608 * Use the fill callback.
609 */
610 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
611 if (offIncrement > 0)
612 {
613 /* addr++ variant. */
614 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys, u32Data, cb, cTransfers);
615 if (rc == VINF_SUCCESS)
616 {
617 /* Update registers. */
618 pRegFrame->rdi += cTransfers << SIZE_2_SHIFT(cb);
619 if (pCpu->prefix & PREFIX_REP)
620 pRegFrame->ecx = 0;
621 }
622 }
623 else
624 {
625 /* addr-- variant. */
626 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), (Phys - (cTransfers - 1)) << SIZE_2_SHIFT(cb), u32Data, cb, cTransfers);
627 if (rc == VINF_SUCCESS)
628 {
629 /* Update registers. */
630 pRegFrame->rdi -= cTransfers << SIZE_2_SHIFT(cb);
631 if (pCpu->prefix & PREFIX_REP)
632 pRegFrame->ecx = 0;
633 }
634 }
635 }
636 else
637 {
638 /*
639 * Use the write callback.
640 */
641 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
642
643 /* fill loop. */
644 do
645 {
646 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
647 if (rc != VINF_SUCCESS)
648 break;
649
650 Phys += offIncrement;
651 pRegFrame->rdi += offIncrement;
652 cTransfers--;
653 } while (cTransfers);
654
655 /* Update ecx on exit. */
656 if (pCpu->prefix & PREFIX_REP)
657 pRegFrame->ecx = cTransfers;
658 }
659
660 /*
661 * Work statistics and return.
662 */
663 if (rc == VINF_SUCCESS)
664 iomMMIOStatLength(pVM, cb);
665 return rc;
666}
667
668
669/**
670 * [REP] LODSB
671 * [REP] LODSW
672 * [REP] LODSD
673 *
674 * Restricted implementation.
675 *
676 *
677 * @returns VBox status code.
678 *
679 * @param pVM The virtual machine.
680 * @param pRegFrame Trap register frame.
681 * @param GCPhysFault The GC physical address corresponding to pvFault.
682 * @param pCpu Disassembler CPU state.
683 * @param pRange Pointer MMIO range.
684 */
685static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
686{
687 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
688
689 /*
690 * We do not support segment prefixes or REP*.
691 */
692 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
693 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
694
695 /*
696 * Get data size.
697 */
698 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
699 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
700 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
701
702 /*
703 * Perform read.
704 */
705 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
706 if (rc == VINF_SUCCESS)
707 pRegFrame->rsi += offIncrement;
708
709 /*
710 * Work statistics and return.
711 */
712 if (rc == VINF_SUCCESS)
713 iomMMIOStatLength(pVM, cb);
714 return rc;
715}
716
717
718/**
719 * CMP [MMIO], reg|imm
720 * CMP reg|imm, [MMIO]
721 *
722 * Restricted implementation.
723 *
724 *
725 * @returns VBox status code.
726 *
727 * @param pVM The virtual machine.
728 * @param pRegFrame Trap register frame.
729 * @param GCPhysFault The GC physical address corresponding to pvFault.
730 * @param pCpu Disassembler CPU state.
731 * @param pRange Pointer MMIO range.
732 */
733static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
734{
735 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
736
737 /*
738 * Get the operands.
739 */
740 unsigned cb = 0;
741 uint64_t uData1 = 0;
742 uint64_t uData2 = 0;
743 int rc;
744 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
745 /* cmp reg, [MMIO]. */
746 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
747 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
748 /* cmp [MMIO], reg|imm. */
749 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
750 else
751 {
752 AssertMsgFailed(("Disassember CMP problem..\n"));
753 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
754 }
755
756 if (rc == VINF_SUCCESS)
757 {
758 /* Emulate CMP and update guest flags. */
759 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
760 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
761 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
762 iomMMIOStatLength(pVM, cb);
763 }
764
765 return rc;
766}
767
768
769/**
770 * AND [MMIO], reg|imm
771 * AND reg, [MMIO]
772 * OR [MMIO], reg|imm
773 * OR reg, [MMIO]
774 *
775 * Restricted implementation.
776 *
777 *
778 * @returns VBox status code.
779 *
780 * @param pVM The virtual machine.
781 * @param pRegFrame Trap register frame.
782 * @param GCPhysFault The GC physical address corresponding to pvFault.
783 * @param pCpu Disassembler CPU state.
784 * @param pRange Pointer MMIO range.
785 * @param pfnEmulate Instruction emulation function.
786 */
787static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
788{
789 unsigned cb = 0;
790 uint64_t uData1 = 0;
791 uint64_t uData2 = 0;
792 bool fAndWrite;
793 int rc;
794
795#ifdef LOG_ENABLED
796 const char *pszInstr;
797
798 if (pCpu->pCurInstr->opcode == OP_XOR)
799 pszInstr = "Xor";
800 else if (pCpu->pCurInstr->opcode == OP_OR)
801 pszInstr = "Or";
802 else if (pCpu->pCurInstr->opcode == OP_AND)
803 pszInstr = "And";
804 else
805 pszInstr = "OrXorAnd??";
806#endif
807
808 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
809 {
810 /* and reg, [MMIO]. */
811 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
812 fAndWrite = false;
813 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
814 }
815 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
816 {
817 /* and [MMIO], reg|imm. */
818 fAndWrite = true;
819 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
820 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
821 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
822 else
823 rc = VINF_IOM_HC_MMIO_READ_WRITE;
824 }
825 else
826 {
827 AssertMsgFailed(("Disassember AND problem..\n"));
828 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
829 }
830
831 if (rc == VINF_SUCCESS)
832 {
833 /* Emulate AND and update guest flags. */
834 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
835
836 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
837
838 if (fAndWrite)
839 /* Store result to MMIO. */
840 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
841 else
842 {
843 /* Store result to register. */
844 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
845 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
846 }
847 if (rc == VINF_SUCCESS)
848 {
849 /* Update guest's eflags and finish. */
850 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
851 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
852 iomMMIOStatLength(pVM, cb);
853 }
854 }
855
856 return rc;
857}
858
859
860/**
861 * TEST [MMIO], reg|imm
862 * TEST reg, [MMIO]
863 *
864 * Restricted implementation.
865 *
866 *
867 * @returns VBox status code.
868 *
869 * @param pVM The virtual machine.
870 * @param pRegFrame Trap register frame.
871 * @param GCPhysFault The GC physical address corresponding to pvFault.
872 * @param pCpu Disassembler CPU state.
873 * @param pRange Pointer MMIO range.
874 */
875static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
876{
877 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
878
879 unsigned cb = 0;
880 uint64_t uData1 = 0;
881 uint64_t uData2 = 0;
882 int rc;
883
884 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
885 {
886 /* and test, [MMIO]. */
887 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
888 }
889 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
890 {
891 /* test [MMIO], reg|imm. */
892 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
893 }
894 else
895 {
896 AssertMsgFailed(("Disassember TEST problem..\n"));
897 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
898 }
899
900 if (rc == VINF_SUCCESS)
901 {
902 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
903 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
904 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
905 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
906 iomMMIOStatLength(pVM, cb);
907 }
908
909 return rc;
910}
911
912
913/**
914 * BT [MMIO], reg|imm
915 *
916 * Restricted implementation.
917 *
918 *
919 * @returns VBox status code.
920 *
921 * @param pVM The virtual machine.
922 * @param pRegFrame Trap register frame.
923 * @param GCPhysFault The GC physical address corresponding to pvFault.
924 * @param pCpu Disassembler CPU state.
925 * @param pRange Pointer MMIO range.
926 */
927static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
928{
929 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
930
931 uint64_t uBit = 0;
932 uint64_t uData = 0;
933 unsigned cbIgnored;
934
935 if (!iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cbIgnored))
936 {
937 AssertMsgFailed(("Disassember BT problem..\n"));
938 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
939 }
940 /* The size of the memory operand only matters here. */
941 unsigned cbData = DISGetParamSize(pCpu, &pCpu->param1);
942
943 /* bt [MMIO], reg|imm. */
944 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
945 if (rc == VINF_SUCCESS)
946 {
947 /* Find the bit inside the faulting address */
948 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
949 iomMMIOStatLength(pVM, cbData);
950 }
951
952 return rc;
953}
954
/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.
 *
 * Performs the exchange as an MMIO read followed by an MMIO write; the
 * register side is only updated once both MMIO operations succeeded.
 * NOTE(review): unlike real XCHG this is not atomic with respect to other
 * CPUs — presumably acceptable for MMIO; confirm no LOCK-prefix handling is
 * expected here.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_HC_MMIO_READ_WRITE;

    int rc;
    unsigned cb     = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]: read the old MMIO value, write the register
           value out, then save the old MMIO value into the register. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg: mirror image of the branch above with the
           operands swapped. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassember XCHG problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}
1029
1030
/**
 * \#PF Handler callback for MMIO ranges.
 *
 * Disassembles and interprets the faulting instruction against the MMIO
 * range's read/write callbacks.  Takes the IOM lock for the duration of
 * the access and releases it again on every exit path.
 *
 * @returns VBox status code (appropriate for GC return).
 * @retval  VINF_SUCCESS if the instruction was interpreted; RIP is advanced.
 * @retval  VINF_IOM_HC_MMIO_READ / VINF_IOM_HC_MMIO_WRITE when the access
 *          must be deferred to ring-3 (R0/GC only).
 *
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code; X86_TRAP_PF_RW distinguishes write
 *                      faults from read faults.
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to the faulting
 *                      guest virtual address.
 * @param   pvUser      Pointer to the MMIO range entry (PIOMMMIORANGE).
 */
int iomMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
{
    /* Take the IOM lock before performing any MMIO. */
    int rc = iomLock(pVM);
#ifndef IN_RING3
    /* In R0/GC the lock may be contended; defer to ring-3 rather than block. */
    if (rc == VERR_SEM_BUSY)
        return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
         GCPhysFault, (uint32_t)uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
    /** @todo implement per-device locks for MMIO access. It can replace the IOM
     *        lock for most of the code, provided that we retake the lock while
     *        deregistering PIOMMMIORANGE to deal with remapping/access races
     *        (unlikely, but an SMP guest shouldn't cause us to crash). */
    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
     */
    PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
    if (!pStats)
    {
# ifdef IN_RING3
        iomUnlock(pVM);
        return VERR_NO_MEMORY;
# else
        /* Can't allocate stats in R0/GC; retry the access in ring-3. */
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomUnlock(pVM);
        return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This is the case when the
     * range has no callback for this context but does have a ring-3 one.
     */
    if (uErrorCode & X86_TRAP_PF_RW
        ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
        : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
    {
# ifdef VBOX_WITH_STATISTICS
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
# endif

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomUnlock(pVM);
        return (uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ);
    }
#endif /* !IN_RING3 */

    /*
     * Disassemble the instruction and interpret it.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
    unsigned cbOp;
    rc = EMInterpretDisasOne(pVM, pVCpu, pCtxCore, pDis, &cbOp);
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        iomUnlock(pVM);
        return rc;
    }
    /* Dispatch on the decoded opcode; each case delegates to a dedicated
       interpreter that performs the actual device read/write. */
    switch (pDis->pCurInstr->opcode)
    {
        case OP_MOV:
        case OP_MOVZX:
        case OP_MOVSX:
        {
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
            if (uErrorCode & X86_TRAP_PF_RW)
                rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
            else
                rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
            break;
        }


#ifdef IOM_WITH_MOVS_SUPPORT
        case OP_MOVSB:
        case OP_MOVSWD:
        {
            STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
            PSTAMPROFILE pStat = NULL;
            rc = iomInterpretMOVS(pVM, uErrorCode, pCtxCore, GCPhysFault, pDis, pRange, &pStat);
            STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
            break;
        }
#endif

        case OP_STOSB:
        case OP_STOSWD:
            Assert(uErrorCode & X86_TRAP_PF_RW);    /* STOS only ever writes. */
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
            rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
            break;

        case OP_LODSB:
        case OP_LODSWD:
            Assert(!(uErrorCode & X86_TRAP_PF_RW)); /* LODS only ever reads. */
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
            rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
            break;

        case OP_CMP:
            Assert(!(uErrorCode & X86_TRAP_PF_RW));
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
            rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
            break;

        case OP_AND:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
            break;

        case OP_OR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
            break;

        case OP_XOR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
            break;

        case OP_TEST:
            Assert(!(uErrorCode & X86_TRAP_PF_RW));
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
            rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
            break;

        case OP_BT:
            Assert(!(uErrorCode & X86_TRAP_PF_RW));
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
            rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
            break;

        case OP_XCHG:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
            rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
            break;


        /*
         * The instruction isn't supported. Hand it on to ring-3.
         */
        default:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
            rc = (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
            break;
    }

    /*
     * On success advance EIP.
     */
    if (rc == VINF_SUCCESS)
        pCtxCore->rip += cbOp;
    else
    {
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
        /* Account the reason we're bouncing to ring-3. */
        switch (rc)
        {
            case VINF_IOM_HC_MMIO_READ:
            case VINF_IOM_HC_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
                break;
            case VINF_IOM_HC_MMIO_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
                break;
        }
#endif
    }

    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    iomUnlock(pVM);
    return rc;
}
1243
1244/**
1245 * \#PF Handler callback for MMIO ranges.
1246 *
1247 * @returns VBox status code (appropriate for GC return).
1248 * @param pVM VM Handle.
1249 * @param uErrorCode CPU Error code.
1250 * @param pCtxCore Trap register frame.
1251 * @param pvFault The fault address (cr2).
1252 * @param GCPhysFault The GC physical address corresponding to pvFault.
1253 * @param pvUser Pointer to the MMIO ring-3 range entry.
1254 */
1255VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1256{
1257 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1258 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1259 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, uErrorCode, pCtxCore, GCPhysFault, pvUser);
1260 return VBOXSTRICTRC_VAL(rcStrict);
1261}
1262
/**
 * Physical access handler for MMIO ranges.
 *
 * Takes the IOM lock (so the range lookup below is safe), resolves the range
 * for the faulting physical address, and hands the access to iomMMIOHandler.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
{
    int rc2 = iomLock(pVM);
#ifndef IN_RING3
    /* Lock contended in R0/GC: retry the access in ring-3. */
    if (rc2 == VERR_SEM_BUSY)
        return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
#endif
    /* NOTE(review): iomMMIOHandler takes and releases the IOM lock itself, so
     * this lock/unlock pair around it presumably relies on the lock being
     * recursive — confirm against the iomLock implementation. */
    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, uErrorCode, pCtxCore, GCPhysFault, iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
    iomUnlock(pVM);
    return VBOXSTRICTRC_VAL(rcStrict);
}
1283
1284#ifdef IN_RING3
/**
 * Ring-3 access handler callback for MMIO ranges.
 *
 * Dispatches the buffered access to the range's read or write path under the
 * IOM lock.
 *
 * @returns VINF_SUCCESS if the handler have carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhysFault     The physical address the guest is accessing.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing; must be 1, 2, 4 or 8 bytes.
 * @param   enmAccessType   The access type (PGMACCESSTYPE_READ or write).
 * @param   pvUser          Pointer to the MMIO range entry (PIOMMMIORANGE).
 */
DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);

    /* Take the IOM lock before performing any MMIO. */
    int rc = iomLock(pVM);
    AssertRC(rc);

    AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));

    Assert(pRange);
    Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
    /** @todo implement per-device locks for MMIO access. It can replace the IOM
     *        lock for most of the code, provided that we retake the lock while
     *        deregistering PIOMMMIORANGE to deal with remapping/access races
     *        (unlikely, but an SMP guest shouldn't cause us to crash). */
    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

    /* Route to the device's read or write callback path. */
    if (enmAccessType == PGMACCESSTYPE_READ)
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
    else
        rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);

    AssertRC(rc);
    iomUnlock(pVM);
    return rc;
}
1326#endif /* IN_RING3 */
1327
1328/**
1329 * Reads a MMIO register.
1330 *
1331 * @returns VBox status code.
1332 *
1333 * @param pVM VM handle.
1334 * @param GCPhys The physical address to read.
1335 * @param pu32Value Where to store the value read.
1336 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1337 */
1338VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1339{
1340 /* Take the IOM lock before performing any MMIO. */
1341 int rc = iomLock(pVM);
1342#ifndef IN_RING3
1343 if (rc == VERR_SEM_BUSY)
1344 return VINF_IOM_HC_MMIO_WRITE;
1345#endif
1346 AssertRC(rc);
1347
1348 /*
1349 * Lookup the current context range node and statistics.
1350 */
1351 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1352 AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1353 if (!pRange)
1354 {
1355 iomUnlock(pVM);
1356 return VERR_INTERNAL_ERROR;
1357 }
1358 /** @todo implement per-device locks for MMIO access. */
1359 Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1360#ifdef VBOX_WITH_STATISTICS
1361 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1362 if (!pStats)
1363 {
1364 iomUnlock(pVM);
1365# ifdef IN_RING3
1366 return VERR_NO_MEMORY;
1367# else
1368 return VINF_IOM_HC_MMIO_READ;
1369# endif
1370 }
1371#endif /* VBOX_WITH_STATISTICS */
1372 if (pRange->CTX_SUFF(pfnReadCallback))
1373 {
1374 /*
1375 * Perform the read and deal with the result.
1376 */
1377#ifdef VBOX_WITH_STATISTICS
1378 STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1379#endif
1380 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, (unsigned)cbValue);
1381#ifdef VBOX_WITH_STATISTICS
1382 STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1383 if (rc != VINF_IOM_HC_MMIO_READ)
1384 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1385#endif
1386 switch (rc)
1387 {
1388 case VINF_SUCCESS:
1389 default:
1390 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1391 iomUnlock(pVM);
1392 return rc;
1393
1394 case VINF_IOM_MMIO_UNUSED_00:
1395 switch (cbValue)
1396 {
1397 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1398 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1399 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1400 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1401 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1402 }
1403 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1404 iomUnlock(pVM);
1405 return VINF_SUCCESS;
1406
1407 case VINF_IOM_MMIO_UNUSED_FF:
1408 switch (cbValue)
1409 {
1410 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1411 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1412 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1413 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1414 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1415 }
1416 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1417 iomUnlock(pVM);
1418 return VINF_SUCCESS;
1419 }
1420 }
1421#ifndef IN_RING3
1422 if (pRange->pfnReadCallbackR3)
1423 {
1424 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1425 iomUnlock(pVM);
1426 return VINF_IOM_HC_MMIO_READ;
1427 }
1428#endif
1429
1430 /*
1431 * Lookup the ring-3 range.
1432 */
1433#ifdef VBOX_WITH_STATISTICS
1434 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1435#endif
1436 /* Unassigned memory; this is actually not supposed to happen. */
1437 switch (cbValue)
1438 {
1439 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1440 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1441 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1442 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1443 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1444 }
1445 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1446 iomUnlock(pVM);
1447 return VINF_SUCCESS;
1448}
1449
1450
/**
 * Writes to a MMIO register.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success (also when the range has no write handler,
 *          in which case the write is silently dropped).
 * @retval  VINF_IOM_HC_MMIO_WRITE when the write must be redone in ring-3
 *          (R0/GC only).
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address to write to.
 * @param   u32Value    The value to write.
 * @param   cbValue     The size of the value to write in bytes. 1, 2 or 4 bytes.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
{
    /* Take the IOM lock before performing any MMIO. */
    int rc = iomLock(pVM);
#ifndef IN_RING3
    /* Lock contended in R0/GC: retry the write in ring-3. */
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_HC_MMIO_WRITE;
#endif
    AssertRC(rc);

    /*
     * Lookup the current context range node.
     */
    PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
    AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
    if (!pRange)
    {
        iomUnlock(pVM);
        return VERR_INTERNAL_ERROR;
    }
    /** @todo implement per-device locks for MMIO access. */
    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
#ifdef VBOX_WITH_STATISTICS
    PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
    if (!pStats)
    {
        iomUnlock(pVM);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_HC_MMIO_WRITE;
# endif
    }
#endif /* VBOX_WITH_STATISTICS */

    /*
     * Perform the write if there's a write handler. R0/GC may have
     * to defer it to ring-3.
     */
    if (pRange->CTX_SUFF(pfnWriteCallback))
    {
#ifdef VBOX_WITH_STATISTICS
        STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#endif
        rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, &u32Value, (unsigned)cbValue);
#ifdef VBOX_WITH_STATISTICS
        STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
        if (rc != VINF_IOM_HC_MMIO_WRITE)
            STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
#endif
        Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
        iomUnlock(pVM);
        return rc;
    }
#ifndef IN_RING3
    /* No R0/GC callback, but a ring-3 one exists: defer. */
    if (pRange->pfnWriteCallbackR3)
    {
        STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        iomUnlock(pVM);
        return VINF_IOM_HC_MMIO_WRITE;
    }
#endif

    /*
     * No write handler, nothing to do.
     */
#ifdef VBOX_WITH_STATISTICS
    STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
#endif
    Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
    iomUnlock(pVM);
    return VINF_SUCCESS;
}
1534
/**
 * [REP*] INSB/INSW/INSD
 * ES:EDI,DX[,ECX]
 *
 * Reads cbTransfer-sized units from I/O port uPort into guest memory at
 * ES:(R|E)DI, advancing RDI and decrementing the REP counter as it goes, so
 * partial progress survives a defer to ring-3.
 *
 * @remark  Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
 *
 * @returns Strict VBox status code. Informational status codes other than the one documented
 *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
 * @retval  VINF_SUCCESS                Success.
 * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
 *                                      status code must be passed on to EM.
 * @retval  VINF_IOM_HC_IOPORT_READ     Defer the read to ring-3. (R0/GC only)
 * @retval  VINF_EM_RAW_EMULATE_INSTR   Defer the read to the REM.
 * @retval  VINF_EM_RAW_GUEST_TRAP      The exception was left pending. (TRPMRaiseXcptErr)
 * @retval  VINF_TRPM_XCPT_DISPATCHED   The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
 * @retval  VINF_EM_RESCHEDULE_REM      The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
 *
 * @param   pVM             The virtual machine.
 * @param   pRegFrame       Pointer to CPUMCTXCORE guest registers structure.
 * @param   uPort           IO Port
 * @param   uPrefix         IO instruction prefix
 * @param   cbTransfer      Size of transfer unit
 */
VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
{
#ifdef VBOX_WITH_STATISTICS
    STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
#endif

    /*
     * We do not support REPNE or decrementing destination
     * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
     */
    if ( (uPrefix & PREFIX_REPNE)
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* A 64-bit REP count >= 4G is handed to the recompiler instead. */
        if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            && pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->ecx;

        /* 16-bit address/operand mode uses CX, not ECX. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Convert destination address es:edi. */
    RTGCPTR GCPtrDst;
    int rc2 = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrDst);
    if (RT_FAILURE(rc2))
    {
        Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
                          X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
    if (rc2 != VINF_SUCCESS)
    {
        Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /* If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers. */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        pRegFrame->rdi += (cTransfersOrg - cTransfers) * cbTransfer;
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);
#endif
    /* Single-unit transfer loop; guest state is updated after every unit so a
       defer (IOM_SUCCESS false) leaves RDI/counter reflecting actual progress. */
    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value;
        rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
        Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
        GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
        pRegFrame->rdi += cbTransfer;
        cTransfers--;
    }
#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update ecx on exit. */
    if (uPrefix & PREFIX_REP)
        pRegFrame->ecx = cTransfers;

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
1654
1655
1656/**
1657 * [REP*] INSB/INSW/INSD
1658 * ES:EDI,DX[,ECX]
1659 *
1660 * @returns Strict VBox status code. Informational status codes other than the one documented
1661 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1662 * @retval VINF_SUCCESS Success.
1663 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1664 * status code must be passed on to EM.
1665 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1666 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1667 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1668 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1669 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1670 *
1671 * @param pVM The virtual machine.
1672 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1673 * @param pCpu Disassembler CPU state.
1674 */
1675VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1676{
1677 /*
1678 * Get port number directly from the register (no need to bother the
1679 * disassembler). And get the I/O register size from the opcode / prefix.
1680 */
1681 RTIOPORT Port = pRegFrame->edx & 0xffff;
1682 unsigned cb = 0;
1683 if (pCpu->pCurInstr->opcode == OP_INSB)
1684 cb = 1;
1685 else
1686 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1687
1688 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1689 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1690 {
1691 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1692 return rcStrict;
1693 }
1694
1695 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1696}
1697
1698
/**
 * [REP*] OUTSB/OUTSW/OUTSD
 * DS:ESI,DX[,ECX]
 *
 * Writes cbTransfer-sized units read from guest memory at DS:(R|E)SI out to
 * I/O port uPort, advancing RSI and decrementing the REP counter as it goes,
 * so partial progress survives a defer to ring-3.
 *
 * @remark  Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
 *
 * @returns Strict VBox status code. Informational status codes other than the one documented
 *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
 * @retval  VINF_SUCCESS                Success.
 * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
 *                                      status code must be passed on to EM.
 * @retval  VINF_IOM_HC_IOPORT_WRITE    Defer the write to ring-3. (R0/GC only)
 * @retval  VINF_EM_RAW_GUEST_TRAP      The exception was left pending. (TRPMRaiseXcptErr)
 * @retval  VINF_TRPM_XCPT_DISPATCHED   The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
 * @retval  VINF_EM_RESCHEDULE_REM      The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
 *
 * @param   pVM             The virtual machine.
 * @param   pRegFrame       Pointer to CPUMCTXCORE guest registers structure.
 * @param   uPort           IO Port
 * @param   uPrefix         IO instruction prefix
 * @param   cbTransfer      Size of transfer unit
 */
VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
{
#ifdef VBOX_WITH_STATISTICS
    STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
#endif

    /*
     * We do not support segment prefixes, REPNE or
     * decrementing source pointer.
     */
    if ( (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* A 64-bit REP count >= 4G is handed to the recompiler instead. */
        if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            && pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->ecx;
        /* 16-bit address/operand mode uses CX, not ECX. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Convert source address ds:esi. */
    RTGCPTR GCPtrSrc;
    int rc2 = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrSrc);
    if (RT_FAILURE(rc2))
    {
        Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we currently can't recover properly from traps inside this instruction */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
                          (cpl == 3) ? X86_PTE_US : 0);
    if (rc2 != VINF_SUCCESS)
    {
        Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /*
         * If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers.
         */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        pRegFrame->rsi += (cTransfersOrg - cTransfers) * cbTransfer;
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);
#endif

    /* Single-unit transfer loop; guest state is updated after every unit so a
       defer (IOM_SUCCESS false) leaves RSI/counter reflecting actual progress. */
    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value = 0;
        rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
        if (rcStrict != VINF_SUCCESS)
            break;
        rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
        pRegFrame->rsi += cbTransfer;
        cTransfers--;
    }

#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update ecx on exit. */
    if (uPrefix & PREFIX_REP)
        pRegFrame->ecx = cTransfers;

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
1820
1821
1822/**
1823 * [REP*] OUTSB/OUTSW/OUTSD
1824 * DS:ESI,DX[,ECX]
1825 *
1826 * @returns Strict VBox status code. Informational status codes other than the one documented
1827 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1828 * @retval VINF_SUCCESS Success.
1829 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1830 * status code must be passed on to EM.
1831 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1832 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1833 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1834 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1835 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1836 *
1837 * @param pVM The virtual machine.
1838 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1839 * @param pCpu Disassembler CPU state.
1840 */
1841VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1842{
1843 /*
1844 * Get port number from the first parameter.
1845 * And get the I/O register size from the opcode / prefix.
1846 */
1847 uint64_t Port = 0;
1848 unsigned cb = 0;
1849 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1850 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1851 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1852 cb = 1;
1853 else
1854 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1855
1856 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1857 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1858 {
1859 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1860 return rcStrict;
1861 }
1862
1863 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1864}
1865
1866
1867#ifndef IN_RC
/**
 * Mapping an MMIO2 page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the VGA device.)
 *
 * Aliases the guest-physical MMIO page to the given MMIO2 page via PGM and
 * prefetches the shadow mapping so the next access goes straight through.
 * Silently succeeds (no-op) in configurations where the optimization does
 * not apply.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The virtual machine.
 * @param   GCPhys          The address of the MMIO page to be changed.
 * @param   GCPhysRemapped  The address of the MMIO2 page.
 * @param   fPageFlags      Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
 *                          for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
{
    /* Currently only called from the VGA device during MMIO. */
    Assert(IOMIsLockOwner(pVM));
    Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));

    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
        || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
            && !HWACCMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    /*
     * Lookup the context range node the page belongs to.
     */
    PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);

    /* The range is expected to be page aligned and to cover whole pages. */
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;

    int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table. Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
# ifdef VBOX_STRICT
    uint64_t fFlags;
    RTHCPHYS HCPhys;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
    Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
# endif
#endif
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    return VINF_SUCCESS;
}
1934
1935/**
1936 * Mapping a HC page in place of an MMIO page for direct access.
1937 *
1938 * (This is a special optimization used by the APIC in the VT-x case.)
1939 *
1940 * @returns VBox status code.
1941 *
1942 * @param pVM The virtual machine.
1943 * @param GCPhys The address of the MMIO page to be changed.
1944 * @param HCPhys The address of the host physical page.
1945 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1946 * for the time being.
1947 */
1948VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
1949{
1950 /* Currently only called from VT-x code during a page fault. */
1951 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
1952
1953 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1954 Assert(HWACCMIsEnabled(pVM));
1955
1956 PVMCPU pVCpu = VMMGetCpu(pVM);
1957
1958 /*
1959 * Lookup the context range node the page belongs to.
1960 */
1961#ifdef VBOX_STRICT
1962 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
1963 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(&pVM->iom.s, GCPhys);
1964 AssertMsgReturn(pRange,
1965 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
1966 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1967 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1968#endif
1969
1970 /*
1971 * Do the aliasing; page align the addresses since PGM is picky.
1972 */
1973 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1974 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
1975
1976 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
1977 AssertRCReturn(rc, rc);
1978
1979 /*
1980 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
1981 * can simply prefetch it.
1982 *
1983 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
1984 */
1985 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
1986 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1987 return VINF_SUCCESS;
1988}
1989
1990/**
1991 * Reset a previously modified MMIO region; restore the access flags.
1992 *
1993 * @returns VBox status code.
1994 *
1995 * @param pVM The virtual machine.
1996 * @param GCPhys Physical address that's part of the MMIO region to be reset.
1997 */
VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
{
    Log(("IOMMMIOResetRegion %RGp\n", GCPhys));

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if (    !HWACCMIsEnabled(pVM)       /* useless without VT-x/AMD-V */
        ||  (   CPUMIsGuestInPagedProtectedMode(pVCpu)
             && !HWACCMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;            /* ignore */

    /*
     * Lookup the context range node the page belongs to.
     */
#ifdef VBOX_STRICT
    /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
    PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(&pVM->iom.s, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    /* The registered MMIO range is expected to be page aligned and to span whole pages. */
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
#endif

    /*
     * Call PGM to do the job work.
     *
     * After the call, all the pages should be non-present... unless there is
     * a page pool flush pending (unlikely).
     */
    int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
    AssertRC(rc);

#ifdef VBOX_STRICT
    /*
     * Strict builds: walk every page of the range and verify it is no longer
     * present in the shadow page tables.  Skipped when a CR3 sync is pending
     * since the shadow tables are about to be rebuilt anyway.
     */
    if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    {
        uint32_t cb = pRange->cb;
        GCPhys = pRange->GCPhys;
        while (cb)
        {
            uint64_t fFlags;
            RTHCPHYS HCPhys;
            rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
            Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
            cb -= PAGE_SIZE;
            GCPhys += PAGE_SIZE;
        }
    }
#endif
    return rc;
}
2049#endif /* !IN_RC */
2050
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette