VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@36768

Last change on this file since 36768 was 36768, checked in by vboxsync, 14 years ago

IEM: Initial commit, work in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.3 KB
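This file is an include template rather than a stand-alone compilation unit: the C file that includes it is expected to define OP_SIZE and ADDR_SIZE first, once per operand/address-size combination, so that the RT_CONCAT* macros below expand into distinctly named instruction helpers. A minimal instantiation sketch (the surrounding includer shown here is an assumption for illustration, not part of this file):

    /* Hypothetical includer: emit the 16-bit operand / 32-bit address variants. */
    #define OP_SIZE   16
    #define ADDR_SIZE 32
    #include "IEMAllCImplStrInstr.cpp.h"   /* yields iemCImpl_rep_movs_op16_addr32, ... */

The #undef block at the end of the template clears OP_SIZE, ADDR_SIZE and the derived macros so the file can be included again for the next combination.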
/* $Id: IEMAllCImplStrInstr.cpp.h 36768 2011-04-20 18:33:29Z vboxsync $ */
/** @file
 * IEM - String Instruction Implementation Code Template.
 */

/*
 * Copyright (C) 2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if OP_SIZE == 8
# define OP_rAX     al
#elif OP_SIZE == 16
# define OP_rAX     ax
#elif OP_SIZE == 32
# define OP_rAX     eax
#elif OP_SIZE == 64
# define OP_rAX     rax
#else
# error "Bad OP_SIZE."
#endif
#define OP_TYPE     RT_CONCAT3(uint,OP_SIZE,_t)

#if ADDR_SIZE == 16
# define ADDR_rDI   di
# define ADDR_rSI   si
# define ADDR_rCX   cx
# define ADDR2_TYPE uint32_t
#elif ADDR_SIZE == 32
# define ADDR_rDI   edi
# define ADDR_rSI   esi
# define ADDR_rCX   ecx
# define ADDR2_TYPE uint32_t
#elif ADDR_SIZE == 64
# define ADDR_rDI   rdi
# define ADDR_rSI   rsi
# define ADDR_rCX   rcx
# define ADDR2_TYPE uint64_t
#else
# error "Bad ADDR_SIZE."
#endif
#define ADDR_TYPE   RT_CONCAT3(uint,ADDR_SIZE,_t)


/**
 * Implements 'REP MOVS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
        return VINF_SUCCESS;

    PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
    ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
        ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->esHid.u64Base + uDstAddrReg;
#else
        uint64_t uVirtSrcAddr = uSrcAddrReg;
        uint64_t uVirtDstAddr = uDstAddrReg;
#endif
        uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftSrcPage > uCounterReg)
            cLeftSrcPage = uCounterReg;
        uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        uint32_t cLeftPage    = RT_MIN(cLeftSrcPage, cLeftDstPage);

        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uSrcAddrReg < pSrcHid->u32Limit
            && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
            && uDstAddrReg < pCtx->esHid.u32Limit
            && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysSrcMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
            if (rcStrict != VINF_SUCCESS)
                break;

            RTGCPHYS GCPhysDstMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
            if (rcStrict != VINF_SUCCESS)
                break;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            OP_TYPE *puDstMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem);
            if (rcStrict == VINF_SUCCESS)
            {
                OP_TYPE const *puSrcMem;
                rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem);
                if (rcStrict == VINF_SUCCESS)
                {
                    /* Perform the operation. */
                    memcpy(puDstMem, puSrcMem, cLeftPage * (OP_SIZE / 8));

                    /* Update the registers. */
                    uSrcAddrReg += cLeftPage * cbIncr;
                    uDstAddrReg += cLeftPage * cbIncr;
                    uCounterReg -= cLeftPage;
                    continue;
                }
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
            if (rcStrict != VINF_SUCCESS)
                break;
            rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
            if (rcStrict != VINF_SUCCESS)
                break;

            uSrcAddrReg += cbIncr;
            uDstAddrReg += cbIncr;
            uCounterReg--;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
        if (rcStrict != VINF_SUCCESS)
            break;
    } while (uCounterReg != 0);

    /*
     * Update the registers.
     */
    pCtx->ADDR_rCX = uCounterReg;
    pCtx->ADDR_rDI = uDstAddrReg;
    pCtx->ADDR_rSI = uSrcAddrReg;
    if (rcStrict == VINF_SUCCESS)
        iemRegAddToRip(pIemCpu, cbInstr);

    return rcStrict;
}


/**
 * Implements 'REP STOS'.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
        return VINF_SUCCESS;

    VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const uValue = pCtx->OP_rAX;
    ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
#else
        uint64_t uVirtAddr = uAddrReg;
#endif
        uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uAddrReg < pCtx->esHid.u32Limit
            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                break;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            OP_TYPE *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Update the regs first so we can loop on cLeftPage. */
                uCounterReg -= cLeftPage;
                uAddrReg    += cLeftPage * cbIncr;

                /* Do the memsetting. */
#if OP_SIZE == 8
                memset(puMem, uValue, cLeftPage);
/*#elif OP_SIZE == 32
                ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
#else
                while (cLeftPage-- > 0)
                    *puMem++ = uValue;
#endif

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
            if (rcStrict != VINF_SUCCESS)
                break;
            uAddrReg += cbIncr;
            uCounterReg--;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
        if (rcStrict != VINF_SUCCESS)
            break;
    } while (uCounterReg != 0);

    /*
     * Update the registers.
     */
    pCtx->ADDR_rCX = uCounterReg;
    pCtx->ADDR_rDI = uAddrReg;
    if (rcStrict == VINF_SUCCESS)
        iemRegAddToRip(pIemCpu, cbInstr);

    return rcStrict;
}


#if OP_SIZE != 64

/**
 * Implements 'INS' (no rep)
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
{
    PVM pVM = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC rcStrict;

    /*
     * ASSUMES the #GP for I/O permission is taken first, then any #GP for
     * segmentation and finally any #PF due to virtual address translation.
     * ASSUMES nothing is read from the I/O port before traps are taken.
     */
    rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    OP_TYPE *puMem;
    rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint32_t u32Value;
    rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8);
    if (IOM_SUCCESS(rcStrict))
    {
        *puMem = (OP_TYPE)u32Value;
        VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
        if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
        {
            if (!pCtx->eflags.Bits.u1DF)
                pCtx->ADDR_rDI += OP_SIZE / 8;
            else
                pCtx->ADDR_rDI -= OP_SIZE / 8;
            iemRegAddToRip(pIemCpu, cbInstr);
        }
        /* iemMemMap already checked permissions, so this may only be real errors
           or access handlers meddling. The access handler case is going to
           cause misbehavior if the instruction is re-interpreted or something.
           So, we fail with an internal error here instead. */
        else
            AssertLogRelFailedReturn(VERR_INTERNAL_ERROR_3);
    }
    return rcStrict;
}

/**
 * Implements 'REP INS'.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
{
    PVM pVM = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    uint16_t const u16Port = pCtx->dx;
    VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
        return VINF_SUCCESS;

    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
#else
        uint64_t uVirtAddr = uAddrReg;
#endif
        uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uAddrReg < pCtx->esHid.u32Limit
            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                break;

            /*
             * If we can map the page without trouble, we would've liked to use
             * a string I/O method to do the work, but the current IOM
             * interface doesn't match our current approach. So, do a regular
             * loop instead.
             */
            /** @todo Change the I/O manager interface to make use of
             *        mapped buffers instead of leaving those bits to the
             *        device implementation? */
            OP_TYPE *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem);
            if (rcStrict == VINF_SUCCESS)
            {
                while (cLeftPage-- > 0)
                {
                    uint32_t u32Value;
                    rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
                    if (!IOM_SUCCESS(rcStrict))
                        break;
                    *puMem++ = (OP_TYPE)u32Value;
                    uAddrReg += cbIncr;
                    uCounterReg -= 1;

                    if (rcStrict != VINF_SUCCESS)
                    {
                        /** @todo massage rc */
                        break;
                    }
                }
                if (rcStrict != VINF_SUCCESS)
                    break;

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         *
         * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
         *       I/O port, otherwise it wouldn't really be restartable.
         */
        /** @todo investigate what the CPU actually does with \#PF/\#GP
         *        during INS. */
        do
        {
            OP_TYPE *puMem;
            rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
            if (rcStrict != VINF_SUCCESS)
                break;

            uint32_t u32Value;
            rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
            if (!IOM_SUCCESS(rcStrict))
                break;
            *puMem = (OP_TYPE)u32Value;

            VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
            AssertLogRelBreakStmt(rcStrict2 == VINF_SUCCESS, rcStrict = VERR_INTERNAL_ERROR_3); /* See non-rep version. */

            uAddrReg += cbIncr;
            uCounterReg--;
            cLeftPage--;
            if (rcStrict != VINF_SUCCESS)
            {
                /** @todo massage IOM status codes! */
                break;
            }
        } while ((int32_t)cLeftPage > 0);
        if (rcStrict != VINF_SUCCESS)
            break;
    } while (uCounterReg != 0);

    /*
     * Update the registers.
     */
    pCtx->ADDR_rCX = uCounterReg;
    pCtx->ADDR_rDI = uAddrReg;
    if (rcStrict == VINF_SUCCESS)
        iemRegAddToRip(pIemCpu, cbInstr);

    return rcStrict;
}


/**
 * Implements 'OUTS' (no rep)
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE))
{
    PVM pVM = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC rcStrict;

    /*
     * ASSUMES the #GP for I/O permission is taken first, then any #GP for
     * segmentation and finally any #PF due to virtual address translation.
     * ASSUMES nothing is written to the I/O port before traps are taken.
     */
    rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    OP_TYPE uValue;
    rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, X86_SREG_ES, pCtx->ADDR_rDI);
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8);
        if (IOM_SUCCESS(rcStrict))
        {
            if (!pCtx->eflags.Bits.u1DF)
                pCtx->ADDR_rDI += OP_SIZE / 8;
            else
                pCtx->ADDR_rDI -= OP_SIZE / 8;
            iemRegAddToRip(pIemCpu, cbInstr);
            /** @todo massage IOM status codes. */
        }
    }
    return rcStrict;
}

/**
 * Implements 'REP OUTS'.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE))
{
    PVM pVM = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    uint16_t const u16Port = pCtx->dx;
    VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
        return VINF_SUCCESS;

    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
#else
        uint64_t uVirtAddr = uAddrReg;
#endif
        uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uAddrReg < pCtx->esHid.u32Limit
            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                break;

            /*
             * If we can map the page without trouble, we would've liked to use
             * a string I/O method to do the work, but the current IOM
             * interface doesn't match our current approach. So, do a regular
             * loop instead.
             */
            /** @todo Change the I/O manager interface to make use of
             *        mapped buffers instead of leaving those bits to the
             *        device implementation? */
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
            if (rcStrict == VINF_SUCCESS)
            {
                while (cLeftPage-- > 0)
                {
                    uint32_t u32Value = *puMem++;
                    rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8);
                    if (!IOM_SUCCESS(rcStrict))
                        break;
                    uAddrReg += cbIncr;
                    uCounterReg -= 1;

                    if (rcStrict != VINF_SUCCESS)
                    {
                        /** @todo massage IOM rc */
                        break;
                    }
                }
                if (rcStrict != VINF_SUCCESS)
                    break;

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         *
         * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
         *       I/O port, otherwise it wouldn't really be restartable.
         */
        /** @todo investigate what the CPU actually does with \#PF/\#GP
         *        during OUTS. */
        do
        {
            OP_TYPE uValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, X86_SREG_ES, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                break;

            rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8);
            if (!IOM_SUCCESS(rcStrict))
                break;

            uAddrReg += cbIncr;
            uCounterReg--;
            cLeftPage--;
            if (rcStrict != VINF_SUCCESS)
            {
                /** @todo massage IOM status codes! */
                break;
            }
        } while ((int32_t)cLeftPage > 0);
        if (rcStrict != VINF_SUCCESS)
            break;
    } while (uCounterReg != 0);

    /*
     * Update the registers.
     */
    pCtx->ADDR_rCX = uCounterReg;
    pCtx->ADDR_rDI = uAddrReg;
    if (rcStrict == VINF_SUCCESS)
        iemRegAddToRip(pIemCpu, cbInstr);

    return rcStrict;
}

#endif /* OP_SIZE != 64 */


#undef OP_rAX
#undef OP_SIZE
#undef ADDR_SIZE
#undef ADDR_rDI
#undef ADDR_rSI
#undef ADDR_rCX
#undef ADDR_rIP
#undef ADDR_TYPE
#undef ADDR2_TYPE

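For reference, with the OP_SIZE/ADDR_SIZE values from the sketch near the top of the page, the RT_CONCAT4 invocations above would expand into names such as the following (illustrative expansion only; the decoder that dispatches to these helpers is not part of this file):

    /* OP_SIZE == 16, ADDR_SIZE == 32: */
    IEM_CIMPL_DEF_1(iemCImpl_rep_movs_op16_addr32, uint8_t, iEffSeg)
    IEM_CIMPL_DEF_0(iemCImpl_stos_ax_m32)
    IEM_CIMPL_DEF_0(iemCImpl_ins_op16_addr32)
    IEM_CIMPL_DEF_0(iemCImpl_rep_ins_op16_addr32)
    IEM_CIMPL_DEF_0(iemCImpl_outs_op16_addr32)
    IEM_CIMPL_DEF_0(iemCImpl_rep_outs_op16_addr32)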