source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@ 39127

Last change on this file since 39127 was 38092, checked in by vboxsync, 13 years ago

Fixed assertion (last item may or may not match).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 46.8 KB
1/* $Id: IEMAllCImplStrInstr.cpp.h 38092 2011-07-21 11:59:24Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50#else
51# error "Bad ADDR_SIZE."
52#endif
53#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
54
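/* Illustrative expansion (a sketch only; assuming OP_SIZE is 16 and ADDR_SIZE is 32,
   one of the combinations the #error guards above allow):
       OP_rAX     -> ax          OP_TYPE   -> uint16_t
       ADDR_rSI   -> esi         ADDR_rDI  -> edi
       ADDR_rCX   -> ecx         ADDR_TYPE -> uint32_t
       ADDR2_TYPE -> uint32_t
   so RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE) below names the
   function iemCImpl_repe_cmps_op16_addr32. */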
55
56/**
57 * Implements 'REPE CMPS'.
58 */
59IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
60{
61 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
62
63 /*
64 * Setup.
65 */
66 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
67 if (uCounterReg == 0)
68 {
69 iemRegAddToRip(pIemCpu, cbInstr);
70 return VINF_SUCCESS;
71 }
72
73 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
74 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
75 if (rcStrict != VINF_SUCCESS)
76 return rcStrict;
77
78 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
79 if (rcStrict != VINF_SUCCESS)
80 return rcStrict;
81
82 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
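 /* Illustration, assuming OP_SIZE is 32: cbIncr is +4 when DF is clear (forward
    string op) and -4 when DF is set, i.e. one operand per element processed. */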
83 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
84 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
85 uint32_t uEFlags = pCtx->eflags.u;
86
87 /*
88 * The loop.
89 */
90 do
91 {
92 /*
93 * Do segmentation and virtual page stuff.
94 */
95#if ADDR_SIZE != 64
96 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
97 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->esHid.u64Base + uSrc2AddrReg;
98#else
99 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
100 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
101#endif
102 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
103 if (cLeftSrc1Page > uCounterReg)
104 cLeftSrc1Page = uCounterReg;
105 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
106 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
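 /* Worked example of the chunking above (illustrative numbers only): with 4 KiB
    pages, OP_SIZE 32 and uVirtSrc1Addr ending in 0xFF8, cLeftSrc1Page is
    (0x1000 - 0xFF8) / 4 = 2 operands before the source page ends. cLeftPage is
    the smaller of the source and destination counts, and since cLeftSrc1Page was
    already capped to uCounterReg it never exceeds the remaining count either.
    The other string ops below use the same pattern. */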
107
108 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
109 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
110#if ADDR_SIZE != 64
111 && uSrc1AddrReg < pSrc1Hid->u32Limit
112 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
113 && uSrc2AddrReg < pCtx->esHid.u32Limit
114 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
115#endif
116 )
117 {
118 RTGCPHYS GCPhysSrc1Mem;
119 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
120 if (rcStrict != VINF_SUCCESS)
121 break;
122
123 RTGCPHYS GCPhysSrc2Mem;
124 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
125 if (rcStrict != VINF_SUCCESS)
126 break;
127
128 /*
129 * If we can map the page without trouble, do block processing
130 * until the end of the current page.
131 */
132 OP_TYPE const *puSrc2Mem;
133 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem);
134 if (rcStrict == VINF_SUCCESS)
135 {
136 OP_TYPE const *puSrc1Mem;
137 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem);
138 if (rcStrict == VINF_SUCCESS)
139 {
140 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
141 {
142 /* All matches, only compare the last item to get the right eflags. */
143 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
144 uSrc1AddrReg += cLeftPage * cbIncr;
145 uSrc2AddrReg += cLeftPage * cbIncr;
146 uCounterReg -= cLeftPage;
147 }
148 else
149 {
150 /* Some mismatch, compare each item (and keep volatile
151 memory in mind). */
152 do
153 {
154 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)puSrc1Mem, *puSrc2Mem, &uEFlags);
155 uSrc1AddrReg += cbIncr;
156 uSrc2AddrReg += cbIncr;
157 uCounterReg--;
158 puSrc1Mem++;
159 puSrc2Mem++;
160 cLeftPage--;
161 } while ( (int32_t)cLeftPage > 0
162 && (uEFlags & X86_EFL_ZF));
163 }
164 continue;
165 }
166 }
167 }
168
169 /*
170 * Fallback - slow processing till the end of the current page.
171 * In the cross page boundary case we will end up here with cLeftPage
172 * as 0, so we execute the loop once.
173 */
174 do
175 {
176 OP_TYPE uValue1;
177 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
178 if (rcStrict != VINF_SUCCESS)
179 break;
180 OP_TYPE uValue2;
181 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
182 if (rcStrict != VINF_SUCCESS)
183 break;
184 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
185
186 uSrc1AddrReg += cbIncr;
187 uSrc2AddrReg += cbIncr;
188 uCounterReg--;
189 cLeftPage--;
190 } while ( (int32_t)cLeftPage > 0
191 && (uEFlags & X86_EFL_ZF));
192 if (rcStrict != VINF_SUCCESS)
193 break;
194 } while ( uCounterReg != 0
195 && (uEFlags & X86_EFL_ZF));
196
197 /*
198 * Update the registers.
199 */
200 pCtx->ADDR_rCX = uCounterReg;
201 pCtx->ADDR_rSI = uSrc1AddrReg;
202 pCtx->ADDR_rDI = uSrc2AddrReg;
203 pCtx->eflags.u = uEFlags;
204 if (rcStrict == VINF_SUCCESS)
205 iemRegAddToRip(pIemCpu, cbInstr);
206
207 return rcStrict;
208}
209
210
211/**
212 * Implements 'REPNE CMPS'.
213 */
214IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
215{
216 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
217
218 /*
219 * Setup.
220 */
221 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
222 if (uCounterReg == 0)
223 {
224 iemRegAddToRip(pIemCpu, cbInstr);
225 return VINF_SUCCESS;
226 }
227
228 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
229 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
230 if (rcStrict != VINF_SUCCESS)
231 return rcStrict;
232
233 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
234 if (rcStrict != VINF_SUCCESS)
235 return rcStrict;
236
237 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
238 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
239 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
240 uint32_t uEFlags = pCtx->eflags.u;
241
242 /*
243 * The loop.
244 */
245 do
246 {
247 /*
248 * Do segmentation and virtual page stuff.
249 */
250#if ADDR_SIZE != 64
251 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
252 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->esHid.u64Base + uSrc2AddrReg;
253#else
254 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
255 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
256#endif
257 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
258 if (cLeftSrc1Page > uCounterReg)
259 cLeftSrc1Page = uCounterReg;
260 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
261 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
262
263 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
264 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
265#if ADDR_SIZE != 64
266 && uSrc1AddrReg < pSrc1Hid->u32Limit
267 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
268 && uSrc2AddrReg < pCtx->esHid.u32Limit
269 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
270#endif
271 )
272 {
273 RTGCPHYS GCPhysSrc1Mem;
274 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
275 if (rcStrict != VINF_SUCCESS)
276 break;
277
278 RTGCPHYS GCPhysSrc2Mem;
279 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
280 if (rcStrict != VINF_SUCCESS)
281 break;
282
283 /*
284 * If we can map the page without trouble, do block processing
285 * until the end of the current page.
286 */
287 OP_TYPE const *puSrc2Mem;
288 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem);
289 if (rcStrict == VINF_SUCCESS)
290 {
291 OP_TYPE const *puSrc1Mem;
292 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem);
293 if (rcStrict == VINF_SUCCESS)
294 {
295 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
296 {
297 /* All matches, only compare the last item to get the right eflags. */
298 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
299 uSrc1AddrReg += cLeftPage * cbIncr;
300 uSrc2AddrReg += cLeftPage * cbIncr;
301 uCounterReg -= cLeftPage;
302 }
303 else
304 {
305 /* Some mismatch, compare each item (and keep volatile
306 memory in mind). */
307 do
308 {
309 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)puSrc1Mem, *puSrc2Mem, &uEFlags);
310 uSrc1AddrReg += cbIncr;
311 uSrc2AddrReg += cbIncr;
312 uCounterReg--;
313 puSrc1Mem++;
314 puSrc2Mem++;
315 cLeftPage--;
316 } while ( (int32_t)cLeftPage > 0
317 && !(uEFlags & X86_EFL_ZF));
318 }
319 continue;
320 }
321 }
322 }
323
324 /*
325 * Fallback - slow processing till the end of the current page.
326 * In the cross page boundary case we will end up here with cLeftPage
327 * as 0, so we execute the loop once.
328 */
329 do
330 {
331 OP_TYPE uValue1;
332 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
333 if (rcStrict != VINF_SUCCESS)
334 break;
335 OP_TYPE uValue2;
336 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
337 if (rcStrict != VINF_SUCCESS)
338 break;
339 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
340
341 uSrc1AddrReg += cbIncr;
342 uSrc2AddrReg += cbIncr;
343 uCounterReg--;
344 cLeftPage--;
345 } while ( (int32_t)cLeftPage > 0
346 && !(uEFlags & X86_EFL_ZF));
347 if (rcStrict != VINF_SUCCESS)
348 break;
349 } while ( uCounterReg != 0
350 && !(uEFlags & X86_EFL_ZF));
351
352 /*
353 * Update the registers.
354 */
355 pCtx->ADDR_rCX = uCounterReg;
356 pCtx->ADDR_rSI = uSrc1AddrReg;
357 pCtx->ADDR_rDI = uSrc2AddrReg;
358 pCtx->eflags.u = uEFlags;
359 if (rcStrict == VINF_SUCCESS)
360 iemRegAddToRip(pIemCpu, cbInstr);
361
362 return rcStrict;
363}
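/* Apart from the sense of the ZF tests, the REPE and REPNE CMPS templates above
   are identical. REPE keeps iterating while the last comparison left ZF set
   (elements equal):
       } while (uCounterReg != 0 && (uEFlags & X86_EFL_ZF));
   while REPNE keeps iterating while ZF is clear (elements differ):
       } while (uCounterReg != 0 && !(uEFlags & X86_EFL_ZF));
   matching the x86 termination conditions for the REPE/REPNE prefixes. */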
364
365
366/**
367 * Implements 'REPE SCAS'.
368 */
369IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
370{
371 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
372
373 /*
374 * Setup.
375 */
376 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
377 if (uCounterReg == 0)
378 {
379 iemRegAddToRip(pIemCpu, cbInstr);
380 return VINF_SUCCESS;
381 }
382
383 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
384 if (rcStrict != VINF_SUCCESS)
385 return rcStrict;
386
387 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
388 OP_TYPE const uValueReg = pCtx->OP_rAX;
389 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
390 uint32_t uEFlags = pCtx->eflags.u;
391
392 /*
393 * The loop.
394 */
395 do
396 {
397 /*
398 * Do segmentation and virtual page stuff.
399 */
400#if ADDR_SIZE != 64
401 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
402#else
403 uint64_t uVirtAddr = uAddrReg;
404#endif
405 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
406 if (cLeftPage > uCounterReg)
407 cLeftPage = uCounterReg;
408 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
409 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
410#if ADDR_SIZE != 64
411 && uAddrReg < pCtx->esHid.u32Limit
412 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
413#endif
414 )
415 {
416 RTGCPHYS GCPhysMem;
417 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
418 if (rcStrict != VINF_SUCCESS)
419 break;
420
421 /*
422 * If we can map the page without trouble, do block processing
423 * until the end of the current page.
424 */
425 OP_TYPE const *puMem;
426 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
427 if (rcStrict == VINF_SUCCESS)
428 {
429 /* Search till we find a mismatching item. */
430 OP_TYPE uTmpValue;
431 bool fQuit;
432 uint32_t i = 0;
433 do
434 {
435 uTmpValue = puMem[i++];
436 fQuit = uTmpValue != uValueReg;
437 } while (i < cLeftPage && !fQuit);
438
439 /* Update the regs. */
440 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
441 uCounterReg -= i;
442 uAddrReg += i * cbIncr;
443 Assert(!(uEFlags & X86_EFL_ZF) == (i < cLeftPage));
444 if (fQuit)
445 break;
446
447
448 /* If unaligned, we drop thru and do the page crossing access
449 below. Otherwise, do the next page. */
450 if (!(uVirtAddr & (OP_SIZE - 1)))
451 continue;
452 if (uCounterReg == 0)
453 break;
454 cLeftPage = 0;
455 }
456 }
457
458 /*
459 * Fallback - slow processing till the end of the current page.
460 * In the cross page boundary case we will end up here with cLeftPage
461 * as 0, so we execute the loop once.
462 */
463 do
464 {
465 OP_TYPE uTmpValue;
466 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
467 if (rcStrict != VINF_SUCCESS)
468 break;
469 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
470
471 uAddrReg += cbIncr;
472 uCounterReg--;
473 cLeftPage--;
474 } while ( (int32_t)cLeftPage > 0
475 && (uEFlags & X86_EFL_ZF));
476 if (rcStrict != VINF_SUCCESS)
477 break;
478 } while ( uCounterReg != 0
479 && (uEFlags & X86_EFL_ZF));
480
481 /*
482 * Update the registers.
483 */
484 pCtx->ADDR_rCX = uCounterReg;
485 pCtx->ADDR_rDI = uAddrReg;
486 pCtx->eflags.u = uEFlags;
487 if (rcStrict == VINF_SUCCESS)
488 iemRegAddToRip(pIemCpu, cbInstr);
489
490 return rcStrict;
491}
492
493
494/**
495 * Implements 'REPNE SCAS'.
496 */
497IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
498{
499 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
500
501 /*
502 * Setup.
503 */
504 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
505 if (uCounterReg == 0)
506 {
507 iemRegAddToRip(pIemCpu, cbInstr);
508 return VINF_SUCCESS;
509 }
510
511 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
512 if (rcStrict != VINF_SUCCESS)
513 return rcStrict;
514
515 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
516 OP_TYPE const uValueReg = pCtx->OP_rAX;
517 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
518 uint32_t uEFlags = pCtx->eflags.u;
519
520 /*
521 * The loop.
522 */
523 do
524 {
525 /*
526 * Do segmentation and virtual page stuff.
527 */
528#if ADDR_SIZE != 64
529 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
530#else
531 uint64_t uVirtAddr = uAddrReg;
532#endif
533 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
534 if (cLeftPage > uCounterReg)
535 cLeftPage = uCounterReg;
536 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
537 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
538#if ADDR_SIZE != 64
539 && uAddrReg < pCtx->esHid.u32Limit
540 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
541#endif
542 )
543 {
544 RTGCPHYS GCPhysMem;
545 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
546 if (rcStrict != VINF_SUCCESS)
547 break;
548
549 /*
550 * If we can map the page without trouble, do block processing
551 * until the end of the current page.
552 */
553 OP_TYPE const *puMem;
554 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
555 if (rcStrict == VINF_SUCCESS)
556 {
557 /* Search till we find a matching item. */
558 OP_TYPE uTmpValue;
559 bool fQuit;
560 uint32_t i = 0;
561 do
562 {
563 uTmpValue = puMem[i++];
564 fQuit = uTmpValue == uValueReg;
565 } while (i < cLeftPage && !fQuit);
566
567 /* Update the regs. */
568 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
569 uCounterReg -= i;
570 uAddrReg += i * cbIncr;
571 Assert((!(uEFlags & X86_EFL_ZF) != (i < cLeftPage)) || (i == cLeftPage));
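 /* This assertion allows i == cLeftPage: when the whole page has been scanned the
    last item may or may not have matched, so ZF is only required to agree with the
    scan result in the early-exit (i < cLeftPage) case. */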
572 if (fQuit)
573 break;
574
575
576 /* If unaligned, we drop thru and do the page crossing access
577 below. Otherwise, do the next page. */
578 if (!(uVirtAddr & (OP_SIZE - 1)))
579 continue;
580 if (uCounterReg == 0)
581 break;
582 cLeftPage = 0;
583 }
584 }
585
586 /*
587 * Fallback - slow processing till the end of the current page.
588 * In the cross page boundary case we will end up here with cLeftPage
589 * as 0, so we execute the loop once.
590 */
591 do
592 {
593 OP_TYPE uTmpValue;
594 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
595 if (rcStrict != VINF_SUCCESS)
596 break;
597 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
598
599 uAddrReg += cbIncr;
600 uCounterReg--;
601 cLeftPage--;
602 } while ( (int32_t)cLeftPage > 0
603 && !(uEFlags & X86_EFL_ZF));
604 if (rcStrict != VINF_SUCCESS)
605 break;
606 } while ( uCounterReg != 0
607 && !(uEFlags & X86_EFL_ZF));
608
609 /*
610 * Update the registers.
611 */
612 pCtx->ADDR_rCX = uCounterReg;
613 pCtx->ADDR_rDI = uAddrReg;
614 pCtx->eflags.u = uEFlags;
615 if (rcStrict == VINF_SUCCESS)
616 iemRegAddToRip(pIemCpu, cbInstr);
617
618 return rcStrict;
619}
620
621
622
623
624/**
625 * Implements 'REP MOVS'.
626 */
627IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
628{
629 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
630
631 /*
632 * Setup.
633 */
634 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
635 if (uCounterReg == 0)
636 {
637 iemRegAddToRip(pIemCpu, cbInstr);
638 return VINF_SUCCESS;
639 }
640
641 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
642 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
643 if (rcStrict != VINF_SUCCESS)
644 return rcStrict;
645
646 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
647 if (rcStrict != VINF_SUCCESS)
648 return rcStrict;
649
650 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
651 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
652 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
653
654 /*
655 * The loop.
656 */
657 do
658 {
659 /*
660 * Do segmentation and virtual page stuff.
661 */
662#if ADDR_SIZE != 64
663 ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
664 ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->esHid.u64Base + uDstAddrReg;
665#else
666 uint64_t uVirtSrcAddr = uSrcAddrReg;
667 uint64_t uVirtDstAddr = uDstAddrReg;
668#endif
669 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
670 if (cLeftSrcPage > uCounterReg)
671 cLeftSrcPage = uCounterReg;
672 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
673 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
674
675 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
676 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
677#if ADDR_SIZE != 64
678 && uSrcAddrReg < pSrcHid->u32Limit
679 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
680 && uDstAddrReg < pCtx->esHid.u32Limit
681 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
682#endif
683 )
684 {
685 RTGCPHYS GCPhysSrcMem;
686 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
687 if (rcStrict != VINF_SUCCESS)
688 break;
689
690 RTGCPHYS GCPhysDstMem;
691 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
692 if (rcStrict != VINF_SUCCESS)
693 break;
694
695 /*
696 * If we can map the page without trouble, do block processing
697 * until the end of the current page.
698 */
699 OP_TYPE *puDstMem;
700 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem);
701 if (rcStrict == VINF_SUCCESS)
702 {
703 OP_TYPE const *puSrcMem;
704 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem);
705 if (rcStrict == VINF_SUCCESS)
706 {
707 /* Perform the operation. */
708 memcpy(puDstMem, puSrcMem, cLeftPage * (OP_SIZE / 8));
709
710 /* Update the registers. */
711 uSrcAddrReg += cLeftPage * cbIncr;
712 uDstAddrReg += cLeftPage * cbIncr;
713 uCounterReg -= cLeftPage;
714 continue;
715 }
716 }
717 }
718
719 /*
720 * Fallback - slow processing till the end of the current page.
721 * In the cross page boundary case we will end up here with cLeftPage
722 * as 0, so we execute the loop once.
723 */
724 do
725 {
726 OP_TYPE uValue;
727 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
728 if (rcStrict != VINF_SUCCESS)
729 break;
730 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
731 if (rcStrict != VINF_SUCCESS)
732 break;
733
734 uSrcAddrReg += cbIncr;
735 uDstAddrReg += cbIncr;
736 uCounterReg--;
737 cLeftPage--;
738 } while ((int32_t)cLeftPage > 0);
739 if (rcStrict != VINF_SUCCESS)
740 break;
741 } while (uCounterReg != 0);
742
743 /*
744 * Update the registers.
745 */
746 pCtx->ADDR_rCX = uCounterReg;
747 pCtx->ADDR_rDI = uDstAddrReg;
748 pCtx->ADDR_rSI = uSrcAddrReg;
749 if (rcStrict == VINF_SUCCESS)
750 iemRegAddToRip(pIemCpu, cbInstr);
751
752 return rcStrict;
753}
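/* For MOVS the fast path above is a straight memcpy() of up to a page worth of
   operands (cLeftPage * (OP_SIZE / 8) bytes) per outer iteration; the fallback
   moves one operand at a time through the regular fetch/store helpers, so any
   fault is raised at exactly the element that caused it. */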
754
755
756/**
757 * Implements 'REP STOS'.
758 */
759IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
760{
761 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
762
763 /*
764 * Setup.
765 */
766 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
767 if (uCounterReg == 0)
768 {
769 iemRegAddToRip(pIemCpu, cbInstr);
770 return VINF_SUCCESS;
771 }
772
773 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
774 if (rcStrict != VINF_SUCCESS)
775 return rcStrict;
776
777 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
778 OP_TYPE const uValue = pCtx->OP_rAX;
779 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
780
781 /*
782 * The loop.
783 */
784 do
785 {
786 /*
787 * Do segmentation and virtual page stuff.
788 */
789#if ADDR_SIZE != 64
790 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
791#else
792 uint64_t uVirtAddr = uAddrReg;
793#endif
794 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
795 if (cLeftPage > uCounterReg)
796 cLeftPage = uCounterReg;
797 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
798 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
799#if ADDR_SIZE != 64
800 && uAddrReg < pCtx->esHid.u32Limit
801 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
802#endif
803 )
804 {
805 RTGCPHYS GCPhysMem;
806 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
807 if (rcStrict != VINF_SUCCESS)
808 break;
809
810 /*
811 * If we can map the page without trouble, do block processing
812 * until the end of the current page.
813 */
814 OP_TYPE *puMem;
815 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem);
816 if (rcStrict == VINF_SUCCESS)
817 {
818 /* Update the regs first so we can loop on cLeftPage. */
819 uCounterReg -= cLeftPage;
820 uAddrReg += cLeftPage * cbIncr;
821
822 /* Do the memsetting. */
823#if OP_SIZE == 8
824 memset(puMem, uValue, cLeftPage);
825/*#elif OP_SIZE == 32
826 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
827#else
828 while (cLeftPage-- > 0)
829 *puMem++ = uValue;
830#endif
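 /* memset() is only usable for the byte variant since it replicates a single
    byte value; the wider variants fall back to an explicit store loop here (the
    commented-out ASMMemFill32 hints at a possible 32-bit fast fill). */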
831
832 /* If unaligned, we drop thru and do the page crossing access
833 below. Otherwise, do the next page. */
834 if (!(uVirtAddr & (OP_SIZE - 1)))
835 continue;
836 if (uCounterReg == 0)
837 break;
838 cLeftPage = 0;
839 }
840 }
841
842 /*
843 * Fallback - slow processing till the end of the current page.
844 * In the cross page boundary case we will end up here with cLeftPage
845 * as 0, so we execute the loop once.
846 */
847 do
848 {
849 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
850 if (rcStrict != VINF_SUCCESS)
851 break;
852 uAddrReg += cbIncr;
853 uCounterReg--;
854 cLeftPage--;
855 } while ((int32_t)cLeftPage > 0);
856 if (rcStrict != VINF_SUCCESS)
857 break;
858 } while (uCounterReg != 0);
859
860 /*
861 * Update the registers.
862 */
863 pCtx->ADDR_rCX = uCounterReg;
864 pCtx->ADDR_rDI = uAddrReg;
865 if (rcStrict == VINF_SUCCESS)
866 iemRegAddToRip(pIemCpu, cbInstr);
867
868 return rcStrict;
869}
870
871
872/**
873 * Implements 'REP LODS'.
874 */
875IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
876{
877 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
878
879 /*
880 * Setup.
881 */
882 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
883 if (uCounterReg == 0)
884 {
885 iemRegAddToRip(pIemCpu, cbInstr);
886 return VINF_SUCCESS;
887 }
888
889 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
890 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
891 if (rcStrict != VINF_SUCCESS)
892 return rcStrict;
893
894 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
895 OP_TYPE uValueReg = pCtx->OP_rAX;
896 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
897
898 /*
899 * The loop.
900 */
901 do
902 {
903 /*
904 * Do segmentation and virtual page stuff.
905 */
906#if ADDR_SIZE != 64
907 ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
908#else
909 uint64_t uVirtAddr = uAddrReg;
910#endif
911 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
912 if (cLeftPage > uCounterReg)
913 cLeftPage = uCounterReg;
914 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
915 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
916#if ADDR_SIZE != 64
917 && uAddrReg < pSrcHid->u32Limit
918 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
919#endif
920 )
921 {
922 RTGCPHYS GCPhysMem;
923 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
924 if (rcStrict != VINF_SUCCESS)
925 break;
926
927 /*
928 * If we can map the page without trouble, we can get away with
929 * just reading the last value on the page.
930 */
931 OP_TYPE const *puMem;
932 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
933 if (rcStrict == VINF_SUCCESS)
934 {
935 /* Only get the last value, the rest doesn't matter in direct access mode. */
936 uValueReg = puMem[cLeftPage - 1];
937
938 /* Update the regs. */
939 uCounterReg -= cLeftPage;
940 uAddrReg += cLeftPage * cbIncr;
941
942 /* If unaligned, we drop thru and do the page crossing access
943 below. Otherwise, do the next page. */
944 if (!(uVirtAddr & (OP_SIZE - 1)))
945 continue;
946 if (uCounterReg == 0)
947 break;
948 cLeftPage = 0;
949 }
950 }
951
952 /*
953 * Fallback - slow processing till the end of the current page.
954 * In the cross page boundary case we will end up here with cLeftPage
955 * as 0, so we execute the loop once.
956 */
957 do
958 {
959 OP_TYPE uTmpValue;
960 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
961 if (rcStrict != VINF_SUCCESS)
962 break;
963 uValueReg = uTmpValue;
964 uAddrReg += cbIncr;
965 uCounterReg--;
966 cLeftPage--;
967 } while ((int32_t)cLeftPage > 0);
968 if (rcStrict != VINF_SUCCESS)
969 break;
970 } while (uCounterReg != 0);
971
972 /*
973 * Update the registers.
974 */
975 pCtx->ADDR_rCX = uCounterReg;
976 pCtx->ADDR_rSI = uAddrReg;
977#if OP_SIZE == 32
978 pCtx->rax = uValueReg;
979#else
980 pCtx->OP_rAX = uValueReg;
981#endif
982 if (rcStrict == VINF_SUCCESS)
983 iemRegAddToRip(pIemCpu, cbInstr);
984
985 return rcStrict;
986}
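/* REP LODS is architecturally a bit odd in that only the value loaded by the
   final iteration survives in al/ax/eax/rax, which is why the fast path above
   just reads puMem[cLeftPage - 1]. The OP_SIZE == 32 case writes the full rax
   field so that, as on real hardware, loading eax zero-extends into the upper
   half of rax. */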
987
988
989#if OP_SIZE != 64
990
991/**
992 * Implements 'INS' (no rep)
993 */
994IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
995{
996 PVM pVM = IEMCPU_TO_VM(pIemCpu);
997 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
998 VBOXSTRICTRC rcStrict;
999
1000 /*
1001 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1002 * segmentation and finally any #PF due to virtual address translation.
1003 * ASSUMES nothing is read from the I/O port before traps are taken.
1004 */
1005 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1006 if (rcStrict != VINF_SUCCESS)
1007 return rcStrict;
1008
1009 OP_TYPE *puMem;
1010 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1011 if (rcStrict != VINF_SUCCESS)
1012 return rcStrict;
1013
1014 uint32_t u32Value;
1015 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1016 rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8);
1017 else
1018 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1019 if (IOM_SUCCESS(rcStrict))
1020 {
1021 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1022 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1023 {
1024 if (!pCtx->eflags.Bits.u1DF)
1025 pCtx->ADDR_rDI += OP_SIZE / 8;
1026 else
1027 pCtx->ADDR_rDI -= OP_SIZE / 8;
1028 iemRegAddToRip(pIemCpu, cbInstr);
1029 }
1030 /* iemMemMap already checked the permissions, so this may only be real errors
1031 or access handlers meddling. The access handler case is going to
1032 cause misbehavior if the instruction is re-interpreted or something. So,
1033 we fail with an internal error here instead. */
1034 else
1035 AssertLogRelFailedReturn(VERR_INTERNAL_ERROR_3);
1036 }
1037 return rcStrict;
1038}
1039
1040
1041/**
1042 * Implements 'REP INS'.
1043 */
1044IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1045{
1046 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1047 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1048
1049 /*
1050 * Setup.
1051 */
1052 uint16_t const u16Port = pCtx->dx;
1053 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1054 if (rcStrict != VINF_SUCCESS)
1055 return rcStrict;
1056
1057 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1058 if (uCounterReg == 0)
1059 {
1060 iemRegAddToRip(pIemCpu, cbInstr);
1061 return VINF_SUCCESS;
1062 }
1063
1064 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
1065 if (rcStrict != VINF_SUCCESS)
1066 return rcStrict;
1067
1068 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1069 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1070
1071 /*
1072 * The loop.
1073 */
1074 do
1075 {
1076 /*
1077 * Do segmentation and virtual page stuff.
1078 */
1079#if ADDR_SIZE != 64
1080 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
1081#else
1082 uint64_t uVirtAddr = uAddrReg;
1083#endif
1084 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1085 if (cLeftPage > uCounterReg)
1086 cLeftPage = uCounterReg;
1087 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1088 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1089#if ADDR_SIZE != 64
1090 && uAddrReg < pCtx->esHid.u32Limit
1091 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
1092#endif
1093 )
1094 {
1095 RTGCPHYS GCPhysMem;
1096 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1097 if (rcStrict != VINF_SUCCESS)
1098 break;
1099
1100 /*
1101 * If we can map the page without trouble, we would've liked to use
1102 * a string I/O method to do the work, but the current IOM
1103 * interface doesn't match our current approach. So, do a regular
1104 * loop instead.
1105 */
1106 /** @todo Change the I/O manager interface to make use of
1107 * mapped buffers instead of leaving those bits to the
1108 * device implementation? */
1109 OP_TYPE *puMem;
1110 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem);
1111 if (rcStrict == VINF_SUCCESS)
1112 {
1113 while (cLeftPage-- > 0)
1114 {
1115 uint32_t u32Value;
1116 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1117 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1118 else
1119 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1120 if (!IOM_SUCCESS(rcStrict))
1121 break;
1122 *puMem++ = (OP_TYPE)u32Value;
1123 uAddrReg += cbIncr;
1124 uCounterReg -= 1;
1125
1126 if (rcStrict != VINF_SUCCESS)
1127 {
1128 /** @todo massage rc */
1129 break;
1130 }
1131 }
1132 if (rcStrict != VINF_SUCCESS)
1133 break;
1134
1135 /* If unaligned, we drop thru and do the page crossing access
1136 below. Otherwise, do the next page. */
1137 if (!(uVirtAddr & (OP_SIZE - 1)))
1138 continue;
1139 if (uCounterReg == 0)
1140 break;
1141 cLeftPage = 0;
1142 }
1143 }
1144
1145 /*
1146 * Fallback - slow processing till the end of the current page.
1147 * In the cross page boundary case we will end up here with cLeftPage
1148 * as 0, so we execute the loop once.
1149 *
1150 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1151 * I/O port, otherwise it wouldn't really be restartable.
1152 */
1153 /** @todo investigate what the CPU actually does with \#PF/\#GP
1154 * during INS. */
1155 do
1156 {
1157 OP_TYPE *puMem;
1158 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1159 if (rcStrict != VINF_SUCCESS)
1160 break;
1161
1162 uint32_t u32Value;
1163 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1164 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1165 else
1166 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1167 if (!IOM_SUCCESS(rcStrict))
1168 break;
1169
1170 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1171 AssertLogRelBreakStmt(rcStrict2 == VINF_SUCCESS, rcStrict = VERR_INTERNAL_ERROR_3); /* See non-rep version. */
1172
1173 uAddrReg += cbIncr;
1174 uCounterReg--;
1175 cLeftPage--;
1176 if (rcStrict != VINF_SUCCESS)
1177 {
1178 /** @todo massage IOM status codes! */
1179 break;
1180 }
1181 } while ((int32_t)cLeftPage > 0);
1182 if (rcStrict != VINF_SUCCESS)
1183 break;
1184 } while (uCounterReg != 0);
1185
1186 /*
1187 * Update the registers.
1188 */
1189 pCtx->ADDR_rCX = uCounterReg;
1190 pCtx->ADDR_rDI = uAddrReg;
1191 if (rcStrict == VINF_SUCCESS)
1192 iemRegAddToRip(pIemCpu, cbInstr);
1193
1194 return rcStrict;
1195}
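/* In the REP INS loops above, !IOM_SUCCESS() catches outright failures, while
   the separate rcStrict != VINF_SUCCESS checks presumably exist for informational
   status codes that IOM_SUCCESS still accepts (the port read succeeded but the
   caller must return to ring-3); those stop the REP loop early as well, hence
   the remaining notes about massaging the IOM status codes. */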
1196
1197
1198/**
1199 * Implements 'OUTS' (no rep)
1200 */
1201IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1202{
1203 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1204 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1205 VBOXSTRICTRC rcStrict;
1206
1207 /*
1208 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1209 * segmentation and finally any #PF due to virtual address translation.
1210 * ASSUMES nothing is read from the I/O port before traps are taken.
1211 */
1212 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1213 if (rcStrict != VINF_SUCCESS)
1214 return rcStrict;
1215
1216 OP_TYPE uValue;
1217 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1218 if (rcStrict == VINF_SUCCESS)
1219 {
1220 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1221 rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8);
1222 else
1223 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1224 if (IOM_SUCCESS(rcStrict))
1225 {
1226 if (!pCtx->eflags.Bits.u1DF)
1227 pCtx->ADDR_rSI += OP_SIZE / 8;
1228 else
1229 pCtx->ADDR_rSI -= OP_SIZE / 8;
1230 iemRegAddToRip(pIemCpu, cbInstr);
1231 /** @todo massage IOM status codes. */
1232 }
1233 }
1234 return rcStrict;
1235}
1236
1237
1238/**
1239 * Implements 'REP OUTS'.
1240 */
1241IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1242{
1243 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1244 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1245
1246 /*
1247 * Setup.
1248 */
1249 uint16_t const u16Port = pCtx->dx;
1250 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1251 if (rcStrict != VINF_SUCCESS)
1252 return rcStrict;
1253
1254 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1255 if (uCounterReg == 0)
1256 {
1257 iemRegAddToRip(pIemCpu, cbInstr);
1258 return VINF_SUCCESS;
1259 }
1260
1261 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1262 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
1263 if (rcStrict != VINF_SUCCESS)
1264 return rcStrict;
1265
1266 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1267 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1268
1269 /*
1270 * The loop.
1271 */
1272 do
1273 {
1274 /*
1275 * Do segmentation and virtual page stuff.
1276 */
1277#if ADDR_SIZE != 64
1278 ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
1279#else
1280 uint64_t uVirtAddr = uAddrReg;
1281#endif
1282 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1283 if (cLeftPage > uCounterReg)
1284 cLeftPage = uCounterReg;
1285 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1286 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1287#if ADDR_SIZE != 64
1288 && uAddrReg < pHid->u32Limit
1289 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
1290#endif
1291 )
1292 {
1293 RTGCPHYS GCPhysMem;
1294 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1295 if (rcStrict != VINF_SUCCESS)
1296 break;
1297
1298 /*
1299 * If we can map the page without trouble, we would've liked to use
1300 * a string I/O method to do the work, but the current IOM
1301 * interface doesn't match our current approach. So, do a regular
1302 * loop instead.
1303 */
1304 /** @todo Change the I/O manager interface to make use of
1305 * mapped buffers instead of leaving those bits to the
1306 * device implementation? */
1307 OP_TYPE const *puMem;
1308 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
1309 if (rcStrict == VINF_SUCCESS)
1310 {
1311 while (cLeftPage-- > 0)
1312 {
1313 uint32_t u32Value = *puMem++;
1314 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1315 rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8);
1316 else
1317 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1318 if (!IOM_SUCCESS(rcStrict))
1319 break;
1320 uAddrReg += cbIncr;
1321 uCounterReg -= 1;
1322
1323 if (rcStrict != VINF_SUCCESS)
1324 {
1325 /** @todo massage IOM rc */
1326 break;
1327 }
1328 }
1329 if (rcStrict != VINF_SUCCESS)
1330 break;
1331
1332 /* If unaligned, we drop thru and do the page crossing access
1333 below. Otherwise, do the next page. */
1334 if (!(uVirtAddr & (OP_SIZE - 1)))
1335 continue;
1336 if (uCounterReg == 0)
1337 break;
1338 cLeftPage = 0;
1339 }
1340 }
1341
1342 /*
1343 * Fallback - slow processing till the end of the current page.
1344 * In the cross page boundary case we will end up here with cLeftPage
1345 * as 0, so we execute the loop once.
1346 *
1347 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1348 * I/O port, otherwise it wouldn't really be restartable.
1349 */
1350 /** @todo investigate what the CPU actually does with \#PF/\#GP
1351 * during OUTS. */
1352 do
1353 {
1354 OP_TYPE uValue;
1355 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1356 if (rcStrict != VINF_SUCCESS)
1357 break;
1358
1359 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1360 rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8);
1361 else
1362 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1363 if (!IOM_SUCCESS(rcStrict))
1364 break;
1365
1366 uAddrReg += cbIncr;
1367 uCounterReg--;
1368 cLeftPage--;
1369 if (rcStrict != VINF_SUCCESS)
1370 {
1371 /** @todo massage IOM status codes! */
1372 break;
1373 }
1374 } while ((int32_t)cLeftPage > 0);
1375 if (rcStrict != VINF_SUCCESS)
1376 break;
1377 } while (uCounterReg != 0);
1378
1379 /*
1380 * Update the registers.
1381 */
1382 pCtx->ADDR_rCX = uCounterReg;
1383 pCtx->ADDR_rSI = uAddrReg;
1384 if (rcStrict == VINF_SUCCESS)
1385 iemRegAddToRip(pIemCpu, cbInstr);
1386
1387 return rcStrict;
1388}
1389
1390#endif /* OP_SIZE != 64-bit */
1391
1392
1393#undef OP_rAX
1394#undef OP_SIZE
1395#undef ADDR_SIZE
1396#undef ADDR_rDI
1397#undef ADDR_rSI
1398#undef ADDR_rCX
1399#undef ADDR_rIP
1400#undef ADDR2_TYPE
1401#undef ADDR_TYPE
1402#undef ADDR2_TYPE
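/* Assumed usage sketch (not shown in this file): the includer presumably
   instantiates the template once per operand/address size combination, e.g.
       #define OP_SIZE   16
       #define ADDR_SIZE 32
       #include "IEMAllCImplStrInstr.cpp.h"
   and the #undef block above is what makes repeated inclusion safe. */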
1403