VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@ 42625

Last change on this file was in revision 42625, checked in by vboxsync, 13 years ago:

Build fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 50.7 KB
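This header is a code template rather than a stand-alone translation unit: the IEM code includes it once per operand/address-size combination, defining OP_SIZE and ADDR_SIZE beforehand (the #error checks below reject anything else), and the #undef block at the end clears the macros again so the next inclusion can pick a different pair. As a rough, illustrative sketch of what one such instantiation might look like in an including file (the actual include sites live elsewhere in IEM and are not shown on this page):

    /* Illustrative include site, not taken from this page: 16-bit operands, 32-bit addressing. */
    #define OP_SIZE   16
    #define ADDR_SIZE 32
    #include "IEMAllCImplStrInstr.cpp.h"   /* emits iemCImpl_repe_cmps_op16_addr32, iemCImpl_rep_movs_op16_addr32, ... */

With those values, RT_CONCAT3/RT_CONCAT4 expand OP_TYPE to uint16_t and paste the sizes into the function names defined below, so the same body compiles once per variant.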
1/* $Id: IEMAllCImplStrInstr.cpp.h 42625 2012-08-06 13:53:12Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50#else
51# error "Bad ADDR_SIZE."
52#endif
53#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
54
55
56/**
57 * Implements 'REPE CMPS'.
58 */
59IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
60{
61 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
62
63 /*
64 * Setup.
65 */
66 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
67 if (uCounterReg == 0)
68 {
69 iemRegAddToRip(pIemCpu, cbInstr);
70 return VINF_SUCCESS;
71 }
72
73 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
74 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
75 if (rcStrict != VINF_SUCCESS)
76 return rcStrict;
77
78 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
79 if (rcStrict != VINF_SUCCESS)
80 return rcStrict;
81
82 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
83 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
84 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
85 uint32_t uEFlags = pCtx->eflags.u;
86
87 /*
88 * The loop.
89 */
90 do
91 {
92 /*
93 * Do segmentation and virtual page stuff.
94 */
95#if ADDR_SIZE != 64
96 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
97 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
98#else
99 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
100 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
101#endif
102 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
103 if (cLeftSrc1Page > uCounterReg)
104 cLeftSrc1Page = uCounterReg;
105 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
106 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
107
108 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
109 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
110#if ADDR_SIZE != 64
111 && uSrc1AddrReg < pSrc1Hid->u32Limit
112 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
113 && uSrc2AddrReg < pCtx->es.u32Limit
114 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
115#endif
116 )
117 {
118 RTGCPHYS GCPhysSrc1Mem;
119 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
120 if (rcStrict != VINF_SUCCESS)
121 return rcStrict;
122
123 RTGCPHYS GCPhysSrc2Mem;
124 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
125 if (rcStrict != VINF_SUCCESS)
126 return rcStrict;
127
128 /*
129 * If we can map the page without trouble, do a block processing
130 * until the end of the current page.
131 */
132 PGMPAGEMAPLOCK PgLockSrc2Mem;
133 OP_TYPE const *puSrc2Mem;
134 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
135 if (rcStrict == VINF_SUCCESS)
136 {
137 PGMPAGEMAPLOCK PgLockSrc1Mem;
138 OP_TYPE const *puSrc1Mem;
139 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
140 if (rcStrict == VINF_SUCCESS)
141 {
142 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
143 {
144 /* All matches, only compare the last item to get the right eflags. */
145 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
146 uSrc1AddrReg += cLeftPage * cbIncr;
147 uSrc2AddrReg += cLeftPage * cbIncr;
148 uCounterReg -= cLeftPage;
149 }
150 else
151 {
152 /* Some mismatch, compare each item (and keep volatile
153 memory in mind). */
154 uint32_t off = 0;
155 do
156 {
157 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
158 off++;
159 } while ( off < cLeftPage
160 && (uEFlags & X86_EFL_ZF));
161 uSrc1AddrReg += cbIncr * off;
162 uSrc2AddrReg += cbIncr * off;
163 uCounterReg -= off;
164 }
165
166 /* Update the registers before looping. */
167 pCtx->ADDR_rCX = uCounterReg;
168 pCtx->ADDR_rSI = uSrc1AddrReg;
169 pCtx->ADDR_rDI = uSrc2AddrReg;
170 pCtx->eflags.u = uEFlags;
171
172 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
173 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
174 continue;
175 }
176 }
177 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
178 }
179
180 /*
181 * Fallback - slow processing till the end of the current page.
182 * In the cross-page boundary case we will end up here with cLeftPage
183 * as 0, and we then execute one loop.
184 */
185 do
186 {
187 OP_TYPE uValue1;
188 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
189 if (rcStrict != VINF_SUCCESS)
190 return rcStrict;
191 OP_TYPE uValue2;
192 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
193 if (rcStrict != VINF_SUCCESS)
194 return rcStrict;
195 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
196
197 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
198 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
199 pCtx->ADDR_rCX = --uCounterReg;
200 pCtx->eflags.u = uEFlags;
201 cLeftPage--;
202 } while ( (int32_t)cLeftPage > 0
203 && (uEFlags & X86_EFL_ZF));
204 } while ( uCounterReg != 0
205 && (uEFlags & X86_EFL_ZF));
206
207 /*
208 * Done.
209 */
210 iemRegAddToRip(pIemCpu, cbInstr);
211 return VINF_SUCCESS;
212}
213
214
215/**
216 * Implements 'REPNE CMPS'.
217 */
218IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
219{
220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
221
222 /*
223 * Setup.
224 */
225 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
226 if (uCounterReg == 0)
227 {
228 iemRegAddToRip(pIemCpu, cbInstr);
229 return VINF_SUCCESS;
230 }
231
232 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
233 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
234 if (rcStrict != VINF_SUCCESS)
235 return rcStrict;
236
237 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
238 if (rcStrict != VINF_SUCCESS)
239 return rcStrict;
240
241 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
242 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
243 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
244 uint32_t uEFlags = pCtx->eflags.u;
245
246 /*
247 * The loop.
248 */
249 do
250 {
251 /*
252 * Do segmentation and virtual page stuff.
253 */
254#if ADDR_SIZE != 64
255 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
256 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
257#else
258 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
259 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
260#endif
261 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
262 if (cLeftSrc1Page > uCounterReg)
263 cLeftSrc1Page = uCounterReg;
264 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
266
267 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
268 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
269#if ADDR_SIZE != 64
270 && uSrc1AddrReg < pSrc1Hid->u32Limit
271 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
272 && uSrc2AddrReg < pCtx->es.u32Limit
273 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
274#endif
275 )
276 {
277 RTGCPHYS GCPhysSrc1Mem;
278 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
279 if (rcStrict != VINF_SUCCESS)
280 return rcStrict;
281
282 RTGCPHYS GCPhysSrc2Mem;
283 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
284 if (rcStrict != VINF_SUCCESS)
285 return rcStrict;
286
287 /*
288 * If we can map the page without trouble, do a block processing
289 * until the end of the current page.
290 */
291 OP_TYPE const *puSrc2Mem;
292 PGMPAGEMAPLOCK PgLockSrc2Mem;
293 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
294 if (rcStrict == VINF_SUCCESS)
295 {
296 OP_TYPE const *puSrc1Mem;
297 PGMPAGEMAPLOCK PgLockSrc1Mem;
298 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
299 if (rcStrict == VINF_SUCCESS)
300 {
301 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
302 {
303 /* All matches, only compare the last item to get the right eflags. */
304 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
305 uSrc1AddrReg += cLeftPage * cbIncr;
306 uSrc2AddrReg += cLeftPage * cbIncr;
307 uCounterReg -= cLeftPage;
308 }
309 else
310 {
311 /* Some mismatch, compare each item (and keep volatile
312 memory in mind). */
313 uint32_t off = 0;
314 do
315 {
316 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
317 off++;
318 } while ( off < cLeftPage
319 && !(uEFlags & X86_EFL_ZF));
320 uSrc1AddrReg += cbIncr * off;
321 uSrc2AddrReg += cbIncr * off;
322 uCounterReg -= off;
323 }
324
325 /* Update the registers before looping. */
326 pCtx->ADDR_rCX = uCounterReg;
327 pCtx->ADDR_rSI = uSrc1AddrReg;
328 pCtx->ADDR_rDI = uSrc2AddrReg;
329 pCtx->eflags.u = uEFlags;
330
331 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
332 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
333 continue;
334 }
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 }
337 }
338
339 /*
340 * Fallback - slow processing till the end of the current page.
341 * In the cross-page boundary case we will end up here with cLeftPage
342 * as 0, and we then execute one loop.
343 */
344 do
345 {
346 OP_TYPE uValue1;
347 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
348 if (rcStrict != VINF_SUCCESS)
349 return rcStrict;
350 OP_TYPE uValue2;
351 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
352 if (rcStrict != VINF_SUCCESS)
353 return rcStrict;
354 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
355
356 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
357 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
358 pCtx->ADDR_rCX = --uCounterReg;
359 pCtx->eflags.u = uEFlags;
360 cLeftPage--;
361 } while ( (int32_t)cLeftPage > 0
362 && !(uEFlags & X86_EFL_ZF));
363 } while ( uCounterReg != 0
364 && !(uEFlags & X86_EFL_ZF));
365
366 /*
367 * Done.
368 */
369 iemRegAddToRip(pIemCpu, cbInstr);
370 return VINF_SUCCESS;
371}
372
373
374/**
375 * Implements 'REPE SCAS'.
376 */
377IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
378{
379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
380
381 /*
382 * Setup.
383 */
384 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
385 if (uCounterReg == 0)
386 {
387 iemRegAddToRip(pIemCpu, cbInstr);
388 return VINF_SUCCESS;
389 }
390
391 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
392 if (rcStrict != VINF_SUCCESS)
393 return rcStrict;
394
395 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
396 OP_TYPE const uValueReg = pCtx->OP_rAX;
397 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
398 uint32_t uEFlags = pCtx->eflags.u;
399
400 /*
401 * The loop.
402 */
403 do
404 {
405 /*
406 * Do segmentation and virtual page stuff.
407 */
408#if ADDR_SIZE != 64
409 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
410#else
411 uint64_t uVirtAddr = uAddrReg;
412#endif
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418#if ADDR_SIZE != 64
419 && uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
421#endif
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do a block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
447
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
453 Assert(!(uEFlags & X86_EFL_ZF) == (i < cLeftPage));
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross-page boundary case we will end up here with cLeftPage
472 * as 0, and we then execute one loop.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRip(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRip(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
517 if (rcStrict != VINF_SUCCESS)
518 return rcStrict;
519
520 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
521 OP_TYPE const uValueReg = pCtx->OP_rAX;
522 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
523 uint32_t uEFlags = pCtx->eflags.u;
524
525 /*
526 * The loop.
527 */
528 do
529 {
530 /*
531 * Do segmentation and virtual page stuff.
532 */
533#if ADDR_SIZE != 64
534 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
535#else
536 uint64_t uVirtAddr = uAddrReg;
537#endif
538 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
539 if (cLeftPage > uCounterReg)
540 cLeftPage = uCounterReg;
541 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
542 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
543#if ADDR_SIZE != 64
544 && uAddrReg < pCtx->es.u32Limit
545 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
546#endif
547 )
548 {
549 RTGCPHYS GCPhysMem;
550 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
551 if (rcStrict != VINF_SUCCESS)
552 return rcStrict;
553
554 /*
555 * If we can map the page without trouble, do a block processing
556 * until the end of the current page.
557 */
558 PGMPAGEMAPLOCK PgLockMem;
559 OP_TYPE const *puMem;
560 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
561 if (rcStrict == VINF_SUCCESS)
562 {
563 /* Search till we find a mismatching item. */
564 OP_TYPE uTmpValue;
565 bool fQuit;
566 uint32_t i = 0;
567 do
568 {
569 uTmpValue = puMem[i++];
570 fQuit = uTmpValue == uValueReg;
571 } while (i < cLeftPage && !fQuit);
572
573 /* Update the regs. */
574 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
575 pCtx->ADDR_rCX = uCounterReg -= i;
576 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
577 pCtx->eflags.u = uEFlags;
578 Assert((!(uEFlags & X86_EFL_ZF) != (i < cLeftPage)) || (i == cLeftPage));
579 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
580 if (fQuit)
581 break;
582
583
584 /* If unaligned, we drop thru and do the page crossing access
585 below. Otherwise, do the next page. */
586 if (!(uVirtAddr & (OP_SIZE - 1)))
587 continue;
588 if (uCounterReg == 0)
589 break;
590 cLeftPage = 0;
591 }
592 }
593
594 /*
595 * Fallback - slow processing till the end of the current page.
596 * In the cross-page boundary case we will end up here with cLeftPage
597 * as 0, and we then execute one loop.
598 */
599 do
600 {
601 OP_TYPE uTmpValue;
602 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
603 if (rcStrict != VINF_SUCCESS)
604 return rcStrict;
605 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
606
607 pCtx->ADDR_rDI = uAddrReg += cbIncr;
608 pCtx->ADDR_rCX = --uCounterReg;
609 pCtx->eflags.u = uEFlags;
610 cLeftPage--;
611 } while ( (int32_t)cLeftPage > 0
612 && !(uEFlags & X86_EFL_ZF));
613 } while ( uCounterReg != 0
614 && !(uEFlags & X86_EFL_ZF));
615
616 /*
617 * Done.
618 */
619 iemRegAddToRip(pIemCpu, cbInstr);
620 return VINF_SUCCESS;
621}
622
623
624
625
626/**
627 * Implements 'REP MOVS'.
628 */
629IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
630{
631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
632
633 /*
634 * Setup.
635 */
636 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
637 if (uCounterReg == 0)
638 {
639 iemRegAddToRip(pIemCpu, cbInstr);
640 return VINF_SUCCESS;
641 }
642
643 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
644 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
645 if (rcStrict != VINF_SUCCESS)
646 return rcStrict;
647
648 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
649 if (rcStrict != VINF_SUCCESS)
650 return rcStrict;
651
652 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
653 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
654 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
655
656 /*
657 * If we're reading back what we write, we have to let the verification code
658 * know, to prevent a false positive.
659 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
660 */
661#ifdef IEM_VERIFICATION_MODE
662 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
663 && (cbIncr > 0
664 ? uSrcAddrReg <= uDstAddrReg
665 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
666 : uDstAddrReg <= uSrcAddrReg
667 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
668 pIemCpu->fOverlappingMovs = true;
669#endif
670
671 /*
672 * The loop.
673 */
674 do
675 {
676 /*
677 * Do segmentation and virtual page stuff.
678 */
679#if ADDR_SIZE != 64
680 ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
681 ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->es.u64Base + uDstAddrReg;
682#else
683 uint64_t uVirtSrcAddr = uSrcAddrReg;
684 uint64_t uVirtDstAddr = uDstAddrReg;
685#endif
686 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
687 if (cLeftSrcPage > uCounterReg)
688 cLeftSrcPage = uCounterReg;
689 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
690 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
691
692 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
693 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
694#if ADDR_SIZE != 64
695 && uSrcAddrReg < pSrcHid->u32Limit
696 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
697 && uDstAddrReg < pCtx->es.u32Limit
698 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
699#endif
700 )
701 {
702 RTGCPHYS GCPhysSrcMem;
703 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
704 if (rcStrict != VINF_SUCCESS)
705 return rcStrict;
706
707 RTGCPHYS GCPhysDstMem;
708 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
709 if (rcStrict != VINF_SUCCESS)
710 return rcStrict;
711
712 /*
713 * If we can map the page without trouble, do a block processing
714 * until the end of the current page.
715 */
716 PGMPAGEMAPLOCK PgLockDstMem;
717 OP_TYPE *puDstMem;
718 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
719 if (rcStrict == VINF_SUCCESS)
720 {
721 PGMPAGEMAPLOCK PgLockSrcMem;
722 OP_TYPE const *puSrcMem;
723 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
724 if (rcStrict == VINF_SUCCESS)
725 {
726 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
727 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
728
729 /* Perform the operation exactly (don't use memcpy to avoid
730 having to consider how its implementation would affect
731 any overlapping source and destination area). */
732 OP_TYPE const *puSrcCur = puSrcMem;
733 OP_TYPE *puDstCur = puDstMem;
734 uint32_t cTodo = cLeftPage;
735 while (cTodo-- > 0)
736 *puDstCur++ = *puSrcCur++;
737
738 /* Update the registers. */
739 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
740 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
741 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
742
743 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
744 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
745 continue;
746 }
747 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
748 }
749 }
750
751 /*
752 * Fallback - slow processing till the end of the current page.
753 * In the cross-page boundary case we will end up here with cLeftPage
754 * as 0, and we then execute one loop.
755 */
756 do
757 {
758 OP_TYPE uValue;
759 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
763 if (rcStrict != VINF_SUCCESS)
764 return rcStrict;
765
766 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
767 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
768 pCtx->ADDR_rCX = --uCounterReg;
769 cLeftPage--;
770 } while ((int32_t)cLeftPage > 0);
771 } while (uCounterReg != 0);
772
773 /*
774 * Done.
775 */
776 iemRegAddToRip(pIemCpu, cbInstr);
777 return VINF_SUCCESS;
778}
779
780
781/**
782 * Implements 'REP STOS'.
783 */
784IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
785{
786 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
787
788 /*
789 * Setup.
790 */
791 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
792 if (uCounterReg == 0)
793 {
794 iemRegAddToRip(pIemCpu, cbInstr);
795 return VINF_SUCCESS;
796 }
797
798 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
799 if (rcStrict != VINF_SUCCESS)
800 return rcStrict;
801
802 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
803 OP_TYPE const uValue = pCtx->OP_rAX;
804 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
805
806 /*
807 * The loop.
808 */
809 do
810 {
811 /*
812 * Do segmentation and virtual page stuff.
813 */
814#if ADDR_SIZE != 64
815 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
816#else
817 uint64_t uVirtAddr = uAddrReg;
818#endif
819 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
820 if (cLeftPage > uCounterReg)
821 cLeftPage = uCounterReg;
822 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
823 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
824#if ADDR_SIZE != 64
825 && uAddrReg < pCtx->es.u32Limit
826 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
827#endif
828 )
829 {
830 RTGCPHYS GCPhysMem;
831 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
832 if (rcStrict != VINF_SUCCESS)
833 return rcStrict;
834
835 /*
836 * If we can map the page without trouble, do a block processing
837 * until the end of the current page.
838 */
839 PGMPAGEMAPLOCK PgLockMem;
840 OP_TYPE *puMem;
841 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
842 if (rcStrict == VINF_SUCCESS)
843 {
844 /* Update the regs first so we can loop on cLeftPage. */
845 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
846 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
847
848 /* Do the memsetting. */
849#if OP_SIZE == 8
850 memset(puMem, uValue, cLeftPage);
851/*#elif OP_SIZE == 32
852 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
853#else
854 while (cLeftPage-- > 0)
855 *puMem++ = uValue;
856#endif
857
858 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
859
860 /* If unaligned, we drop thru and do the page crossing access
861 below. Otherwise, do the next page. */
862 if (!(uVirtAddr & (OP_SIZE - 1)))
863 continue;
864 if (uCounterReg == 0)
865 break;
866 cLeftPage = 0;
867 }
868 }
869
870 /*
871 * Fallback - slow processing till the end of the current page.
872 * In the cross-page boundary case we will end up here with cLeftPage
873 * as 0, and we then execute one loop.
874 */
875 do
876 {
877 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
878 if (rcStrict != VINF_SUCCESS)
879 return rcStrict;
880 pCtx->ADDR_rDI = uAddrReg += cbIncr;
881 pCtx->ADDR_rCX = --uCounterReg;
882 cLeftPage--;
883 } while ((int32_t)cLeftPage > 0);
884 } while (uCounterReg != 0);
885
886 /*
887 * Done.
888 */
889 iemRegAddToRip(pIemCpu, cbInstr);
890 return VINF_SUCCESS;
891}
892
893
894/**
895 * Implements 'REP LODS'.
896 */
897IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
898{
899 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
900
901 /*
902 * Setup.
903 */
904 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
905 if (uCounterReg == 0)
906 {
907 iemRegAddToRip(pIemCpu, cbInstr);
908 return VINF_SUCCESS;
909 }
910
911 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
912 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
913 if (rcStrict != VINF_SUCCESS)
914 return rcStrict;
915
916 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
917 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
918
919 /*
920 * The loop.
921 */
922 do
923 {
924 /*
925 * Do segmentation and virtual page stuff.
926 */
927#if ADDR_SIZE != 64
928 ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
929#else
930 uint64_t uVirtAddr = uAddrReg;
931#endif
932 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
933 if (cLeftPage > uCounterReg)
934 cLeftPage = uCounterReg;
935 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
936 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
937#if ADDR_SIZE != 64
938 && uAddrReg < pSrcHid->u32Limit
939 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
940#endif
941 )
942 {
943 RTGCPHYS GCPhysMem;
944 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
945 if (rcStrict != VINF_SUCCESS)
946 return rcStrict;
947
948 /*
949 * If we can map the page without trouble, we can get away with
950 * just reading the last value on the page.
951 */
952 PGMPAGEMAPLOCK PgLockMem;
953 OP_TYPE const *puMem;
954 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
955 if (rcStrict == VINF_SUCCESS)
956 {
957 /* Only get the last byte, the rest doesn't matter in direct access mode. */
958#if OP_SIZE == 32
959 pCtx->rax = puMem[cLeftPage - 1];
960#else
961 pCtx->OP_rAX = puMem[cLeftPage - 1];
962#endif
963 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
964 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
965 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
966
967 /* If unaligned, we drop thru and do the page crossing access
968 below. Otherwise, do the next page. */
969 if (!(uVirtAddr & (OP_SIZE - 1)))
970 continue;
971 if (uCounterReg == 0)
972 break;
973 cLeftPage = 0;
974 }
975 }
976
977 /*
978 * Fallback - slow processing till the end of the current page.
979 * In the cross-page boundary case we will end up here with cLeftPage
980 * as 0, and we then execute one loop.
981 */
982 do
983 {
984 OP_TYPE uTmpValue;
985 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
986 if (rcStrict != VINF_SUCCESS)
987 return rcStrict;
988#if OP_SIZE == 32
989 pCtx->rax = uTmpValue;
990#else
991 pCtx->OP_rAX = uTmpValue;
992#endif
993 pCtx->ADDR_rSI = uAddrReg += cbIncr;
994 pCtx->ADDR_rCX = --uCounterReg;
995 cLeftPage--;
996 } while ((int32_t)cLeftPage > 0);
997 if (rcStrict != VINF_SUCCESS)
998 break;
999 } while (uCounterReg != 0);
1000
1001 /*
1002 * Done.
1003 */
1004 iemRegAddToRip(pIemCpu, cbInstr);
1005 return VINF_SUCCESS;
1006}
1007
1008
1009#if OP_SIZE != 64
1010
1011/**
1012 * Implements 'INS' (no rep)
1013 */
1014IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1015{
1016 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1017 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1018 VBOXSTRICTRC rcStrict;
1019
1020 /*
1021 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1022 * segmentation and finally any #PF due to virtual address translation.
1023 * ASSUMES nothing is read from the I/O port before traps are taken.
1024 */
1025 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1026 if (rcStrict != VINF_SUCCESS)
1027 return rcStrict;
1028
1029 OP_TYPE *puMem;
1030 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1031 if (rcStrict != VINF_SUCCESS)
1032 return rcStrict;
1033
1034 uint32_t u32Value;
1035 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1036 rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8);
1037 else
1038 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1039 if (IOM_SUCCESS(rcStrict))
1040 {
1041 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1042 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1043 {
1044 if (!pCtx->eflags.Bits.u1DF)
1045 pCtx->ADDR_rDI += OP_SIZE / 8;
1046 else
1047 pCtx->ADDR_rDI -= OP_SIZE / 8;
1048 iemRegAddToRip(pIemCpu, cbInstr);
1049 }
1050 /* iemMemMap already checked permissions, so this can only be real errors
1051 or access handlers meddling. The access handler case is going to
1052 cause misbehavior if the instruction is re-interpreted or the like. So,
1053 we fail with an internal error here instead. */
1054 else
1055 AssertLogRelFailedReturn(VERR_IEM_IPE_1);
1056 }
1057 return rcStrict;
1058}
1059
1060
1061/**
1062 * Implements 'REP INS'.
1063 */
1064IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1065{
1066 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1067 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1068
1069 /*
1070 * Setup.
1071 */
1072 uint16_t const u16Port = pCtx->dx;
1073 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1074 if (rcStrict != VINF_SUCCESS)
1075 return rcStrict;
1076
1077 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1078 if (uCounterReg == 0)
1079 {
1080 iemRegAddToRip(pIemCpu, cbInstr);
1081 return VINF_SUCCESS;
1082 }
1083
1084 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
1085 if (rcStrict != VINF_SUCCESS)
1086 return rcStrict;
1087
1088 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1089 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1090
1091 /*
1092 * The loop.
1093 */
1094 do
1095 {
1096 /*
1097 * Do segmentation and virtual page stuff.
1098 */
1099#if ADDR_SIZE != 64
1100 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
1101#else
1102 uint64_t uVirtAddr = uAddrReg;
1103#endif
1104 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1105 if (cLeftPage > uCounterReg)
1106 cLeftPage = uCounterReg;
1107 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1108 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1109#if ADDR_SIZE != 64
1110 && uAddrReg < pCtx->es.u32Limit
1111 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
1112#endif
1113 )
1114 {
1115 RTGCPHYS GCPhysMem;
1116 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1117 if (rcStrict != VINF_SUCCESS)
1118 return rcStrict;
1119
1120 /*
1121 * If we can map the page without trouble, we would've liked to use
1122 * a string I/O method to do the work, but the current IOM
1123 * interface doesn't match our current approach. So, do a regular
1124 * loop instead.
1125 */
1126 /** @todo Change the I/O manager interface to make use of
1127 * mapped buffers instead of leaving those bits to the
1128 * device implementation? */
1129 PGMPAGEMAPLOCK PgLockMem;
1130 OP_TYPE *puMem;
1131 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1132 if (rcStrict == VINF_SUCCESS)
1133 {
1134 uint32_t off = 0;
1135 while (off < cLeftPage)
1136 {
1137 uint32_t u32Value;
1138 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1139 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1140 else
1141 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1142 if (IOM_SUCCESS(rcStrict))
1143 {
1144 puMem[off] = (OP_TYPE)u32Value;
1145 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1146 pCtx->ADDR_rCX = --uCounterReg;
1147 }
1148 if (rcStrict != VINF_SUCCESS)
1149 {
1150 if (IOM_SUCCESS(rcStrict))
1151 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1152 if (uCounterReg == 0)
1153 iemRegAddToRip(pIemCpu, cbInstr);
1154 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1155 return rcStrict;
1156 }
1157 off++;
1158 }
1159 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1160
1161 /* If unaligned, we drop thru and do the page crossing access
1162 below. Otherwise, do the next page. */
1163 if (!(uVirtAddr & (OP_SIZE - 1)))
1164 continue;
1165 if (uCounterReg == 0)
1166 break;
1167 cLeftPage = 0;
1168 }
1169 }
1170
1171 /*
1172 * Fallback - slow processing till the end of the current page.
1173 * In the cross-page boundary case we will end up here with cLeftPage
1174 * as 0, and we then execute one loop.
1175 *
1176 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1177 * I/O port, otherwise it wouldn't really be restartable.
1178 */
1179 /** @todo investigate what the CPU actually does with \#PF/\#GP
1180 * during INS. */
1181 do
1182 {
1183 OP_TYPE *puMem;
1184 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1185 if (rcStrict != VINF_SUCCESS)
1186 return rcStrict;
1187
1188 uint32_t u32Value;
1189 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1190 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1191 else
1192 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1193 if (!IOM_SUCCESS(rcStrict))
1194 return rcStrict;
1195
1196 *puMem = (OP_TYPE)u32Value;
1197 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1198 AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */
1199
1200 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1201 pCtx->ADDR_rCX = --uCounterReg;
1202
1203 cLeftPage--;
1204 if (rcStrict != VINF_SUCCESS)
1205 {
1206 if (IOM_SUCCESS(rcStrict))
1207 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1208 if (uCounterReg == 0)
1209 iemRegAddToRip(pIemCpu, cbInstr);
1210 return rcStrict;
1211 }
1212 } while ((int32_t)cLeftPage > 0);
1213 } while (uCounterReg != 0);
1214
1215 /*
1216 * Done.
1217 */
1218 iemRegAddToRip(pIemCpu, cbInstr);
1219 return VINF_SUCCESS;
1220}
1221
1222
1223/**
1224 * Implements 'OUTS' (no rep)
1225 */
1226IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1227{
1228 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1229 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1230 VBOXSTRICTRC rcStrict;
1231
1232 /*
1233 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1234 * segmentation and finally any #PF due to virtual address translation.
1235 * ASSUMES nothing is read from the I/O port before traps are taken.
1236 */
1237 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1238 if (rcStrict != VINF_SUCCESS)
1239 return rcStrict;
1240
1241 OP_TYPE uValue;
1242 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1243 if (rcStrict == VINF_SUCCESS)
1244 {
1245 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1246 rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8);
1247 else
1248 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1249 if (IOM_SUCCESS(rcStrict))
1250 {
1251 if (!pCtx->eflags.Bits.u1DF)
1252 pCtx->ADDR_rSI += OP_SIZE / 8;
1253 else
1254 pCtx->ADDR_rSI -= OP_SIZE / 8;
1255 iemRegAddToRip(pIemCpu, cbInstr);
1256 if (rcStrict != VINF_SUCCESS)
1257 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1258 }
1259 }
1260 return rcStrict;
1261}
1262
1263
1264/**
1265 * Implements 'REP OUTS'.
1266 */
1267IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1268{
1269 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1270 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1271
1272 /*
1273 * Setup.
1274 */
1275 uint16_t const u16Port = pCtx->dx;
1276 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1277 if (rcStrict != VINF_SUCCESS)
1278 return rcStrict;
1279
1280 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1281 if (uCounterReg == 0)
1282 {
1283 iemRegAddToRip(pIemCpu, cbInstr);
1284 return VINF_SUCCESS;
1285 }
1286
1287 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1288 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
1289 if (rcStrict != VINF_SUCCESS)
1290 return rcStrict;
1291
1292 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1293 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1294
1295 /*
1296 * The loop.
1297 */
1298 do
1299 {
1300 /*
1301 * Do segmentation and virtual page stuff.
1302 */
1303#if ADDR_SIZE != 64
1304 ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
1305#else
1306 uint64_t uVirtAddr = uAddrReg;
1307#endif
1308 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1309 if (cLeftPage > uCounterReg)
1310 cLeftPage = uCounterReg;
1311 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1312 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1313#if ADDR_SIZE != 64
1314 && uAddrReg < pHid->u32Limit
1315 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
1316#endif
1317 )
1318 {
1319 RTGCPHYS GCPhysMem;
1320 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1321 if (rcStrict != VINF_SUCCESS)
1322 return rcStrict;
1323
1324 /*
1325 * If we can map the page without trouble, we would've liked to use
1326 * a string I/O method to do the work, but the current IOM
1327 * interface doesn't match our current approach. So, do a regular
1328 * loop instead.
1329 */
1330 /** @todo Change the I/O manager interface to make use of
1331 * mapped buffers instead of leaving those bits to the
1332 * device implementation? */
1333 PGMPAGEMAPLOCK PgLockMem;
1334 OP_TYPE const *puMem;
1335 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1336 if (rcStrict == VINF_SUCCESS)
1337 {
1338 uint32_t off = 0;
1339 while (off < cLeftPage)
1340 {
1341 uint32_t u32Value = *puMem++;
1342 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1343 rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8);
1344 else
1345 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1346 if (IOM_SUCCESS(rcStrict))
1347 {
1348 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1349 pCtx->ADDR_rCX = --uCounterReg;
1350 }
1351 if (rcStrict != VINF_SUCCESS)
1352 {
1353 if (IOM_SUCCESS(rcStrict))
1354 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1355 if (uCounterReg == 0)
1356 iemRegAddToRip(pIemCpu, cbInstr);
1357 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1358 return rcStrict;
1359 }
1360 off++;
1361 }
1362 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1363
1364 /* If unaligned, we drop thru and do the page crossing access
1365 below. Otherwise, do the next page. */
1366 if (!(uVirtAddr & (OP_SIZE - 1)))
1367 continue;
1368 if (uCounterReg == 0)
1369 break;
1370 cLeftPage = 0;
1371 }
1372 }
1373
1374 /*
1375 * Fallback - slow processing till the end of the current page.
1376 * In the cross-page boundary case we will end up here with cLeftPage
1377 * as 0, and we then execute one loop.
1378 *
1379 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1380 * I/O port, otherwise it wouldn't really be restartable.
1381 */
1382 /** @todo investigate what the CPU actually does with \#PF/\#GP
1383 * during OUTS. */
1384 do
1385 {
1386 OP_TYPE uValue;
1387 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1388 if (rcStrict != VINF_SUCCESS)
1389 return rcStrict;
1390
1391 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1392 rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8);
1393 else
1394 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1395 if (IOM_SUCCESS(rcStrict))
1396 {
1397 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1398 pCtx->ADDR_rCX = --uCounterReg;
1399 cLeftPage--;
1400 }
1401 if (rcStrict != VINF_SUCCESS)
1402 {
1403 if (IOM_SUCCESS(rcStrict))
1404 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1405 if (uCounterReg == 0)
1406 iemRegAddToRip(pIemCpu, cbInstr);
1407 return rcStrict;
1408 }
1409 } while ((int32_t)cLeftPage > 0);
1410 } while (uCounterReg != 0);
1411
1412 /*
1413 * Done.
1414 */
1415 iemRegAddToRip(pIemCpu, cbInstr);
1416 return VINF_SUCCESS;
1417}
1418
1419#endif /* OP_SIZE != 64-bit */
1420
1421
1422#undef OP_rAX
1423#undef OP_SIZE
1424#undef ADDR_SIZE
1425#undef ADDR_rDI
1426#undef ADDR_rSI
1427#undef ADDR_rCX
1428#undef ADDR_rIP
1429#undef ADDR2_TYPE
1430#undef ADDR_TYPE
1431#undef ADDR2_TYPE
1432
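Every implementation above clamps its fast-path block size to what remains of the current guest page before mapping it (the cLeftPage computation repeated at the top of each loop). Below is a small self-contained C sketch of just that arithmetic, using stand-in constants for a 4 KB page (MY_PAGE_SIZE and MY_PAGE_OFFSET_MASK are assumptions, not the real IPRT PAGE_SIZE/PAGE_OFFSET_MASK definitions) and an assumed address and count:

    #include <stdint.h>
    #include <stdio.h>

    #define MY_PAGE_SIZE        4096u   /* stand-in for PAGE_SIZE */
    #define MY_PAGE_OFFSET_MASK 0xfffu  /* stand-in for PAGE_OFFSET_MASK */

    int main(void)
    {
        uint32_t const cbItem      = 16 / 8;    /* OP_SIZE / 8 for the 16-bit variant */
        uint64_t const uVirtAddr   = 0x10ffaU;  /* 6 bytes short of the next page boundary */
        uint32_t const uCounterReg = 100;       /* rCX: items the guest asked for */

        /* Items that fit between uVirtAddr and the end of its page. */
        uint32_t cLeftPage = (uint32_t)((MY_PAGE_SIZE - (uVirtAddr & MY_PAGE_OFFSET_MASK)) / cbItem);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;            /* never do more than rCX items */

        printf("process %u items on this page, %u remain after\n",
               cLeftPage, uCounterReg - cLeftPage);   /* -> 3 items, 97 remain */
        return 0;
    }

When the result is 0 (the next item straddles the page boundary), the code above drops into its one-item-at-a-time fallback loop, exactly as the "can be null if unaligned, do one fallback round" comments note.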