source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h @ 45849

Last change on this file since 45849 was 45305, checked in by vboxsync, 12 years ago

IOM: Adding pVCpu to a lot of calls and moving the lookup caches from VM to VMCPU.

1/* $Id: IEMAllCImplStrInstr.cpp.h 45305 2013-04-03 11:15:02Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50#else
51# error "Bad ADDR_SIZE."
52#endif
53#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
54
55
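/*
 * Illustration (not part of the template itself): with OP_SIZE defined as 32
 * and ADDR_SIZE defined as 16 by the includer, the macros above make OP_rAX
 * expand to eax, OP_TYPE to uint32_t, ADDR_rSI/ADDR_rDI/ADDR_rCX to si/di/cx,
 * ADDR_TYPE to uint16_t and ADDR2_TYPE to uint32_t, so the RT_CONCAT* names
 * below become e.g. iemCImpl_repe_cmps_op32_addr16.  The includer is assumed
 * to instantiate the template once per operand/address size combination along
 * these lines:
 *
 *     #define OP_SIZE   32
 *     #define ADDR_SIZE 16
 *     #include "IEMAllCImplStrInstr.cpp.h"
 *
 * (the #undef block at the end of this file cleans the parameters up again).
 */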
56/**
57 * Implements 'REPE CMPS'.
58 */
59IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
60{
61 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
62
63 /*
64 * Setup.
65 */
66 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
67 if (uCounterReg == 0)
68 {
69 iemRegAddToRip(pIemCpu, cbInstr);
70 return VINF_SUCCESS;
71 }
72
73 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
74 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
75 if (rcStrict != VINF_SUCCESS)
76 return rcStrict;
77
78 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
79 if (rcStrict != VINF_SUCCESS)
80 return rcStrict;
81
82 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
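 /* cbIncr is the signed element size selected by EFLAGS.DF (e.g. -4 for a
    32-bit operation with DF set), so rSI/rDI below advance in the right
    direction for both up- and down-string operation. */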
83 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
84 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
85 uint32_t uEFlags = pCtx->eflags.u;
86
87 /*
88 * The loop.
89 */
90 do
91 {
92 /*
93 * Do segmentation and virtual page stuff.
94 */
95#if ADDR_SIZE != 64
96 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
97 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
98#else
99 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
100 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
101#endif
102 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
103 if (cLeftSrc1Page > uCounterReg)
104 cLeftSrc1Page = uCounterReg;
105 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
106 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
107
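 /* Worked example with hypothetical numbers: for a 16-bit op, PAGE_SIZE 0x1000
    and uVirtSrc1Addr = 0x10ffc, (0x1000 - 0xffc) / 2 gives cLeftSrc1Page = 2,
    i.e. two items left before the page boundary; cLeftPage, the chunk handled
    per iteration, is that value clamped to the counter and to whatever is
    left on the second operand's page. */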
108 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
109 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
110#if ADDR_SIZE != 64
111 && uSrc1AddrReg < pSrc1Hid->u32Limit
112 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
113 && uSrc2AddrReg < pCtx->es.u32Limit
114 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
115#endif
116 )
117 {
118 RTGCPHYS GCPhysSrc1Mem;
119 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
120 if (rcStrict != VINF_SUCCESS)
121 return rcStrict;
122
123 RTGCPHYS GCPhysSrc2Mem;
124 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
125 if (rcStrict != VINF_SUCCESS)
126 return rcStrict;
127
128 /*
129 * If we can map the page without trouble, do a block processing
130 * until the end of the current page.
131 */
132 PGMPAGEMAPLOCK PgLockSrc2Mem;
133 OP_TYPE const *puSrc2Mem;
134 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
135 if (rcStrict == VINF_SUCCESS)
136 {
137 PGMPAGEMAPLOCK PgLockSrc1Mem;
138 OP_TYPE const *puSrc1Mem;
139 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
140 if (rcStrict == VINF_SUCCESS)
141 {
142 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
143 {
144 /* All matches, only compare the last item to get the right eflags. */
145 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
146 uSrc1AddrReg += cLeftPage * cbIncr;
147 uSrc2AddrReg += cLeftPage * cbIncr;
148 uCounterReg -= cLeftPage;
149 }
150 else
151 {
152 /* Some mismatch, compare each item (and keep volatile
153 memory in mind). */
154 uint32_t off = 0;
155 do
156 {
157 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
158 off++;
159 } while ( off < cLeftPage
160 && (uEFlags & X86_EFL_ZF));
161 uSrc1AddrReg += cbIncr * off;
162 uSrc2AddrReg += cbIncr * off;
163 uCounterReg -= off;
164 }
165
166 /* Update the registers before looping. */
167 pCtx->ADDR_rCX = uCounterReg;
168 pCtx->ADDR_rSI = uSrc1AddrReg;
169 pCtx->ADDR_rDI = uSrc2AddrReg;
170 pCtx->eflags.u = uEFlags;
171
172 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
173 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
174 continue;
175 }
176 }
177 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
178 }
179
180 /*
181 * Fallback - slow processing till the end of the current page.
182 * In the cross-page boundary case we will end up here with cLeftPage
183 * as 0; we then execute the loop once.
184 */
185 do
186 {
187 OP_TYPE uValue1;
188 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
189 if (rcStrict != VINF_SUCCESS)
190 return rcStrict;
191 OP_TYPE uValue2;
192 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
193 if (rcStrict != VINF_SUCCESS)
194 return rcStrict;
195 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
196
197 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
198 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
199 pCtx->ADDR_rCX = --uCounterReg;
200 pCtx->eflags.u = uEFlags;
201 cLeftPage--;
202 } while ( (int32_t)cLeftPage > 0
203 && (uEFlags & X86_EFL_ZF));
204 } while ( uCounterReg != 0
205 && (uEFlags & X86_EFL_ZF));
206
207 /*
208 * Done.
209 */
210 iemRegAddToRip(pIemCpu, cbInstr);
211 return VINF_SUCCESS;
212}
213
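/*
 * Reference sketch (illustrative only, not used by the emulation): the
 * architectural behaviour the function above implements for 'REPE CMPSW',
 * simplified to a flat address space, DF=0 and a bare ZF result.  The name
 * and the simplified memory model are hypothetical.
 */
#if 0
static void iemExampleRepeCmpswRef(uint16_t const *puSrc1 /* DS:rSI */,
                                   uint16_t const *puSrc2 /* ES:rDI */,
                                   uint32_t *pcItems /* rCX */, bool *pfZF)
{
    while (*pcItems != 0)
    {
        *pfZF = puSrc1[0] == puSrc2[0];   /* CMPS sets ZF from *rSI - *rDI. */
        puSrc1++;
        puSrc2++;
        *pcItems -= 1;
        if (!*pfZF)                       /* REPE stops once the items differ. */
            break;
    }
}
#endif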
214
215/**
216 * Implements 'REPNE CMPS'.
217 */
218IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
219{
220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
221
222 /*
223 * Setup.
224 */
225 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
226 if (uCounterReg == 0)
227 {
228 iemRegAddToRip(pIemCpu, cbInstr);
229 return VINF_SUCCESS;
230 }
231
232 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
233 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
234 if (rcStrict != VINF_SUCCESS)
235 return rcStrict;
236
237 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
238 if (rcStrict != VINF_SUCCESS)
239 return rcStrict;
240
241 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
242 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
243 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
244 uint32_t uEFlags = pCtx->eflags.u;
245
246 /*
247 * The loop.
248 */
249 do
250 {
251 /*
252 * Do segmentation and virtual page stuff.
253 */
254#if ADDR_SIZE != 64
255 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
256 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
257#else
258 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
259 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
260#endif
261 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
262 if (cLeftSrc1Page > uCounterReg)
263 cLeftSrc1Page = uCounterReg;
264 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
266
267 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
268 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
269#if ADDR_SIZE != 64
270 && uSrc1AddrReg < pSrc1Hid->u32Limit
271 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
272 && uSrc2AddrReg < pCtx->es.u32Limit
273 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
274#endif
275 )
276 {
277 RTGCPHYS GCPhysSrc1Mem;
278 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
279 if (rcStrict != VINF_SUCCESS)
280 return rcStrict;
281
282 RTGCPHYS GCPhysSrc2Mem;
283 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
284 if (rcStrict != VINF_SUCCESS)
285 return rcStrict;
286
287 /*
288 * If we can map the page without trouble, do a block processing
289 * until the end of the current page.
290 */
291 OP_TYPE const *puSrc2Mem;
292 PGMPAGEMAPLOCK PgLockSrc2Mem;
293 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
294 if (rcStrict == VINF_SUCCESS)
295 {
296 OP_TYPE const *puSrc1Mem;
297 PGMPAGEMAPLOCK PgLockSrc1Mem;
298 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
299 if (rcStrict == VINF_SUCCESS)
300 {
301 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
302 {
303 /* The blocks differ somewhere; compare only the last item to get the right eflags. */
304 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
305 uSrc1AddrReg += cLeftPage * cbIncr;
306 uSrc2AddrReg += cLeftPage * cbIncr;
307 uCounterReg -= cLeftPage;
308 }
309 else
310 {
311 /* The blocks are identical, so the very first compare will set ZF; do the
312 item-by-item loop anyway (and keep volatile memory in mind). */
313 uint32_t off = 0;
314 do
315 {
316 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
317 off++;
318 } while ( off < cLeftPage
319 && !(uEFlags & X86_EFL_ZF));
320 uSrc1AddrReg += cbIncr * off;
321 uSrc2AddrReg += cbIncr * off;
322 uCounterReg -= off;
323 }
324
325 /* Update the registers before looping. */
326 pCtx->ADDR_rCX = uCounterReg;
327 pCtx->ADDR_rSI = uSrc1AddrReg;
328 pCtx->ADDR_rDI = uSrc2AddrReg;
329 pCtx->eflags.u = uEFlags;
330
331 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
332 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
333 continue;
334 }
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 }
337 }
338
339 /*
340 * Fallback - slow processing till the end of the current page.
341 * In the cross-page boundary case we will end up here with cLeftPage
342 * as 0; we then execute the loop once.
343 */
344 do
345 {
346 OP_TYPE uValue1;
347 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
348 if (rcStrict != VINF_SUCCESS)
349 return rcStrict;
350 OP_TYPE uValue2;
351 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
352 if (rcStrict != VINF_SUCCESS)
353 return rcStrict;
354 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
355
356 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
357 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
358 pCtx->ADDR_rCX = --uCounterReg;
359 pCtx->eflags.u = uEFlags;
360 cLeftPage--;
361 } while ( (int32_t)cLeftPage > 0
362 && !(uEFlags & X86_EFL_ZF));
363 } while ( uCounterReg != 0
364 && !(uEFlags & X86_EFL_ZF));
365
366 /*
367 * Done.
368 */
369 iemRegAddToRip(pIemCpu, cbInstr);
370 return VINF_SUCCESS;
371}
372
373
374/**
375 * Implements 'REPE SCAS'.
376 */
377IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
378{
379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
380
381 /*
382 * Setup.
383 */
384 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
385 if (uCounterReg == 0)
386 {
387 iemRegAddToRip(pIemCpu, cbInstr);
388 return VINF_SUCCESS;
389 }
390
391 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
392 if (rcStrict != VINF_SUCCESS)
393 return rcStrict;
394
395 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
396 OP_TYPE const uValueReg = pCtx->OP_rAX;
397 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
398 uint32_t uEFlags = pCtx->eflags.u;
399
400 /*
401 * The loop.
402 */
403 do
404 {
405 /*
406 * Do segmentation and virtual page stuff.
407 */
408#if ADDR_SIZE != 64
409 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
410#else
411 uint64_t uVirtAddr = uAddrReg;
412#endif
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418#if ADDR_SIZE != 64
419 && uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
421#endif
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do a block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
447
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
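 /* Note: the const of uValueReg is only cast away to match the helper's
    signature; the cmp helper is expected to just read its first operand,
    as CMP never writes its destination back. */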
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
453 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross-page boundary case we will end up here with cLeftPage
472 * as 0; we then execute the loop once.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRip(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRip(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
517 if (rcStrict != VINF_SUCCESS)
518 return rcStrict;
519
520 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
521 OP_TYPE const uValueReg = pCtx->OP_rAX;
522 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
523 uint32_t uEFlags = pCtx->eflags.u;
524
525 /*
526 * The loop.
527 */
528 do
529 {
530 /*
531 * Do segmentation and virtual page stuff.
532 */
533#if ADDR_SIZE != 64
534 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
535#else
536 uint64_t uVirtAddr = uAddrReg;
537#endif
538 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
539 if (cLeftPage > uCounterReg)
540 cLeftPage = uCounterReg;
541 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
542 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
543#if ADDR_SIZE != 64
544 && uAddrReg < pCtx->es.u32Limit
545 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
546#endif
547 )
548 {
549 RTGCPHYS GCPhysMem;
550 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
551 if (rcStrict != VINF_SUCCESS)
552 return rcStrict;
553
554 /*
555 * If we can map the page without trouble, do a block processing
556 * until the end of the current page.
557 */
558 PGMPAGEMAPLOCK PgLockMem;
559 OP_TYPE const *puMem;
560 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
561 if (rcStrict == VINF_SUCCESS)
562 {
563 /* Search till we find a matching item. */
564 OP_TYPE uTmpValue;
565 bool fQuit;
566 uint32_t i = 0;
567 do
568 {
569 uTmpValue = puMem[i++];
570 fQuit = uTmpValue == uValueReg;
571 } while (i < cLeftPage && !fQuit);
572
573 /* Update the regs. */
574 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
575 pCtx->ADDR_rCX = uCounterReg -= i;
576 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
577 pCtx->eflags.u = uEFlags;
578 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
579 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
580 if (fQuit)
581 break;
582
583
584 /* If unaligned, we drop thru and do the page crossing access
585 below. Otherwise, do the next page. */
586 if (!(uVirtAddr & (OP_SIZE - 1)))
587 continue;
588 if (uCounterReg == 0)
589 break;
590 cLeftPage = 0;
591 }
592 }
593
594 /*
595 * Fallback - slow processing till the end of the current page.
596 * In the cross-page boundary case we will end up here with cLeftPage
597 * as 0; we then execute the loop once.
598 */
599 do
600 {
601 OP_TYPE uTmpValue;
602 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
603 if (rcStrict != VINF_SUCCESS)
604 return rcStrict;
605 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
606 pCtx->ADDR_rDI = uAddrReg += cbIncr;
607 pCtx->ADDR_rCX = --uCounterReg;
608 pCtx->eflags.u = uEFlags;
609 cLeftPage--;
610 } while ( (int32_t)cLeftPage > 0
611 && !(uEFlags & X86_EFL_ZF));
612 } while ( uCounterReg != 0
613 && !(uEFlags & X86_EFL_ZF));
614
615 /*
616 * Done.
617 */
618 iemRegAddToRip(pIemCpu, cbInstr);
619 return VINF_SUCCESS;
620}
621
622
623
624
625/**
626 * Implements 'REP MOVS'.
627 */
628IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
629{
630 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
631
632 /*
633 * Setup.
634 */
635 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
636 if (uCounterReg == 0)
637 {
638 iemRegAddToRip(pIemCpu, cbInstr);
639 return VINF_SUCCESS;
640 }
641
642 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
643 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
644 if (rcStrict != VINF_SUCCESS)
645 return rcStrict;
646
647 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
648 if (rcStrict != VINF_SUCCESS)
649 return rcStrict;
650
651 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
652 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
653 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
654
655 /*
656 * Be careful with handler bypassing.
657 */
658 if (pIemCpu->fBypassHandlers)
659 {
660 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
661 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
662 }
663
664 /*
665 * If we're reading back what we write, we have to let the verification code
666 * know about it so it can prevent a false positive.
667 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
668 */
669#ifdef IEM_VERIFICATION_MODE_FULL
670 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
671 && (cbIncr > 0
672 ? uSrcAddrReg <= uDstAddrReg
673 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
674 : uDstAddrReg <= uSrcAddrReg
675 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
676 pIemCpu->fOverlappingMovs = true;
677#endif
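 /* Example of the overlap test above (hypothetical values): with cbIncr = +2,
    uSrcAddrReg = 0x1000, uDstAddrReg = 0x1004 and uCounterReg = 0x10, the
    source range [0x1000, 0x1020) covers the destination start, so
    fOverlappingMovs is set and the verification pass knows that read-back
    differences are to be expected. */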
678
679 /*
680 * The loop.
681 */
682 do
683 {
684 /*
685 * Do segmentation and virtual page stuff.
686 */
687#if ADDR_SIZE != 64
688 ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
689 ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->es.u64Base + uDstAddrReg;
690#else
691 uint64_t uVirtSrcAddr = uSrcAddrReg;
692 uint64_t uVirtDstAddr = uDstAddrReg;
693#endif
694 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
695 if (cLeftSrcPage > uCounterReg)
696 cLeftSrcPage = uCounterReg;
697 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
698 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
699
700 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
701 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
702#if ADDR_SIZE != 64
703 && uSrcAddrReg < pSrcHid->u32Limit
704 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
705 && uDstAddrReg < pCtx->es.u32Limit
706 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
707#endif
708 )
709 {
710 RTGCPHYS GCPhysSrcMem;
711 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
712 if (rcStrict != VINF_SUCCESS)
713 return rcStrict;
714
715 RTGCPHYS GCPhysDstMem;
716 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
717 if (rcStrict != VINF_SUCCESS)
718 return rcStrict;
719
720 /*
721 * If we can map the page without trouble, do a block processing
722 * until the end of the current page.
723 */
724 PGMPAGEMAPLOCK PgLockDstMem;
725 OP_TYPE *puDstMem;
726 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
727 if (rcStrict == VINF_SUCCESS)
728 {
729 PGMPAGEMAPLOCK PgLockSrcMem;
730 OP_TYPE const *puSrcMem;
731 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
732 if (rcStrict == VINF_SUCCESS)
733 {
734 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
735 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
736
737 /* Perform the operation exactly (don't use memcpy to avoid
738 having to consider how its implementation would affect
739 any overlapping source and destination area). */
740 OP_TYPE const *puSrcCur = puSrcMem;
741 OP_TYPE *puDstCur = puDstMem;
742 uint32_t cTodo = cLeftPage;
743 while (cTodo-- > 0)
744 *puDstCur++ = *puSrcCur++;
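 /* Illustrative note: the element-by-element copy above preserves the
    architectural result for overlapping areas; e.g. 'rep movsb' with
    ES:rDI = DS:rSI + 1 replicates the first byte across the destination,
    which memcpy (undefined for overlaps) or memmove would not reproduce. */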
745
746 /* Update the registers. */
747 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
748 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
749 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
750
751 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
752 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
753 continue;
754 }
755 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
756 }
757 }
758
759 /*
760 * Fallback - slow processing till the end of the current page.
761 * In the cross-page boundary case we will end up here with cLeftPage
762 * as 0; we then execute the loop once.
763 */
764 do
765 {
766 OP_TYPE uValue;
767 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
768 if (rcStrict != VINF_SUCCESS)
769 return rcStrict;
770 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
771 if (rcStrict != VINF_SUCCESS)
772 return rcStrict;
773
774 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
775 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
776 pCtx->ADDR_rCX = --uCounterReg;
777 cLeftPage--;
778 } while ((int32_t)cLeftPage > 0);
779 } while (uCounterReg != 0);
780
781 /*
782 * Done.
783 */
784 iemRegAddToRip(pIemCpu, cbInstr);
785 return VINF_SUCCESS;
786}
787
788
789/**
790 * Implements 'REP STOS'.
791 */
792IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
793{
794 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
795
796 /*
797 * Setup.
798 */
799 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
800 if (uCounterReg == 0)
801 {
802 iemRegAddToRip(pIemCpu, cbInstr);
803 return VINF_SUCCESS;
804 }
805
806 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
807 if (rcStrict != VINF_SUCCESS)
808 return rcStrict;
809
810 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
811 OP_TYPE const uValue = pCtx->OP_rAX;
812 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
813
814 /*
815 * Be careful with handler bypassing.
816 */
817 /** @todo Permit doing a page if correctly aligned. */
818 if (pIemCpu->fBypassHandlers)
819 {
820 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
821 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
822 }
823
824 /*
825 * The loop.
826 */
827 do
828 {
829 /*
830 * Do segmentation and virtual page stuff.
831 */
832#if ADDR_SIZE != 64
833 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
834#else
835 uint64_t uVirtAddr = uAddrReg;
836#endif
837 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
838 if (cLeftPage > uCounterReg)
839 cLeftPage = uCounterReg;
840 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
841 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
842#if ADDR_SIZE != 64
843 && uAddrReg < pCtx->es.u32Limit
844 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
845#endif
846 )
847 {
848 RTGCPHYS GCPhysMem;
849 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
850 if (rcStrict != VINF_SUCCESS)
851 return rcStrict;
852
853 /*
854 * If we can map the page without trouble, do a block processing
855 * until the end of the current page.
856 */
857 PGMPAGEMAPLOCK PgLockMem;
858 OP_TYPE *puMem;
859 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
860 if (rcStrict == VINF_SUCCESS)
861 {
862 /* Update the regs first so we can loop on cLeftPage. */
863 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
864 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
865
866 /* Do the memsetting. */
867#if OP_SIZE == 8
868 memset(puMem, uValue, cLeftPage);
869/*#elif OP_SIZE == 32
870 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
871#else
872 while (cLeftPage-- > 0)
873 *puMem++ = uValue;
874#endif
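 /* Illustrative note: memset can only replicate a single byte, which is why
    it is limited to the OP_SIZE == 8 case; e.g. a 'rep stosw' of 0x1234 done
    via memset would store 0x3434 per word, hence the explicit store loop for
    the wider operand sizes. */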
875
876 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
877
878 /* If unaligned, we drop thru and do the page crossing access
879 below. Otherwise, do the next page. */
880 if (!(uVirtAddr & (OP_SIZE - 1)))
881 continue;
882 if (uCounterReg == 0)
883 break;
884 cLeftPage = 0;
885 }
886 }
887
888 /*
889 * Fallback - slow processing till the end of the current page.
890 * In the cross-page boundary case we will end up here with cLeftPage
891 * as 0; we then execute the loop once.
892 */
893 do
894 {
895 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
896 if (rcStrict != VINF_SUCCESS)
897 return rcStrict;
898 pCtx->ADDR_rDI = uAddrReg += cbIncr;
899 pCtx->ADDR_rCX = --uCounterReg;
900 cLeftPage--;
901 } while ((int32_t)cLeftPage > 0);
902 } while (uCounterReg != 0);
903
904 /*
905 * Done.
906 */
907 iemRegAddToRip(pIemCpu, cbInstr);
908 return VINF_SUCCESS;
909}
910
911
912/**
913 * Implements 'REP LODS'.
914 */
915IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
916{
917 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
918
919 /*
920 * Setup.
921 */
922 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
923 if (uCounterReg == 0)
924 {
925 iemRegAddToRip(pIemCpu, cbInstr);
926 return VINF_SUCCESS;
927 }
928
929 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
930 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
931 if (rcStrict != VINF_SUCCESS)
932 return rcStrict;
933
934 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
935 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
936
937 /*
938 * The loop.
939 */
940 do
941 {
942 /*
943 * Do segmentation and virtual page stuff.
944 */
945#if ADDR_SIZE != 64
946 ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
947#else
948 uint64_t uVirtAddr = uAddrReg;
949#endif
950 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
951 if (cLeftPage > uCounterReg)
952 cLeftPage = uCounterReg;
953 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
954 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
955#if ADDR_SIZE != 64
956 && uAddrReg < pSrcHid->u32Limit
957 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
958#endif
959 )
960 {
961 RTGCPHYS GCPhysMem;
962 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
963 if (rcStrict != VINF_SUCCESS)
964 return rcStrict;
965
966 /*
967 * If we can map the page without trouble, we can get away with
968 * just reading the last value on the page.
969 */
970 PGMPAGEMAPLOCK PgLockMem;
971 OP_TYPE const *puMem;
972 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
973 if (rcStrict == VINF_SUCCESS)
974 {
975 /* Only fetch the last item; the rest doesn't matter in direct access mode. */
976#if OP_SIZE == 32
977 pCtx->rax = puMem[cLeftPage - 1];
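 /* Writing the full rax field with the 32-bit value models the architectural
    zero-extension that a 32-bit register write performs in long mode. */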
978#else
979 pCtx->OP_rAX = puMem[cLeftPage - 1];
980#endif
981 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
982 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
983 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
984
985 /* If unaligned, we drop thru and do the page crossing access
986 below. Otherwise, do the next page. */
987 if (!(uVirtAddr & (OP_SIZE - 1)))
988 continue;
989 if (uCounterReg == 0)
990 break;
991 cLeftPage = 0;
992 }
993 }
994
995 /*
996 * Fallback - slow processing till the end of the current page.
997 * In the cross-page boundary case we will end up here with cLeftPage
998 * as 0; we then execute the loop once.
999 */
1000 do
1001 {
1002 OP_TYPE uTmpValue;
1003 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
1004 if (rcStrict != VINF_SUCCESS)
1005 return rcStrict;
1006#if OP_SIZE == 32
1007 pCtx->rax = uTmpValue;
1008#else
1009 pCtx->OP_rAX = uTmpValue;
1010#endif
1011 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1012 pCtx->ADDR_rCX = --uCounterReg;
1013 cLeftPage--;
1014 } while ((int32_t)cLeftPage > 0);
1015 if (rcStrict != VINF_SUCCESS)
1016 break;
1017 } while (uCounterReg != 0);
1018
1019 /*
1020 * Done.
1021 */
1022 iemRegAddToRip(pIemCpu, cbInstr);
1023 return VINF_SUCCESS;
1024}
1025
1026
1027#if OP_SIZE != 64
1028
1029/**
1030 * Implements 'INS' (no rep)
1031 */
1032IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1033{
1034 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1035 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1036 VBOXSTRICTRC rcStrict;
1037
1038 /*
1039 * Be careful with handler bypassing.
1040 */
1041 if (pIemCpu->fBypassHandlers)
1042 {
1043 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1044 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1045 }
1046
1047 /*
1048 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1049 * segmentation and finally any #PF due to virtual address translation.
1050 * ASSUMES nothing is read from the I/O port before traps are taken.
1051 */
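 /* Hence the ordering below: the I/O permission check comes first, then the
    ES:rDI mapping (which raises any segmentation #GP or paging #PF), and only
    then is the port actually read. */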
1052 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1053 if (rcStrict != VINF_SUCCESS)
1054 return rcStrict;
1055
1056 OP_TYPE *puMem;
1057 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1058 if (rcStrict != VINF_SUCCESS)
1059 return rcStrict;
1060
1061 uint32_t u32Value;
1062 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1063 rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);
1064 else
1065 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1066 if (IOM_SUCCESS(rcStrict))
1067 {
1068 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1069 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1070 {
1071 if (!pCtx->eflags.Bits.u1DF)
1072 pCtx->ADDR_rDI += OP_SIZE / 8;
1073 else
1074 pCtx->ADDR_rDI -= OP_SIZE / 8;
1075 iemRegAddToRip(pIemCpu, cbInstr);
1076 }
1077 /* iemMemMap already checked permissions, so this may only be real errors
1078 or access handlers meddling. The access handler case is going to
1079 cause misbehavior if the instruction is re-interpreted or the like. So,
1080 we fail with an internal error here instead. */
1081 else
1082 AssertLogRelFailedReturn(VERR_IEM_IPE_1);
1083 }
1084 return rcStrict;
1085}
1086
1087
1088/**
1089 * Implements 'REP INS'.
1090 */
1091IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1092{
1093 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1094 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1095 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1096
1097 /*
1098 * Setup.
1099 */
1100 uint16_t const u16Port = pCtx->dx;
1101 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1102 if (rcStrict != VINF_SUCCESS)
1103 return rcStrict;
1104
1105 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1106 if (uCounterReg == 0)
1107 {
1108 iemRegAddToRip(pIemCpu, cbInstr);
1109 return VINF_SUCCESS;
1110 }
1111
1112 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
1113 if (rcStrict != VINF_SUCCESS)
1114 return rcStrict;
1115
1116 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1117 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1118
1119 /*
1120 * Be careful with handler bypassing.
1121 */
1122 if (pIemCpu->fBypassHandlers)
1123 {
1124 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1125 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1126 }
1127
1128 /*
1129 * The loop.
1130 */
1131 do
1132 {
1133 /*
1134 * Do segmentation and virtual page stuff.
1135 */
1136#if ADDR_SIZE != 64
1137 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
1138#else
1139 uint64_t uVirtAddr = uAddrReg;
1140#endif
1141 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1142 if (cLeftPage > uCounterReg)
1143 cLeftPage = uCounterReg;
1144 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1145 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1146#if ADDR_SIZE != 64
1147 && uAddrReg < pCtx->es.u32Limit
1148 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
1149#endif
1150 )
1151 {
1152 RTGCPHYS GCPhysMem;
1153 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1154 if (rcStrict != VINF_SUCCESS)
1155 return rcStrict;
1156
1157 /*
1158 * If we can map the page without trouble, we would've liked to use
1159 * a string I/O method to do the work, but the current IOM
1160 * interface doesn't match our current approach. So, do a regular
1161 * loop instead.
1162 */
1163 /** @todo Change the I/O manager interface to make use of
1164 * mapped buffers instead of leaving those bits to the
1165 * device implementation? */
1166 PGMPAGEMAPLOCK PgLockMem;
1167 OP_TYPE *puMem;
1168 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1169 if (rcStrict == VINF_SUCCESS)
1170 {
1171 uint32_t off = 0;
1172 while (off < cLeftPage)
1173 {
1174 uint32_t u32Value;
1175 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1176 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1177 else
1178 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1179 if (IOM_SUCCESS(rcStrict))
1180 {
1181 puMem[off] = (OP_TYPE)u32Value;
1182 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1183 pCtx->ADDR_rCX = --uCounterReg;
1184 }
1185 if (rcStrict != VINF_SUCCESS)
1186 {
1187 if (IOM_SUCCESS(rcStrict))
1188 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1189 if (uCounterReg == 0)
1190 iemRegAddToRip(pIemCpu, cbInstr);
1191 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1192 return rcStrict;
1193 }
1194 off++;
1195 }
1196 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1197
1198 /* If unaligned, we drop thru and do the page crossing access
1199 below. Otherwise, do the next page. */
1200 if (!(uVirtAddr & (OP_SIZE - 1)))
1201 continue;
1202 if (uCounterReg == 0)
1203 break;
1204 cLeftPage = 0;
1205 }
1206 }
1207
1208 /*
1209 * Fallback - slow processing till the end of the current page.
1210 * In the cross-page boundary case we will end up here with cLeftPage
1211 * as 0; we then execute the loop once.
1212 *
1213 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1214 * I/O port, otherwise it wouldn't really be restartable.
1215 */
1216 /** @todo investigate what the CPU actually does with \#PF/\#GP
1217 * during INS. */
1218 do
1219 {
1220 OP_TYPE *puMem;
1221 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1222 if (rcStrict != VINF_SUCCESS)
1223 return rcStrict;
1224
1225 uint32_t u32Value;
1226 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1227 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1228 else
1229 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1230 if (!IOM_SUCCESS(rcStrict))
1231 return rcStrict;
1232
1233 *puMem = (OP_TYPE)u32Value;
1234 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1235 AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */
1236
1237 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1238 pCtx->ADDR_rCX = --uCounterReg;
1239
1240 cLeftPage--;
1241 if (rcStrict != VINF_SUCCESS)
1242 {
1243 if (IOM_SUCCESS(rcStrict))
1244 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1245 if (uCounterReg == 0)
1246 iemRegAddToRip(pIemCpu, cbInstr);
1247 return rcStrict;
1248 }
1249 } while ((int32_t)cLeftPage > 0);
1250 } while (uCounterReg != 0);
1251
1252 /*
1253 * Done.
1254 */
1255 iemRegAddToRip(pIemCpu, cbInstr);
1256 return VINF_SUCCESS;
1257}
1258
1259
1260/**
1261 * Implements 'OUTS' (no rep)
1262 */
1263IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1264{
1265 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1266 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1267 VBOXSTRICTRC rcStrict;
1268
1269 /*
1270 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1271 * segmentation and finally any #PF due to virtual address translation.
1272 * ASSUMES nothing is written to the I/O port before traps are taken.
1273 */
1274 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1275 if (rcStrict != VINF_SUCCESS)
1276 return rcStrict;
1277
1278 OP_TYPE uValue;
1279 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1280 if (rcStrict == VINF_SUCCESS)
1281 {
1282 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1283 rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);
1284 else
1285 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1286 if (IOM_SUCCESS(rcStrict))
1287 {
1288 if (!pCtx->eflags.Bits.u1DF)
1289 pCtx->ADDR_rSI += OP_SIZE / 8;
1290 else
1291 pCtx->ADDR_rSI -= OP_SIZE / 8;
1292 iemRegAddToRip(pIemCpu, cbInstr);
1293 if (rcStrict != VINF_SUCCESS)
1294 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1295 }
1296 }
1297 return rcStrict;
1298}
1299
1300
1301/**
1302 * Implements 'REP OUTS'.
1303 */
1304IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1305{
1306 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1307 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1308 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1309
1310 /*
1311 * Setup.
1312 */
1313 uint16_t const u16Port = pCtx->dx;
1314 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1315 if (rcStrict != VINF_SUCCESS)
1316 return rcStrict;
1317
1318 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1319 if (uCounterReg == 0)
1320 {
1321 iemRegAddToRip(pIemCpu, cbInstr);
1322 return VINF_SUCCESS;
1323 }
1324
1325 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1326 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
1327 if (rcStrict != VINF_SUCCESS)
1328 return rcStrict;
1329
1330 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1331 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1332
1333 /*
1334 * The loop.
1335 */
1336 do
1337 {
1338 /*
1339 * Do segmentation and virtual page stuff.
1340 */
1341#if ADDR_SIZE != 64
1342 ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
1343#else
1344 uint64_t uVirtAddr = uAddrReg;
1345#endif
1346 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1347 if (cLeftPage > uCounterReg)
1348 cLeftPage = uCounterReg;
1349 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1350 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1351#if ADDR_SIZE != 64
1352 && uAddrReg < pHid->u32Limit
1353 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
1354#endif
1355 )
1356 {
1357 RTGCPHYS GCPhysMem;
1358 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1359 if (rcStrict != VINF_SUCCESS)
1360 return rcStrict;
1361
1362 /*
1363 * If we can map the page without trouble, we would've liked to use
1364 * a string I/O method to do the work, but the current IOM
1365 * interface doesn't match our current approach. So, do a regular
1366 * loop instead.
1367 */
1368 /** @todo Change the I/O manager interface to make use of
1369 * mapped buffers instead of leaving those bits to the
1370 * device implementation? */
1371 PGMPAGEMAPLOCK PgLockMem;
1372 OP_TYPE const *puMem;
1373 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1374 if (rcStrict == VINF_SUCCESS)
1375 {
1376 uint32_t off = 0;
1377 while (off < cLeftPage)
1378 {
1379 uint32_t u32Value = *puMem++;
1380 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1381 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, OP_SIZE / 8);
1382 else
1383 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1384 if (IOM_SUCCESS(rcStrict))
1385 {
1386 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1387 pCtx->ADDR_rCX = --uCounterReg;
1388 }
1389 if (rcStrict != VINF_SUCCESS)
1390 {
1391 if (IOM_SUCCESS(rcStrict))
1392 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1393 if (uCounterReg == 0)
1394 iemRegAddToRip(pIemCpu, cbInstr);
1395 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1396 return rcStrict;
1397 }
1398 off++;
1399 }
1400 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1401
1402 /* If unaligned, we drop thru and do the page crossing access
1403 below. Otherwise, do the next page. */
1404 if (!(uVirtAddr & (OP_SIZE - 1)))
1405 continue;
1406 if (uCounterReg == 0)
1407 break;
1408 cLeftPage = 0;
1409 }
1410 }
1411
1412 /*
1413 * Fallback - slow processing till the end of the current page.
1414 * In the cross-page boundary case we will end up here with cLeftPage
1415 * as 0; we then execute the loop once.
1416 *
1417 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1418 * I/O port, otherwise it wouldn't really be restartable.
1419 */
1420 /** @todo investigate what the CPU actually does with \#PF/\#GP
1421 * during OUTS. */
1422 do
1423 {
1424 OP_TYPE uValue;
1425 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1426 if (rcStrict != VINF_SUCCESS)
1427 return rcStrict;
1428
1429 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1430 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1431 else
1432 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1433 if (IOM_SUCCESS(rcStrict))
1434 {
1435 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1436 pCtx->ADDR_rCX = --uCounterReg;
1437 cLeftPage--;
1438 }
1439 if (rcStrict != VINF_SUCCESS)
1440 {
1441 if (IOM_SUCCESS(rcStrict))
1442 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1443 if (uCounterReg == 0)
1444 iemRegAddToRip(pIemCpu, cbInstr);
1445 return rcStrict;
1446 }
1447 } while ((int32_t)cLeftPage > 0);
1448 } while (uCounterReg != 0);
1449
1450 /*
1451 * Done.
1452 */
1453 iemRegAddToRip(pIemCpu, cbInstr);
1454 return VINF_SUCCESS;
1455}
1456
1457#endif /* OP_SIZE != 64 */
1458
1459
1460#undef OP_rAX
1461#undef OP_SIZE
1462#undef ADDR_SIZE
1463#undef ADDR_rDI
1464#undef ADDR_rSI
1465#undef ADDR_rCX
1466#undef ADDR_rIP
1467#undef ADDR2_TYPE
1468#undef ADDR_TYPE
1469#undef ADDR2_TYPE
1470