VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h @ 41829

Last change on this file since 41829 was 39970, checked in by vboxsync, 13 years ago

IEM: Implemented bswap. Fixed verification of INS.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 48.5 KB
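This header is a code template rather than a standalone translation unit: the #undef block at the very end lets the including code pull it in once per operand-size/address-size combination, with OP_SIZE and ADDR_SIZE defined beforehand (OP_rAX, OP_TYPE and the ADDR_* names are then derived inside the template). A minimal sketch of such an instantiation site follows; the macro values and the separate including file are illustrative assumptions, not a quote from the repository:

/* Hypothetical includer, not part of this file: */
#define OP_SIZE   16                     /* ax / uint16_t operands       */
#define ADDR_SIZE 32                     /* esi/edi/ecx based addressing */
#include "IEMAllCImplStrInstr.cpp.h"     /* emits ..._op16_addr32 bodies */

#define OP_SIZE   32                     /* eax / uint32_t operands      */
#define ADDR_SIZE 32
#include "IEMAllCImplStrInstr.cpp.h"
/* ...repeated for the remaining size combinations. */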
1/* $Id: IEMAllCImplStrInstr.cpp.h 39970 2012-02-02 21:29:12Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50#else
51# error "Bad ADDR_SIZE."
52#endif
53#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
54
55
56/**
57 * Implements 'REPE CMPS'.
58 */
59IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
60{
61 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
62
63 /*
64 * Setup.
65 */
66 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
67 if (uCounterReg == 0)
68 {
69 iemRegAddToRip(pIemCpu, cbInstr);
70 return VINF_SUCCESS;
71 }
72
73 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
74 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
75 if (rcStrict != VINF_SUCCESS)
76 return rcStrict;
77
78 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
79 if (rcStrict != VINF_SUCCESS)
80 return rcStrict;
81
82 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
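        /* Explanatory note: the direction flag selects the step, so with a
           32-bit operand cbIncr is +4 while EFLAGS.DF is clear and -4 while
           it is set (OP_SIZE / 8 bytes per element in general). */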
83 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
84 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
85 uint32_t uEFlags = pCtx->eflags.u;
86
87 /*
88 * The loop.
89 */
90 do
91 {
92 /*
93 * Do segmentation and virtual page stuff.
94 */
95#if ADDR_SIZE != 64
96 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
97 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->esHid.u64Base + uSrc2AddrReg;
98#else
99 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
100 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
101#endif
102 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
103 if (cLeftSrc1Page > uCounterReg)
104 cLeftSrc1Page = uCounterReg;
105 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
106 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
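        /* Worked example for the chunking above, assuming the usual 4 KiB x86
           page (PAGE_SIZE 0x1000): a 32-bit compare whose source sits at page
           offset 0xff8 has (0x1000 - 0xff8) / 4 = 2 elements left before the
           boundary, so at most two iterations are handled in this pass; RT_MIN
           picks whichever operand reaches its page end first. */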
107
108 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
109 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
110#if ADDR_SIZE != 64
111 && uSrc1AddrReg < pSrc1Hid->u32Limit
112 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
113 && uSrc2AddrReg < pCtx->esHid.u32Limit
114 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
115#endif
116 )
117 {
118 RTGCPHYS GCPhysSrc1Mem;
119 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
120 if (rcStrict != VINF_SUCCESS)
121 return rcStrict;
122
123 RTGCPHYS GCPhysSrc2Mem;
124 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
125 if (rcStrict != VINF_SUCCESS)
126 return rcStrict;
127
128 /*
129 * If we can map the page without trouble, do a block processing
130 * until the end of the current page.
131 */
132 OP_TYPE const *puSrc2Mem;
133 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem);
134 if (rcStrict == VINF_SUCCESS)
135 {
136 OP_TYPE const *puSrc1Mem;
137 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem);
138 if (rcStrict == VINF_SUCCESS)
139 {
140 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
141 {
142 /* All matches, only compare the last item to get the right eflags. */
143 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
144 uSrc1AddrReg += cLeftPage * cbIncr;
145 uSrc2AddrReg += cLeftPage * cbIncr;
146 uCounterReg -= cLeftPage;
147 }
148 else
149 {
150 /* Some mismatch, compare each item (and keep volatile
151 memory in mind). */
152 uint32_t off = 0;
153 do
154 {
155 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
156 off++;
157 } while ( off < cLeftPage
158 && (uEFlags & X86_EFL_ZF));
159 uSrc1AddrReg += cbIncr * off;
160 uSrc2AddrReg += cbIncr * off;
161 uCounterReg -= off;
162 }
163
164 /* Update the registers before looping. */
165 pCtx->ADDR_rCX = uCounterReg;
166 pCtx->ADDR_rSI = uSrc1AddrReg;
167 pCtx->ADDR_rDI = uSrc2AddrReg;
168 pCtx->eflags.u = uEFlags;
169
170 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem);
171 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem);
172 continue;
173 }
174 }
175 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem);
176 }
177
178 /*
179 * Fallback - slow processing till the end of the current page.
180 * In the cross page boundary case we will end up here with cLeftPage
181 * as 0, in which case we execute exactly one iteration.
182 */
183 do
184 {
185 OP_TYPE uValue1;
186 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
187 if (rcStrict != VINF_SUCCESS)
188 return rcStrict;
189 OP_TYPE uValue2;
190 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
191 if (rcStrict != VINF_SUCCESS)
192 return rcStrict;
193 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
194
195 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
196 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
197 pCtx->ADDR_rCX = --uCounterReg;
198 pCtx->eflags.u = uEFlags;
199 cLeftPage--;
200 } while ( (int32_t)cLeftPage > 0
201 && (uEFlags & X86_EFL_ZF));
202 } while ( uCounterReg != 0
203 && (uEFlags & X86_EFL_ZF));
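    /* Note: the outer loop above stops when rCX reaches zero or a mismatching
       element clears ZF, which is exactly the REPE/REPZ termination rule; the
       REPNE variant further down uses the inverted !ZF test. */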
204
205 /*
206 * Done.
207 */
208 iemRegAddToRip(pIemCpu, cbInstr);
209 return VINF_SUCCESS;
210}
211
212
213/**
214 * Implements 'REPNE CMPS'.
215 */
216IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
217{
218 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
219
220 /*
221 * Setup.
222 */
223 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
224 if (uCounterReg == 0)
225 {
226 iemRegAddToRip(pIemCpu, cbInstr);
227 return VINF_SUCCESS;
228 }
229
230 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
231 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
232 if (rcStrict != VINF_SUCCESS)
233 return rcStrict;
234
235 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
236 if (rcStrict != VINF_SUCCESS)
237 return rcStrict;
238
239 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
240 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
241 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
242 uint32_t uEFlags = pCtx->eflags.u;
243
244 /*
245 * The loop.
246 */
247 do
248 {
249 /*
250 * Do segmentation and virtual page stuff.
251 */
252#if ADDR_SIZE != 64
253 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
254 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->esHid.u64Base + uSrc2AddrReg;
255#else
256 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
257 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
258#endif
259 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
260 if (cLeftSrc1Page > uCounterReg)
261 cLeftSrc1Page = uCounterReg;
262 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
263 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
264
265 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
266 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
267#if ADDR_SIZE != 64
268 && uSrc1AddrReg < pSrc1Hid->u32Limit
269 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
270 && uSrc2AddrReg < pCtx->esHid.u32Limit
271 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
272#endif
273 )
274 {
275 RTGCPHYS GCPhysSrc1Mem;
276 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
277 if (rcStrict != VINF_SUCCESS)
278 return rcStrict;
279
280 RTGCPHYS GCPhysSrc2Mem;
281 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
282 if (rcStrict != VINF_SUCCESS)
283 return rcStrict;
284
285 /*
286 * If we can map the page without trouble, do a block processing
287 * until the end of the current page.
288 */
289 OP_TYPE const *puSrc2Mem;
290 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem);
291 if (rcStrict == VINF_SUCCESS)
292 {
293 OP_TYPE const *puSrc1Mem;
294 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem);
295 if (rcStrict == VINF_SUCCESS)
296 {
297 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
298 {
299 /* All matches, only compare the last item to get the right eflags. */
300 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
301 uSrc1AddrReg += cLeftPage * cbIncr;
302 uSrc2AddrReg += cLeftPage * cbIncr;
303 uCounterReg -= cLeftPage;
304 }
305 else
306 {
307 /* Some mismatch, compare each item (and keep volatile
308 memory in mind). */
309 uint32_t off = 0;
310 do
311 {
312 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
313 off++;
314 } while ( off < cLeftPage
315 && !(uEFlags & X86_EFL_ZF));
316 uSrc1AddrReg += cbIncr * off;
317 uSrc2AddrReg += cbIncr * off;
318 uCounterReg -= off;
319 }
320
321 /* Update the registers before looping. */
322 pCtx->ADDR_rCX = uCounterReg;
323 pCtx->ADDR_rSI = uSrc1AddrReg;
324 pCtx->ADDR_rDI = uSrc2AddrReg;
325 pCtx->eflags.u = uEFlags;
326
327 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem);
328 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem);
329 continue;
330 }
331 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem);
332 }
333 }
334
335 /*
336 * Fallback - slow processing till the end of the current page.
337 * In the cross page boundary case we will end up here with cLeftPage
338 * as 0, in which case we execute exactly one iteration.
339 */
340 do
341 {
342 OP_TYPE uValue1;
343 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
344 if (rcStrict != VINF_SUCCESS)
345 return rcStrict;
346 OP_TYPE uValue2;
347 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
348 if (rcStrict != VINF_SUCCESS)
349 return rcStrict;
350 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
351
352 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
353 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
354 pCtx->ADDR_rCX = --uCounterReg;
355 pCtx->eflags.u = uEFlags;
356 cLeftPage--;
357 } while ( (int32_t)cLeftPage > 0
358 && !(uEFlags & X86_EFL_ZF));
359 } while ( uCounterReg != 0
360 && !(uEFlags & X86_EFL_ZF));
361
362 /*
363 * Done.
364 */
365 iemRegAddToRip(pIemCpu, cbInstr);
366 return VINF_SUCCESS;
367}
368
369
370/**
371 * Implements 'REPE SCAS'.
372 */
373IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
374{
375 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
376
377 /*
378 * Setup.
379 */
380 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
381 if (uCounterReg == 0)
382 {
383 iemRegAddToRip(pIemCpu, cbInstr);
384 return VINF_SUCCESS;
385 }
386
387 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
388 if (rcStrict != VINF_SUCCESS)
389 return rcStrict;
390
391 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
392 OP_TYPE const uValueReg = pCtx->OP_rAX;
393 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
394 uint32_t uEFlags = pCtx->eflags.u;
395
396 /*
397 * The loop.
398 */
399 do
400 {
401 /*
402 * Do segmentation and virtual page stuff.
403 */
404#if ADDR_SIZE != 64
405 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
406#else
407 uint64_t uVirtAddr = uAddrReg;
408#endif
409 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
410 if (cLeftPage > uCounterReg)
411 cLeftPage = uCounterReg;
412 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
413 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
414#if ADDR_SIZE != 64
415 && uAddrReg < pCtx->esHid.u32Limit
416 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
417#endif
418 )
419 {
420 RTGCPHYS GCPhysMem;
421 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
422 if (rcStrict != VINF_SUCCESS)
423 return rcStrict;
424
425 /*
426 * If we can map the page without trouble, do a block processing
427 * until the end of the current page.
428 */
429 OP_TYPE const *puMem;
430 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
431 if (rcStrict == VINF_SUCCESS)
432 {
433 /* Search till we find a mismatching item. */
434 OP_TYPE uTmpValue;
435 bool fQuit;
436 uint32_t i = 0;
437 do
438 {
439 uTmpValue = puMem[i++];
440 fQuit = uTmpValue != uValueReg;
441 } while (i < cLeftPage && !fQuit);
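        /* Explanatory note: only the element that ended the scan is fed to the
           real cmp helper below; that single comparison leaves EFLAGS (ZF in
           particular) exactly as an element-by-element SCAS would have. */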
442
443 /* Update the regs. */
444 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
445 pCtx->ADDR_rCX = uCounterReg -= i;
446 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
447 pCtx->eflags.u = uEFlags;
448 Assert(!(uEFlags & X86_EFL_ZF) == (i < cLeftPage));
449 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
450 if (fQuit)
451 break;
452
453
454 /* If unaligned, we drop thru and do the page crossing access
455 below. Otherwise, do the next page. */
456 if (!(uVirtAddr & (OP_SIZE - 1)))
457 continue;
458 if (uCounterReg == 0)
459 break;
460 cLeftPage = 0;
461 }
462 }
463
464 /*
465 * Fallback - slow processing till the end of the current page.
466 * In the cross page boundary case we will end up here with cLeftPage
467 * as 0, in which case we execute exactly one iteration.
468 */
469 do
470 {
471 OP_TYPE uTmpValue;
472 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
473 if (rcStrict != VINF_SUCCESS)
474 return rcStrict;
475 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
476
477 pCtx->ADDR_rDI = uAddrReg += cbIncr;
478 pCtx->ADDR_rCX = --uCounterReg;
479 pCtx->eflags.u = uEFlags;
480 cLeftPage--;
481 } while ( (int32_t)cLeftPage > 0
482 && (uEFlags & X86_EFL_ZF));
483 } while ( uCounterReg != 0
484 && (uEFlags & X86_EFL_ZF));
485
486 /*
487 * Done.
488 */
489 iemRegAddToRip(pIemCpu, cbInstr);
490 return VINF_SUCCESS;
491}
492
493
494/**
495 * Implements 'REPNE SCAS'.
496 */
497IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
498{
499 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
500
501 /*
502 * Setup.
503 */
504 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
505 if (uCounterReg == 0)
506 {
507 iemRegAddToRip(pIemCpu, cbInstr);
508 return VINF_SUCCESS;
509 }
510
511 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
512 if (rcStrict != VINF_SUCCESS)
513 return rcStrict;
514
515 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
516 OP_TYPE const uValueReg = pCtx->OP_rAX;
517 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
518 uint32_t uEFlags = pCtx->eflags.u;
519
520 /*
521 * The loop.
522 */
523 do
524 {
525 /*
526 * Do segmentation and virtual page stuff.
527 */
528#if ADDR_SIZE != 64
529 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
530#else
531 uint64_t uVirtAddr = uAddrReg;
532#endif
533 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
534 if (cLeftPage > uCounterReg)
535 cLeftPage = uCounterReg;
536 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
537 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
538#if ADDR_SIZE != 64
539 && uAddrReg < pCtx->esHid.u32Limit
540 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
541#endif
542 )
543 {
544 RTGCPHYS GCPhysMem;
545 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
546 if (rcStrict != VINF_SUCCESS)
547 return rcStrict;
548
549 /*
550 * If we can map the page without trouble, do a block processing
551 * until the end of the current page.
552 */
553 OP_TYPE const *puMem;
554 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
555 if (rcStrict == VINF_SUCCESS)
556 {
557 /* Search till we find a mismatching item. */
558 OP_TYPE uTmpValue;
559 bool fQuit;
560 uint32_t i = 0;
561 do
562 {
563 uTmpValue = puMem[i++];
564 fQuit = uTmpValue == uValueReg;
565 } while (i < cLeftPage && !fQuit);
566
567 /* Update the regs. */
568 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
569 pCtx->ADDR_rCX = uCounterReg -= i;
570 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
571 pCtx->eflags.u = uEFlags;
572 Assert((!(uEFlags & X86_EFL_ZF) != (i < cLeftPage)) || (i == cLeftPage));
573 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
574 if (fQuit)
575 break;
576
577
578 /* If unaligned, we drop thru and do the page crossing access
579 below. Otherwise, do the next page. */
580 if (!(uVirtAddr & (OP_SIZE - 1)))
581 continue;
582 if (uCounterReg == 0)
583 break;
584 cLeftPage = 0;
585 }
586 }
587
588 /*
589 * Fallback - slow processing till the end of the current page.
590 * In the cross page boundary case we will end up here with cLeftPage
591 * as 0, in which case we execute exactly one iteration.
592 */
593 do
594 {
595 OP_TYPE uTmpValue;
596 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
597 if (rcStrict != VINF_SUCCESS)
598 return rcStrict;
599 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
600
601 pCtx->ADDR_rDI = uAddrReg += cbIncr;
602 pCtx->ADDR_rCX = --uCounterReg;
603 pCtx->eflags.u = uEFlags;
604 cLeftPage--;
605 } while ( (int32_t)cLeftPage > 0
606 && !(uEFlags & X86_EFL_ZF));
607 } while ( uCounterReg != 0
608 && !(uEFlags & X86_EFL_ZF));
609
610 /*
611 * Done.
612 */
613 iemRegAddToRip(pIemCpu, cbInstr);
614 return VINF_SUCCESS;
615}
616
617
618
619
620/**
621 * Implements 'REP MOVS'.
622 */
623IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
624{
625 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
626
627 /*
628 * Setup.
629 */
630 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
631 if (uCounterReg == 0)
632 {
633 iemRegAddToRip(pIemCpu, cbInstr);
634 return VINF_SUCCESS;
635 }
636
637 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
638 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
639 if (rcStrict != VINF_SUCCESS)
640 return rcStrict;
641
642 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
643 if (rcStrict != VINF_SUCCESS)
644 return rcStrict;
645
646 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
647 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
648 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
649
650 /*
651 * The loop.
652 */
653 do
654 {
655 /*
656 * Do segmentation and virtual page stuff.
657 */
658#if ADDR_SIZE != 64
659 ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
660 ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->esHid.u64Base + uDstAddrReg;
661#else
662 uint64_t uVirtSrcAddr = uSrcAddrReg;
663 uint64_t uVirtDstAddr = uDstAddrReg;
664#endif
665 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
666 if (cLeftSrcPage > uCounterReg)
667 cLeftSrcPage = uCounterReg;
668 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
669 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
670
671 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
672 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
673#if ADDR_SIZE != 64
674 && uSrcAddrReg < pSrcHid->u32Limit
675 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
676 && uDstAddrReg < pCtx->esHid.u32Limit
677 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
678#endif
679 )
680 {
681 RTGCPHYS GCPhysSrcMem;
682 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
683 if (rcStrict != VINF_SUCCESS)
684 return rcStrict;
685
686 RTGCPHYS GCPhysDstMem;
687 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
688 if (rcStrict != VINF_SUCCESS)
689 return rcStrict;
690
691 /*
692 * If we can map the page without trouble, do a block processing
693 * until the end of the current page.
694 */
695 OP_TYPE *puDstMem;
696 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem);
697 if (rcStrict == VINF_SUCCESS)
698 {
699 OP_TYPE const *puSrcMem;
700 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem);
701 if (rcStrict == VINF_SUCCESS)
702 {
703 /* Perform the operation. */
704 memcpy(puDstMem, puSrcMem, cLeftPage * (OP_SIZE / 8));
705
706 /* Update the registers. */
707 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
708 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
709 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
710
711 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem);
712 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem);
713 continue;
714 }
715 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem);
716 }
717 }
718
719 /*
720 * Fallback - slow processing till the end of the current page.
721 * In the cross page boundary case we will end up here with cLeftPage
722 * as 0, in which case we execute exactly one iteration.
723 */
724 do
725 {
726 OP_TYPE uValue;
727 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
728 if (rcStrict != VINF_SUCCESS)
729 return rcStrict;
730 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
731 if (rcStrict != VINF_SUCCESS)
732 return rcStrict;
733
734 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
735 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
736 pCtx->ADDR_rCX = --uCounterReg;
737 cLeftPage--;
738 } while ((int32_t)cLeftPage > 0);
739 } while (uCounterReg != 0);
740
741 /*
742 * Done.
743 */
744 iemRegAddToRip(pIemCpu, cbInstr);
745 return VINF_SUCCESS;
746}
747
748
749/**
750 * Implements 'REP STOS'.
751 */
752IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
753{
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755
756 /*
757 * Setup.
758 */
759 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
760 if (uCounterReg == 0)
761 {
762 iemRegAddToRip(pIemCpu, cbInstr);
763 return VINF_SUCCESS;
764 }
765
766 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
767 if (rcStrict != VINF_SUCCESS)
768 return rcStrict;
769
770 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
771 OP_TYPE const uValue = pCtx->OP_rAX;
772 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
773
774 /*
775 * The loop.
776 */
777 do
778 {
779 /*
780 * Do segmentation and virtual page stuff.
781 */
782#if ADDR_SIZE != 64
783 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
784#else
785 uint64_t uVirtAddr = uAddrReg;
786#endif
787 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
788 if (cLeftPage > uCounterReg)
789 cLeftPage = uCounterReg;
790 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
791 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
792#if ADDR_SIZE != 64
793 && uAddrReg < pCtx->esHid.u32Limit
794 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
795#endif
796 )
797 {
798 RTGCPHYS GCPhysMem;
799 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
800 if (rcStrict != VINF_SUCCESS)
801 return rcStrict;
802
803 /*
804 * If we can map the page without trouble, do a block processing
805 * until the end of the current page.
806 */
807 OP_TYPE *puMem;
808 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem);
809 if (rcStrict == VINF_SUCCESS)
810 {
811 /* Update the regs first so we can loop on cLeftPage. */
812 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
813 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
814
815 /* Do the memsetting. */
816#if OP_SIZE == 8
817 memset(puMem, uValue, cLeftPage);
818/*#elif OP_SIZE == 32
819 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
820#else
821 while (cLeftPage-- > 0)
822 *puMem++ = uValue;
823#endif
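        /* Explanatory note: memset only fits the byte-sized variant since it
           replicates a single byte; wider elements are stored one by one in the
           loop above (the commented-out ASMMemFill32 hints at a 32-bit fast path). */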
824
825 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem);
826
827 /* If unaligned, we drop thru and do the page crossing access
828 below. Otherwise, do the next page. */
829 if (!(uVirtAddr & (OP_SIZE - 1)))
830 continue;
831 if (uCounterReg == 0)
832 break;
833 cLeftPage = 0;
834 }
835 }
836
837 /*
838 * Fallback - slow processing till the end of the current page.
839 * In the cross page boundary case we will end up here with cLeftPage
840 * as 0, in which case we execute exactly one iteration.
841 */
842 do
843 {
844 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
845 if (rcStrict != VINF_SUCCESS)
846 return rcStrict;
847 pCtx->ADDR_rDI = uAddrReg += cbIncr;
848 pCtx->ADDR_rCX = --uCounterReg;
849 cLeftPage--;
850 } while ((int32_t)cLeftPage > 0);
851 } while (uCounterReg != 0);
852
853 /*
854 * Done.
855 */
856 iemRegAddToRip(pIemCpu, cbInstr);
857 return VINF_SUCCESS;
858}
859
860
861/**
862 * Implements 'REP LODS'.
863 */
864IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
865{
866 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
867
868 /*
869 * Setup.
870 */
871 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
872 if (uCounterReg == 0)
873 {
874 iemRegAddToRip(pIemCpu, cbInstr);
875 return VINF_SUCCESS;
876 }
877
878 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
879 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
880 if (rcStrict != VINF_SUCCESS)
881 return rcStrict;
882
883 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
884 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
885
886 /*
887 * The loop.
888 */
889 do
890 {
891 /*
892 * Do segmentation and virtual page stuff.
893 */
894#if ADDR_SIZE != 64
895 ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
896#else
897 uint64_t uVirtAddr = uAddrReg;
898#endif
899 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
900 if (cLeftPage > uCounterReg)
901 cLeftPage = uCounterReg;
902 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
903 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
904#if ADDR_SIZE != 64
905 && uAddrReg < pSrcHid->u32Limit
906 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
907#endif
908 )
909 {
910 RTGCPHYS GCPhysMem;
911 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
912 if (rcStrict != VINF_SUCCESS)
913 return rcStrict;
914
915 /*
916 * If we can map the page without trouble, we can get away with
917 * just reading the last value on the page.
918 */
919 OP_TYPE const *puMem;
920 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
921 if (rcStrict == VINF_SUCCESS)
922 {
923 /* Only get the last element, the rest doesn't matter in direct access mode. */
924#if OP_SIZE == 32
925 pCtx->rax = puMem[cLeftPage - 1];
926#else
927 pCtx->OP_rAX = puMem[cLeftPage - 1];
928#endif
929 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
930 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
931 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
932
933 /* If unaligned, we drop thru and do the page crossing access
934 below. Otherwise, do the next page. */
935 if (!(uVirtAddr & (OP_SIZE - 1)))
936 continue;
937 if (uCounterReg == 0)
938 break;
939 cLeftPage = 0;
940 }
941 }
942
943 /*
944 * Fallback - slow processing till the end of the current page.
945 * In the cross page boundary case we will end up here with cLeftPage
946 * as 0, in which case we execute exactly one iteration.
947 */
948 do
949 {
950 OP_TYPE uTmpValue;
951 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
952 if (rcStrict != VINF_SUCCESS)
953 return rcStrict;
954#if OP_SIZE == 32
955 pCtx->rax = uTmpValue;
956#else
957 pCtx->OP_rAX = uTmpValue;
958#endif
959 pCtx->ADDR_rSI = uAddrReg += cbIncr;
960 pCtx->ADDR_rCX = --uCounterReg;
961 cLeftPage--;
962 } while ((int32_t)cLeftPage > 0);
963 if (rcStrict != VINF_SUCCESS)
964 break;
965 } while (uCounterReg != 0);
966
967 /*
968 * Done.
969 */
970 iemRegAddToRip(pIemCpu, cbInstr);
971 return VINF_SUCCESS;
972}
973
974
975#if OP_SIZE != 64
976
977/**
978 * Implements 'INS' (no rep)
979 */
980IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
981{
982 PVM pVM = IEMCPU_TO_VM(pIemCpu);
983 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
984 VBOXSTRICTRC rcStrict;
985
986 /*
987 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
988 * segmentation and finally any #PF due to virtual address translation.
989 * ASSUMES nothing is read from the I/O port before traps are taken.
990 */
991 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
992 if (rcStrict != VINF_SUCCESS)
993 return rcStrict;
994
995 OP_TYPE *puMem;
996 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
997 if (rcStrict != VINF_SUCCESS)
998 return rcStrict;
999
1000 uint32_t u32Value;
1001 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1002 rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8);
1003 else
1004 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1005 if (IOM_SUCCESS(rcStrict))
1006 {
1007 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1008 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1009 {
1010 if (!pCtx->eflags.Bits.u1DF)
1011 pCtx->ADDR_rDI += OP_SIZE / 8;
1012 else
1013 pCtx->ADDR_rDI -= OP_SIZE / 8;
1014 iemRegAddToRip(pIemCpu, cbInstr);
1015 }
1016 /* iemMemMap already checked the permissions, so this can only be real errors
1017 or access handlers meddling. The access handler case is going to
1018 cause misbehavior if the instruction is re-interpreted or the like. So,
1019 we fail with an internal error here instead. */
1020 else
1021 AssertLogRelFailedReturn(VERR_IEM_IPE_1);
1022 }
1023 return rcStrict;
1024}
1025
1026
1027/**
1028 * Implements 'REP INS'.
1029 */
1030IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1031{
1032 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1033 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1034
1035 /*
1036 * Setup.
1037 */
1038 uint16_t const u16Port = pCtx->dx;
1039 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1040 if (rcStrict != VINF_SUCCESS)
1041 return rcStrict;
1042
1043 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1044 if (uCounterReg == 0)
1045 {
1046 iemRegAddToRip(pIemCpu, cbInstr);
1047 return VINF_SUCCESS;
1048 }
1049
1050 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->esHid, X86_SREG_ES);
1051 if (rcStrict != VINF_SUCCESS)
1052 return rcStrict;
1053
1054 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1055 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1056
1057 /*
1058 * The loop.
1059 */
1060 do
1061 {
1062 /*
1063 * Do segmentation and virtual page stuff.
1064 */
1065#if ADDR_SIZE != 64
1066 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->esHid.u64Base + uAddrReg;
1067#else
1068 uint64_t uVirtAddr = uAddrReg;
1069#endif
1070 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1071 if (cLeftPage > uCounterReg)
1072 cLeftPage = uCounterReg;
1073 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1074 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1075#if ADDR_SIZE != 64
1076 && uAddrReg < pCtx->esHid.u32Limit
1077 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->esHid.u32Limit
1078#endif
1079 )
1080 {
1081 RTGCPHYS GCPhysMem;
1082 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1083 if (rcStrict != VINF_SUCCESS)
1084 return rcStrict;
1085
1086 /*
1087 * If we can map the page without trouble, we would've liked to use
1088 * a string I/O method to do the work, but the current IOM
1089 * interface doesn't match our current approach. So, do a regular
1090 * loop instead.
1091 */
1092 /** @todo Change the I/O manager interface to make use of
1093 * mapped buffers instead of leaving those bits to the
1094 * device implementation? */
1095 OP_TYPE *puMem;
1096 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem);
1097 if (rcStrict == VINF_SUCCESS)
1098 {
1099 uint32_t off = 0;
1100 while (off < cLeftPage)
1101 {
1102 uint32_t u32Value;
1103 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1104 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1105 else
1106 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1107 if (IOM_SUCCESS(rcStrict))
1108 {
1109 puMem[off] = (OP_TYPE)u32Value;
1110 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1111 pCtx->ADDR_rCX = --uCounterReg;
1112 }
1113 if (rcStrict != VINF_SUCCESS)
1114 {
1115 /** @todo massage rc */
1116 if (uCounterReg == 0)
1117 iemRegAddToRip(pIemCpu, cbInstr);
1118 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem);
1119 return rcStrict;
1120 }
1121 off++;
1122 }
1123 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem);
1124
1125 /* If unaligned, we drop thru and do the page crossing access
1126 below. Otherwise, do the next page. */
1127 if (!(uVirtAddr & (OP_SIZE - 1)))
1128 continue;
1129 if (uCounterReg == 0)
1130 break;
1131 cLeftPage = 0;
1132 }
1133 }
1134
1135 /*
1136 * Fallback - slow processing till the end of the current page.
1137 * In the cross page boundary case we will end up here with cLeftPage
1138 * as 0, in which case we execute exactly one iteration.
1139 *
1140 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1141 * I/O port, otherwise it wouldn't really be restartable.
1142 */
1143 /** @todo investigate what the CPU actually does with \#PF/\#GP
1144 * during INS. */
1145 do
1146 {
1147 OP_TYPE *puMem;
1148 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1149 if (rcStrict != VINF_SUCCESS)
1150 return rcStrict;
1151
1152 uint32_t u32Value;
1153 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1154 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1155 else
1156 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1157 if (!IOM_SUCCESS(rcStrict))
1158 return rcStrict;
1159
1160 *puMem = (OP_TYPE)u32Value;
1161 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1162 AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */
1163
1164 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1165 pCtx->ADDR_rCX = --uCounterReg;
1166
1167 cLeftPage--;
1168 if (rcStrict != VINF_SUCCESS)
1169 {
1170 /** @todo massage IOM status codes! */
1171 if (uCounterReg == 0)
1172 iemRegAddToRip(pIemCpu, cbInstr);
1173 return rcStrict;
1174 }
1175 } while ((int32_t)cLeftPage > 0);
1176 } while (uCounterReg != 0);
1177
1178 /*
1179 * Done.
1180 */
1181 iemRegAddToRip(pIemCpu, cbInstr);
1182 return VINF_SUCCESS;
1183}
1184
1185
1186/**
1187 * Implements 'OUTS' (no rep)
1188 */
1189IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1190{
1191 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1192 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1193 VBOXSTRICTRC rcStrict;
1194
1195 /*
1196 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1197 * segmentation and finally any #PF due to virtual address translation.
1198 * ASSUMES nothing is read from the I/O port before traps are taken.
1199 */
1200 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1201 if (rcStrict != VINF_SUCCESS)
1202 return rcStrict;
1203
1204 OP_TYPE uValue;
1205 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1206 if (rcStrict == VINF_SUCCESS)
1207 {
1208 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1209 rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8);
1210 else
1211 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1212 if (IOM_SUCCESS(rcStrict))
1213 {
1214 if (!pCtx->eflags.Bits.u1DF)
1215 pCtx->ADDR_rSI += OP_SIZE / 8;
1216 else
1217 pCtx->ADDR_rSI -= OP_SIZE / 8;
1218 iemRegAddToRip(pIemCpu, cbInstr);
1219 /** @todo massage IOM status codes. */
1220 }
1221 }
1222 return rcStrict;
1223}
1224
1225
1226/**
1227 * Implements 'REP OUTS'.
1228 */
1229IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1230{
1231 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1232 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1233
1234 /*
1235 * Setup.
1236 */
1237 uint16_t const u16Port = pCtx->dx;
1238 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1239 if (rcStrict != VINF_SUCCESS)
1240 return rcStrict;
1241
1242 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1243 if (uCounterReg == 0)
1244 {
1245 iemRegAddToRip(pIemCpu, cbInstr);
1246 return VINF_SUCCESS;
1247 }
1248
1249 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1250 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
1251 if (rcStrict != VINF_SUCCESS)
1252 return rcStrict;
1253
1254 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1255 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1256
1257 /*
1258 * The loop.
1259 */
1260 do
1261 {
1262 /*
1263 * Do segmentation and virtual page stuff.
1264 */
1265#if ADDR_SIZE != 64
1266 ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
1267#else
1268 uint64_t uVirtAddr = uAddrReg;
1269#endif
1270 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1271 if (cLeftPage > uCounterReg)
1272 cLeftPage = uCounterReg;
1273 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1274 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1275#if ADDR_SIZE != 64
1276 && uAddrReg < pHid->u32Limit
1277 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
1278#endif
1279 )
1280 {
1281 RTGCPHYS GCPhysMem;
1282 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1283 if (rcStrict != VINF_SUCCESS)
1284 return rcStrict;
1285
1286 /*
1287 * If we can map the page without trouble, we would've liked to use
1288 * a string I/O method to do the work, but the current IOM
1289 * interface doesn't match our current approach. So, do a regular
1290 * loop instead.
1291 */
1292 /** @todo Change the I/O manager interface to make use of
1293 * mapped buffers instead of leaving those bits to the
1294 * device implementation? */
1295 OP_TYPE const *puMem;
1296 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem);
1297 if (rcStrict == VINF_SUCCESS)
1298 {
1299 uint32_t off = 0;
1300 while (off < cLeftPage)
1301 {
1302 uint32_t u32Value = *puMem++;
1303 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1304 rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8);
1305 else
1306 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1307 if (IOM_SUCCESS(rcStrict))
1308 {
1309 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1310 pCtx->ADDR_rCX = --uCounterReg;
1311 }
1312 if (rcStrict != VINF_SUCCESS)
1313 {
1314 /** @todo massage IOM rc */
1315 if (uCounterReg == 0)
1316 iemRegAddToRip(pIemCpu, cbInstr);
1317 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
1318 return rcStrict;
1319 }
1320 off++;
1321 }
1322 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem);
1323
1324 /* If unaligned, we drop thru and do the page crossing access
1325 below. Otherwise, do the next page. */
1326 if (!(uVirtAddr & (OP_SIZE - 1)))
1327 continue;
1328 if (uCounterReg == 0)
1329 break;
1330 cLeftPage = 0;
1331 }
1332 }
1333
1334 /*
1335 * Fallback - slow processing till the end of the current page.
1336 * In the cross page boundary case we will end up here with cLeftPage
1337 * as 0, in which case we execute exactly one iteration.
1338 *
1339 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1340 * I/O port, otherwise it wouldn't really be restartable.
1341 */
1342 /** @todo investigate what the CPU actually does with \#PF/\#GP
1343 * during OUTS. */
1344 do
1345 {
1346 OP_TYPE uValue;
1347 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1348 if (rcStrict != VINF_SUCCESS)
1349 return rcStrict;
1350
1351 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1352 rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8);
1353 else
1354 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1355 if (IOM_SUCCESS(rcStrict))
1356 {
1357 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1358 pCtx->ADDR_rCX = --uCounterReg;
1359 cLeftPage--;
1360 }
1361 if (rcStrict != VINF_SUCCESS)
1362 {
1363 /** @todo massage IOM status codes! */
1364 if (uCounterReg == 0)
1365 iemRegAddToRip(pIemCpu, cbInstr);
1366 return rcStrict;
1367 }
1368 } while ((int32_t)cLeftPage > 0);
1369 } while (uCounterReg != 0);
1370
1371 /*
1372 * Done.
1373 */
1374 iemRegAddToRip(pIemCpu, cbInstr);
1375 return VINF_SUCCESS;
1376}
1377
1378#endif /* OP_SIZE != 64-bit */
1379
1380
1381#undef OP_rAX
1382#undef OP_SIZE
1383#undef ADDR_SIZE
1384#undef ADDR_rDI
1385#undef ADDR_rSI
1386#undef ADDR_rCX
1387#undef ADDR_rIP
1388#undef ADDR2_TYPE
1389#undef ADDR_TYPE
1390#undef ADDR2_TYPE
1391
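For quick reference, the RT_CONCAT* wrappers above expand to size-specific symbols; taking OP_SIZE 16 with ADDR_SIZE 32 as an example (names derived mechanically from the definitions in this template), one instantiation produces:

/* Functions emitted by an OP_SIZE=16 / ADDR_SIZE=32 instantiation: */
iemCImpl_repe_cmps_op16_addr32   iemCImpl_repne_cmps_op16_addr32
iemCImpl_repe_scas_ax_m32        iemCImpl_repne_scas_ax_m32
iemCImpl_rep_movs_op16_addr32    iemCImpl_stos_ax_m32
iemCImpl_lods_ax_m32             iemCImpl_ins_op16_addr32
iemCImpl_rep_ins_op16_addr32     iemCImpl_outs_op16_addr32
iemCImpl_rep_outs_op16_addr32    /* INS/OUTS variants exist only for OP_SIZE != 64 */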