VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@53404

Last change on this file since 53404 was 52456, checked in by vboxsync on 2014-08-22 (commit message: "Typos").

1/* $Id: IEMAllCImplStrInstr.cpp.h 52456 2014-08-22 08:46:41Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50# define IS_64_BIT_CODE(a_pIemCpu) (true)
51#else
52# error "Bad ADDR_SIZE."
53#endif
54#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
55
56#if ADDR_SIZE == 64 || OP_SIZE == 64
57# define IS_64_BIT_CODE(a_pIemCpu) (true)
58#elif ADDR_SIZE == 32
59# define IS_64_BIT_CODE(a_pIemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)
60#else
61# define IS_64_BIT_CODE(a_pIemCpu) (false)
62#endif
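/* Illustrative note: this template is meant to be included with OP_SIZE and
   ADDR_SIZE already #defined, once per operand/address size combination.
   For example, with OP_SIZE=32 and ADDR_SIZE=16 the macros above expand to:
       OP_TYPE    -> uint32_t      OP_rAX   -> eax
       ADDR_TYPE  -> uint16_t      ADDR_rCX -> cx, ADDR_rSI -> si, ADDR_rDI -> di
       ADDR2_TYPE -> uint32_t
   and a name built with RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE)
   becomes iemCImpl_repe_cmps_op32_addr16. */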
63
64
65/**
66 * Implements 'REPE CMPS'.
67 */
68IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
69{
70 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
71
72 /*
73 * Setup.
74 */
75 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
76 if (uCounterReg == 0)
77 {
78 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
79 return VINF_SUCCESS;
80 }
81
82 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
83 uint64_t uSrc1Base;
84 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
85 if (rcStrict != VINF_SUCCESS)
86 return rcStrict;
87
88 uint64_t uSrc2Base;
89 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
90 if (rcStrict != VINF_SUCCESS)
91 return rcStrict;
92
93 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
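 /* EFLAGS.DF selects the direction (DF=0 ascending, DF=1 descending), so
    cbIncr is the signed per-element step in bytes. */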
94 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
95 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
96 uint32_t uEFlags = pCtx->eflags.u;
97
98 /*
99 * The loop.
100 */
101 do
102 {
103 /*
104 * Do segmentation and virtual page stuff.
105 */
106 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
107 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
108 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
109 if (cLeftSrc1Page > uCounterReg)
110 cLeftSrc1Page = uCounterReg;
111 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
112 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
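 /* Worked example of the chunking above (added for illustration): with 4KiB
    pages and OP_SIZE=16 (2 bytes/item), a source lying 0xFFC bytes into its
    page leaves (0x1000 - 0xFFC) / 2 = 2 items before the boundary, so the
    block pass below handles at most 2 items this round and the rest is picked
    up on the next iteration of the outer loop. */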
113
114 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
115 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
116 && ( IS_64_BIT_CODE(pIemCpu)
117 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
118 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
119 && uSrc2AddrReg < pCtx->es.u32Limit
120 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
121 )
122 )
123 {
124 RTGCPHYS GCPhysSrc1Mem;
125 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
126 if (rcStrict != VINF_SUCCESS)
127 return rcStrict;
128
129 RTGCPHYS GCPhysSrc2Mem;
130 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
131 if (rcStrict != VINF_SUCCESS)
132 return rcStrict;
133
134 /*
135 * If we can map the page without trouble, do a block processing
136 * until the end of the current page.
137 */
138 PGMPAGEMAPLOCK PgLockSrc2Mem;
139 OP_TYPE const *puSrc2Mem;
140 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
141 if (rcStrict == VINF_SUCCESS)
142 {
143 PGMPAGEMAPLOCK PgLockSrc1Mem;
144 OP_TYPE const *puSrc1Mem;
145 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
146 if (rcStrict == VINF_SUCCESS)
147 {
148 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
149 {
150 /* All matches, only compare the last item to get the right eflags. */
151 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
152 uSrc1AddrReg += cLeftPage * cbIncr;
153 uSrc2AddrReg += cLeftPage * cbIncr;
154 uCounterReg -= cLeftPage;
155 }
156 else
157 {
158 /* Some mismatch, compare each item (and keep volatile
159 memory in mind). */
160 uint32_t off = 0;
161 do
162 {
163 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
164 off++;
165 } while ( off < cLeftPage
166 && (uEFlags & X86_EFL_ZF));
167 uSrc1AddrReg += cbIncr * off;
168 uSrc2AddrReg += cbIncr * off;
169 uCounterReg -= off;
170 }
171
172 /* Update the registers before looping. */
173 pCtx->ADDR_rCX = uCounterReg;
174 pCtx->ADDR_rSI = uSrc1AddrReg;
175 pCtx->ADDR_rDI = uSrc2AddrReg;
176 pCtx->eflags.u = uEFlags;
177
178 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
179 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
180 continue;
181 }
182 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
183 }
184 }
185
186 /*
187 * Fallback - slow processing till the end of the current page.
188 * In the cross page boundary case we will end up here with cLeftPage
189 * as 0; we then execute a single round of the loop.
190 */
191 do
192 {
193 OP_TYPE uValue1;
194 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
195 if (rcStrict != VINF_SUCCESS)
196 return rcStrict;
197 OP_TYPE uValue2;
198 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
199 if (rcStrict != VINF_SUCCESS)
200 return rcStrict;
201 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
202
203 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
204 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
205 pCtx->ADDR_rCX = --uCounterReg;
206 pCtx->eflags.u = uEFlags;
207 cLeftPage--;
208 } while ( (int32_t)cLeftPage > 0
209 && (uEFlags & X86_EFL_ZF));
210 } while ( uCounterReg != 0
211 && (uEFlags & X86_EFL_ZF));
212
213 /*
214 * Done.
215 */
216 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
217 return VINF_SUCCESS;
218}
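/* Note on the REPNE variant below: it is structurally the same as the REPE
   variant above, with the termination sense inverted - REPE keeps going while
   ZF is set (elements equal), REPNE while ZF is clear (elements differ), which
   is why the (uEFlags & X86_EFL_ZF) loop conditions are negated below. */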
219
220
221/**
222 * Implements 'REPNE CMPS'.
223 */
224IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
225{
226 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
227
228 /*
229 * Setup.
230 */
231 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
232 if (uCounterReg == 0)
233 {
234 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
235 return VINF_SUCCESS;
236 }
237
238 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
239 uint64_t uSrc1Base;
240 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
241 if (rcStrict != VINF_SUCCESS)
242 return rcStrict;
243
244 uint64_t uSrc2Base;
245 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
246 if (rcStrict != VINF_SUCCESS)
247 return rcStrict;
248
249 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
250 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
251 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
252 uint32_t uEFlags = pCtx->eflags.u;
253
254 /*
255 * The loop.
256 */
257 do
258 {
259 /*
260 * Do segmentation and virtual page stuff.
261 */
262 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
263 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
264 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 if (cLeftSrc1Page > uCounterReg)
266 cLeftSrc1Page = uCounterReg;
267 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
268 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
269
270 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
271 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
272 && ( IS_64_BIT_CODE(pIemCpu)
273 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
274 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
275 && uSrc2AddrReg < pCtx->es.u32Limit
276 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
277 )
278 )
279 {
280 RTGCPHYS GCPhysSrc1Mem;
281 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
282 if (rcStrict != VINF_SUCCESS)
283 return rcStrict;
284
285 RTGCPHYS GCPhysSrc2Mem;
286 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
287 if (rcStrict != VINF_SUCCESS)
288 return rcStrict;
289
290 /*
291 * If we can map the page without trouble, do a block processing
292 * until the end of the current page.
293 */
294 OP_TYPE const *puSrc2Mem;
295 PGMPAGEMAPLOCK PgLockSrc2Mem;
296 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
297 if (rcStrict == VINF_SUCCESS)
298 {
299 OP_TYPE const *puSrc1Mem;
300 PGMPAGEMAPLOCK PgLockSrc1Mem;
301 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
302 if (rcStrict == VINF_SUCCESS)
303 {
304 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
305 {
306 /* All matches, only compare the last item to get the right eflags. */
307 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
308 uSrc1AddrReg += cLeftPage * cbIncr;
309 uSrc2AddrReg += cLeftPage * cbIncr;
310 uCounterReg -= cLeftPage;
311 }
312 else
313 {
314 /* Some mismatch, compare each item (and keep volatile
315 memory in mind). */
316 uint32_t off = 0;
317 do
318 {
319 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
320 off++;
321 } while ( off < cLeftPage
322 && !(uEFlags & X86_EFL_ZF));
323 uSrc1AddrReg += cbIncr * off;
324 uSrc2AddrReg += cbIncr * off;
325 uCounterReg -= off;
326 }
327
328 /* Update the registers before looping. */
329 pCtx->ADDR_rCX = uCounterReg;
330 pCtx->ADDR_rSI = uSrc1AddrReg;
331 pCtx->ADDR_rDI = uSrc2AddrReg;
332 pCtx->eflags.u = uEFlags;
333
334 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 continue;
337 }
338 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
339 }
340 }
341
342 /*
343 * Fallback - slow processing till the end of the current page.
344 * In the cross page boundary case we will end up here with cLeftPage
345 * as 0; we then execute a single round of the loop.
346 */
347 do
348 {
349 OP_TYPE uValue1;
350 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
351 if (rcStrict != VINF_SUCCESS)
352 return rcStrict;
353 OP_TYPE uValue2;
354 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
355 if (rcStrict != VINF_SUCCESS)
356 return rcStrict;
357 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
358
359 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
360 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
361 pCtx->ADDR_rCX = --uCounterReg;
362 pCtx->eflags.u = uEFlags;
363 cLeftPage--;
364 } while ( (int32_t)cLeftPage > 0
365 && !(uEFlags & X86_EFL_ZF));
366 } while ( uCounterReg != 0
367 && !(uEFlags & X86_EFL_ZF));
368
369 /*
370 * Done.
371 */
372 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
373 return VINF_SUCCESS;
374}
375
376
377/**
378 * Implements 'REPE SCAS'.
379 */
380IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
381{
382 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
383
384 /*
385 * Setup.
386 */
387 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
388 if (uCounterReg == 0)
389 {
390 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
391 return VINF_SUCCESS;
392 }
393
394 uint64_t uBaseAddr;
395 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
396 if (rcStrict != VINF_SUCCESS)
397 return rcStrict;
398
399 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
400 OP_TYPE const uValueReg = pCtx->OP_rAX;
401 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
402 uint32_t uEFlags = pCtx->eflags.u;
403
404 /*
405 * The loop.
406 */
407 do
408 {
409 /*
410 * Do segmentation and virtual page stuff.
411 */
412 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418 && ( IS_64_BIT_CODE(pIemCpu)
419 || ( uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
421 )
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do a block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
447
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
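 /* fQuit was set on the first element that differs from the accumulator,
    which is exactly when the CMP above leaves ZF clear; the assertion below
    cross-checks that. */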
453 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross page boundary case we will end up here with cLeftPage
472 * as 0; we then execute a single round of the loop.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 uint64_t uBaseAddr;
517 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
518 if (rcStrict != VINF_SUCCESS)
519 return rcStrict;
520
521 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
522 OP_TYPE const uValueReg = pCtx->OP_rAX;
523 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
524 uint32_t uEFlags = pCtx->eflags.u;
525
526 /*
527 * The loop.
528 */
529 do
530 {
531 /*
532 * Do segmentation and virtual page stuff.
533 */
534 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
535 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
536 if (cLeftPage > uCounterReg)
537 cLeftPage = uCounterReg;
538 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
539 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
540 && ( IS_64_BIT_CODE(pIemCpu)
541 || ( uAddrReg < pCtx->es.u32Limit
542 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
543 )
544 )
545 {
546 RTGCPHYS GCPhysMem;
547 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
548 if (rcStrict != VINF_SUCCESS)
549 return rcStrict;
550
551 /*
552 * If we can map the page without trouble, do a block processing
553 * until the end of the current page.
554 */
555 PGMPAGEMAPLOCK PgLockMem;
556 OP_TYPE const *puMem;
557 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
558 if (rcStrict == VINF_SUCCESS)
559 {
560 /* Search till we find a mismatching item. */
561 OP_TYPE uTmpValue;
562 bool fQuit;
563 uint32_t i = 0;
564 do
565 {
566 uTmpValue = puMem[i++];
567 fQuit = uTmpValue == uValueReg;
568 } while (i < cLeftPage && !fQuit);
569
570 /* Update the regs. */
571 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
572 pCtx->ADDR_rCX = uCounterReg -= i;
573 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
574 pCtx->eflags.u = uEFlags;
575 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
576 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
577 if (fQuit)
578 break;
579
580
581 /* If unaligned, we drop thru and do the page crossing access
582 below. Otherwise, do the next page. */
583 if (!(uVirtAddr & (OP_SIZE - 1)))
584 continue;
585 if (uCounterReg == 0)
586 break;
587 cLeftPage = 0;
588 }
589 }
590
591 /*
592 * Fallback - slow processing till the end of the current page.
593 * In the cross page boundary case we will end up here with cLeftPage
594 * as 0; we then execute a single round of the loop.
595 */
596 do
597 {
598 OP_TYPE uTmpValue;
599 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
600 if (rcStrict != VINF_SUCCESS)
601 return rcStrict;
602 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
603 pCtx->ADDR_rDI = uAddrReg += cbIncr;
604 pCtx->ADDR_rCX = --uCounterReg;
605 pCtx->eflags.u = uEFlags;
606 cLeftPage--;
607 } while ( (int32_t)cLeftPage > 0
608 && !(uEFlags & X86_EFL_ZF));
609 } while ( uCounterReg != 0
610 && !(uEFlags & X86_EFL_ZF));
611
612 /*
613 * Done.
614 */
615 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
616 return VINF_SUCCESS;
617}
618
619
620
621
622/**
623 * Implements 'REP MOVS'.
624 */
625IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
626{
627 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
628
629 /*
630 * Setup.
631 */
632 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
633 if (uCounterReg == 0)
634 {
635 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
636 return VINF_SUCCESS;
637 }
638
639 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
640 uint64_t uSrcBase;
641 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase);
642 if (rcStrict != VINF_SUCCESS)
643 return rcStrict;
644
645 uint64_t uDstBase;
646 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uDstBase);
647 if (rcStrict != VINF_SUCCESS)
648 return rcStrict;
649
650 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
651 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
652 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
653
654 /*
655 * Be careful with handle bypassing.
656 */
657 if (pIemCpu->fBypassHandlers)
658 {
659 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
660 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
661 }
662
663 /*
664 * If we're reading back what we write, we have to let the verification code
665 * know about it, to prevent a false positive.
666 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
667 */
668#ifdef IEM_VERIFICATION_MODE_FULL
669 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
670 && (cbIncr > 0
671 ? uSrcAddrReg <= uDstAddrReg
672 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
673 : uDstAddrReg <= uSrcAddrReg
674 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
675 pIemCpu->fOverlappingMovs = true;
676#endif
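 /* Example of the forward overlap case above (added for illustration): with
    DF=0, OP_SIZE=32, rSI=0x1000, rDI=0x1008 and rCX=16, the source range
    0x1000..0x103F reaches past rDI, so from the third element onwards the
    reads pick up data that earlier iterations already wrote. */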
677
678 /*
679 * The loop.
680 */
681 do
682 {
683 /*
684 * Do segmentation and virtual page stuff.
685 */
686 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
687 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
688 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
689 if (cLeftSrcPage > uCounterReg)
690 cLeftSrcPage = uCounterReg;
691 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
692 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
693
694 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
695 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
696 && ( IS_64_BIT_CODE(pIemCpu)
697 || ( uSrcAddrReg < pSrcHid->u32Limit
698 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
699 && uDstAddrReg < pCtx->es.u32Limit
700 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
701 )
702 )
703 {
704 RTGCPHYS GCPhysSrcMem;
705 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
706 if (rcStrict != VINF_SUCCESS)
707 return rcStrict;
708
709 RTGCPHYS GCPhysDstMem;
710 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
711 if (rcStrict != VINF_SUCCESS)
712 return rcStrict;
713
714 /*
715 * If we can map the page without trouble, do a block processing
716 * until the end of the current page.
717 */
718 PGMPAGEMAPLOCK PgLockDstMem;
719 OP_TYPE *puDstMem;
720 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
721 if (rcStrict == VINF_SUCCESS)
722 {
723 PGMPAGEMAPLOCK PgLockSrcMem;
724 OP_TYPE const *puSrcMem;
725 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
726 if (rcStrict == VINF_SUCCESS)
727 {
728 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
729 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
730
731 /* Perform the operation exactly (don't use memcpy to avoid
732 having to consider how its implementation would affect
733 any overlapping source and destination area). */
734 OP_TYPE const *puSrcCur = puSrcMem;
735 OP_TYPE *puDstCur = puDstMem;
736 uint32_t cTodo = cLeftPage;
737 while (cTodo-- > 0)
738 *puDstCur++ = *puSrcCur++;
739
740 /* Update the registers. */
741 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
742 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
743 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
744
745 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
746 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
747 continue;
748 }
749 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
750 }
751 }
752
753 /*
754 * Fallback - slow processing till the end of the current page.
755 * In the cross page boundary case we will end up here with cLeftPage
756 * as 0; we then execute a single round of the loop.
757 */
758 do
759 {
760 OP_TYPE uValue;
761 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
762 if (rcStrict != VINF_SUCCESS)
763 return rcStrict;
764 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
765 if (rcStrict != VINF_SUCCESS)
766 return rcStrict;
767
768 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
769 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
770 pCtx->ADDR_rCX = --uCounterReg;
771 cLeftPage--;
772 } while ((int32_t)cLeftPage > 0);
773 } while (uCounterReg != 0);
774
775 /*
776 * Done.
777 */
778 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
779 return VINF_SUCCESS;
780}
781
782
783/**
784 * Implements 'REP STOS'.
785 */
786IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
787{
788 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
789
790 /*
791 * Setup.
792 */
793 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
794 if (uCounterReg == 0)
795 {
796 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
797 return VINF_SUCCESS;
798 }
799
800 uint64_t uBaseAddr;
801 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
802 if (rcStrict != VINF_SUCCESS)
803 return rcStrict;
804
805 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
806 OP_TYPE const uValue = pCtx->OP_rAX;
807 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
808
809 /*
810 * Be careful with handle bypassing.
811 */
812 /** @todo Permit doing a page if correctly aligned. */
813 if (pIemCpu->fBypassHandlers)
814 {
815 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
816 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
817 }
818
819 /*
820 * The loop.
821 */
822 do
823 {
824 /*
825 * Do segmentation and virtual page stuff.
826 */
827 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
828 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
829 if (cLeftPage > uCounterReg)
830 cLeftPage = uCounterReg;
831 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
832 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
833 && ( IS_64_BIT_CODE(pIemCpu)
834 || ( uAddrReg < pCtx->es.u32Limit
835 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
836 )
837 )
838 {
839 RTGCPHYS GCPhysMem;
840 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
841 if (rcStrict != VINF_SUCCESS)
842 return rcStrict;
843
844 /*
845 * If we can map the page without trouble, do a block processing
846 * until the end of the current page.
847 */
848 PGMPAGEMAPLOCK PgLockMem;
849 OP_TYPE *puMem;
850 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
851 if (rcStrict == VINF_SUCCESS)
852 {
853 /* Update the regs first so we can loop on cLeftPage. */
854 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
855 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
856
857 /* Do the memsetting. */
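 /* memset() can only replicate a byte pattern, so it is used for the 8-bit
    variant only; the wider variants store element by element (the
    commented-out ASMMemFill32 would be a 32-bit fast path). */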
858#if OP_SIZE == 8
859 memset(puMem, uValue, cLeftPage);
860/*#elif OP_SIZE == 32
861 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
862#else
863 while (cLeftPage-- > 0)
864 *puMem++ = uValue;
865#endif
866
867 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
868
869 /* If unaligned, we drop thru and do the page crossing access
870 below. Otherwise, do the next page. */
871 if (!(uVirtAddr & (OP_SIZE - 1)))
872 continue;
873 if (uCounterReg == 0)
874 break;
875 cLeftPage = 0;
876 }
877 }
878
879 /*
880 * Fallback - slow processing till the end of the current page.
881 * In the cross page boundary case we will end up here with cLeftPage
882 * as 0; we then execute a single round of the loop.
883 */
884 do
885 {
886 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
887 if (rcStrict != VINF_SUCCESS)
888 return rcStrict;
889 pCtx->ADDR_rDI = uAddrReg += cbIncr;
890 pCtx->ADDR_rCX = --uCounterReg;
891 cLeftPage--;
892 } while ((int32_t)cLeftPage > 0);
893 } while (uCounterReg != 0);
894
895 /*
896 * Done.
897 */
898 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
899 return VINF_SUCCESS;
900}
901
902
903/**
904 * Implements 'REP LODS'.
905 */
906IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
907{
908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
909
910 /*
911 * Setup.
912 */
913 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
914 if (uCounterReg == 0)
915 {
916 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
917 return VINF_SUCCESS;
918 }
919
920 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
921 uint64_t uBaseAddr;
922 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr);
923 if (rcStrict != VINF_SUCCESS)
924 return rcStrict;
925
926 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
927 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
928
929 /*
930 * The loop.
931 */
932 do
933 {
934 /*
935 * Do segmentation and virtual page stuff.
936 */
937 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
938 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
939 if (cLeftPage > uCounterReg)
940 cLeftPage = uCounterReg;
941 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
942 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
943 && ( IS_64_BIT_CODE(pIemCpu)
944 || ( uAddrReg < pSrcHid->u32Limit
945 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
946 )
947 )
948 {
949 RTGCPHYS GCPhysMem;
950 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
951 if (rcStrict != VINF_SUCCESS)
952 return rcStrict;
953
954 /*
955 * If we can map the page without trouble, we can get away with
956 * just reading the last value on the page.
957 */
958 PGMPAGEMAPLOCK PgLockMem;
959 OP_TYPE const *puMem;
960 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
961 if (rcStrict == VINF_SUCCESS)
962 {
963 /* Only get the last byte, the rest doesn't matter in direct access mode. */
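 /* For the 32-bit variant the full 64-bit rax is assigned so the upper half
    ends up zero, matching real hardware where a 32-bit register write
    zero-extends; going through OP_rAX (eax) would only touch the low dword
    of the saved context. */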
964#if OP_SIZE == 32
965 pCtx->rax = puMem[cLeftPage - 1];
966#else
967 pCtx->OP_rAX = puMem[cLeftPage - 1];
968#endif
969 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
970 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
971 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
972
973 /* If unaligned, we drop thru and do the page crossing access
974 below. Otherwise, do the next page. */
975 if (!(uVirtAddr & (OP_SIZE - 1)))
976 continue;
977 if (uCounterReg == 0)
978 break;
979 cLeftPage = 0;
980 }
981 }
982
983 /*
984 * Fallback - slow processing till the end of the current page.
985 * In the cross page boundary case we will end up here with cLeftPage
986 * as 0; we then execute a single round of the loop.
987 */
988 do
989 {
990 OP_TYPE uTmpValue;
991 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
992 if (rcStrict != VINF_SUCCESS)
993 return rcStrict;
994#if OP_SIZE == 32
995 pCtx->rax = uTmpValue;
996#else
997 pCtx->OP_rAX = uTmpValue;
998#endif
999 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1000 pCtx->ADDR_rCX = --uCounterReg;
1001 cLeftPage--;
1002 } while ((int32_t)cLeftPage > 0);
1003 if (rcStrict != VINF_SUCCESS)
1004 break;
1005 } while (uCounterReg != 0);
1006
1007 /*
1008 * Done.
1009 */
1010 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1011 return VINF_SUCCESS;
1012}
1013
1014
1015#if OP_SIZE != 64
1016
1017/**
1018 * Implements 'INS' (no rep)
1019 */
1020IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1021{
1022 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1023 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1024 VBOXSTRICTRC rcStrict;
1025
1026 /*
1027 * Be careful with handle bypassing.
1028 */
1029 if (pIemCpu->fBypassHandlers)
1030 {
1031 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1032 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1033 }
1034
1035 /*
1036 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1037 * segmentation and finally any #PF due to virtual address translation.
1038 * ASSUMES nothing is read from the I/O port before traps are taken.
1039 */
1040 if (!fIoChecked)
1041 {
1042 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1043 if (rcStrict != VINF_SUCCESS)
1044 return rcStrict;
1045 }
1046
1047 OP_TYPE *puMem;
1048 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1049 if (rcStrict != VINF_SUCCESS)
1050 return rcStrict;
1051
1052 uint32_t u32Value = 0;
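 /* The port read always fills a 32-bit buffer; only the low OP_SIZE/8 bytes
    are meaningful for narrower accesses, hence the (OP_TYPE) cast when the
    value is stored below. */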
1053 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1054 rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);
1055 else
1056 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1057 if (IOM_SUCCESS(rcStrict))
1058 {
1059 *puMem = (OP_TYPE)u32Value;
1060 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1061 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1062 {
1063 if (!pCtx->eflags.Bits.u1DF)
1064 pCtx->ADDR_rDI += OP_SIZE / 8;
1065 else
1066 pCtx->ADDR_rDI -= OP_SIZE / 8;
1067 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1068 }
1069 /* iemMemMap already checked permissions, so this may only be real errors
1070 or access handlers meddling. The access handler case is going to
1071 cause misbehavior if the instruction is re-interpreted or smth. So,
1072 we fail with an internal error here instead. */
1073 else
1074 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), VERR_IEM_IPE_1);
1075 }
1076 return rcStrict;
1077}
1078
1079
1080/**
1081 * Implements 'REP INS'.
1082 */
1083IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1084{
1085 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1086 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1087 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1088
1089 /*
1090 * Setup.
1091 */
1092 uint16_t const u16Port = pCtx->dx;
1093 VBOXSTRICTRC rcStrict;
1094 if (!fIoChecked)
1095 {
1096 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1097 if (rcStrict != VINF_SUCCESS)
1098 return rcStrict;
1099 }
1100
1101 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1102 if (uCounterReg == 0)
1103 {
1104 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1105 return VINF_SUCCESS;
1106 }
1107
1108 uint64_t uBaseAddr;
1109 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
1110 if (rcStrict != VINF_SUCCESS)
1111 return rcStrict;
1112
1113 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1114 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1115
1116 /*
1117 * Be careful with handle bypassing.
1118 */
1119 if (pIemCpu->fBypassHandlers)
1120 {
1121 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1122 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1123 }
1124
1125 /*
1126 * The loop.
1127 */
1128 do
1129 {
1130 /*
1131 * Do segmentation and virtual page stuff.
1132 */
1133 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1134 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1135 if (cLeftPage > uCounterReg)
1136 cLeftPage = uCounterReg;
1137 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1138 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1139 && ( IS_64_BIT_CODE(pIemCpu)
1140 || ( uAddrReg < pCtx->es.u32Limit
1141 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
1142 )
1143 )
1144 {
1145 RTGCPHYS GCPhysMem;
1146 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1147 if (rcStrict != VINF_SUCCESS)
1148 return rcStrict;
1149
1150 /*
1151 * If we can map the page without trouble, we would've liked to use
1152 * a string I/O method to do the work, but the current IOM
1153 * interface doesn't match our current approach. So, do a regular
1154 * loop instead.
1155 */
1156 /** @todo Change the I/O manager interface to make use of
1157 * mapped buffers instead of leaving those bits to the
1158 * device implementation! */
1159 PGMPAGEMAPLOCK PgLockMem;
1160 OP_TYPE *puMem;
1161 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1162 if (rcStrict == VINF_SUCCESS)
1163 {
1164 uint32_t off = 0;
1165 while (off < cLeftPage)
1166 {
1167 uint32_t u32Value;
1168 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1169 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1170 else
1171 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1172 if (IOM_SUCCESS(rcStrict))
1173 {
1174 puMem[off] = (OP_TYPE)u32Value;
1175 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1176 pCtx->ADDR_rCX = --uCounterReg;
1177 }
1178 if (rcStrict != VINF_SUCCESS)
1179 {
1180 if (IOM_SUCCESS(rcStrict))
1181 {
1182 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1183 if (uCounterReg == 0)
1184 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1185 }
1186 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1187 return rcStrict;
1188 }
1189 off++;
1190 }
1191 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1192
1193 /* If unaligned, we drop thru and do the page crossing access
1194 below. Otherwise, do the next page. */
1195 if (!(uVirtAddr & (OP_SIZE - 1)))
1196 continue;
1197 if (uCounterReg == 0)
1198 break;
1199 cLeftPage = 0;
1200 }
1201 }
1202
1203 /*
1204 * Fallback - slow processing till the end of the current page.
1205 * In the cross page boundary case we will end up here with cLeftPage
1206 * as 0; we then execute a single round of the loop.
1207 *
1208 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1209 * I/O port, otherwise it wouldn't really be restartable.
1210 */
1211 /** @todo investigate what the CPU actually does with \#PF/\#GP
1212 * during INS. */
1213 do
1214 {
1215 OP_TYPE *puMem;
1216 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1217 if (rcStrict != VINF_SUCCESS)
1218 return rcStrict;
1219
1220 uint32_t u32Value = 0;
1221 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1222 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1223 else
1224 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1225 if (!IOM_SUCCESS(rcStrict))
1226 return rcStrict;
1227
1228 *puMem = (OP_TYPE)u32Value;
1229 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1230 AssertLogRelMsgReturn(rcStrict2 == VINF_SUCCESS, ("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1231 VERR_IEM_IPE_1); /* See non-rep version. */
1232
1233 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1234 pCtx->ADDR_rCX = --uCounterReg;
1235
1236 cLeftPage--;
1237 if (rcStrict != VINF_SUCCESS)
1238 {
1239 if (uCounterReg == 0)
1240 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1241 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1242 return rcStrict;
1243 }
1244 } while ((int32_t)cLeftPage > 0);
1245 } while (uCounterReg != 0);
1246
1247 /*
1248 * Done.
1249 */
1250 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1251 return VINF_SUCCESS;
1252}
1253
1254
1255/**
1256 * Implements 'OUTS' (no rep)
1257 */
1258IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1259{
1260 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1261 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1262 VBOXSTRICTRC rcStrict;
1263
1264 /*
1265 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1266 * segmentation and finally any #PF due to virtual address translation.
1267 * ASSUMES nothing is read from the I/O port before traps are taken.
1268 */
1269 if (!fIoChecked)
1270 {
1271 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1272 if (rcStrict != VINF_SUCCESS)
1273 return rcStrict;
1274 }
1275
1276 OP_TYPE uValue;
1277 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1278 if (rcStrict == VINF_SUCCESS)
1279 {
1280 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1281 rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);
1282 else
1283 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1284 if (IOM_SUCCESS(rcStrict))
1285 {
1286 if (!pCtx->eflags.Bits.u1DF)
1287 pCtx->ADDR_rSI += OP_SIZE / 8;
1288 else
1289 pCtx->ADDR_rSI -= OP_SIZE / 8;
1290 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1291 if (rcStrict != VINF_SUCCESS)
1292 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1293 }
1294 }
1295 return rcStrict;
1296}
1297
1298
1299/**
1300 * Implements 'REP OUTS'.
1301 */
1302IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1303{
1304 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1305 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1306 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1307
1308 /*
1309 * Setup.
1310 */
1311 uint16_t const u16Port = pCtx->dx;
1312 VBOXSTRICTRC rcStrict;
1313 if (!fIoChecked)
1314 {
1315 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1316 if (rcStrict != VINF_SUCCESS)
1317 return rcStrict;
1318 }
1319
1320 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1321 if (uCounterReg == 0)
1322 {
1323 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1324 return VINF_SUCCESS;
1325 }
1326
1327 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1328 uint64_t uBaseAddr;
1329 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr);
1330 if (rcStrict != VINF_SUCCESS)
1331 return rcStrict;
1332
1333 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1334 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1335
1336 /*
1337 * The loop.
1338 */
1339 do
1340 {
1341 /*
1342 * Do segmentation and virtual page stuff.
1343 */
1344 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1345 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1346 if (cLeftPage > uCounterReg)
1347 cLeftPage = uCounterReg;
1348 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1349 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1350 && ( IS_64_BIT_CODE(pIemCpu)
1351 || ( uAddrReg < pHid->u32Limit
1352 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1353 )
1354 )
1355 {
1356 RTGCPHYS GCPhysMem;
1357 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1358 if (rcStrict != VINF_SUCCESS)
1359 return rcStrict;
1360
1361 /*
1362 * If we can map the page without trouble, we would've liked to use
1363 * a string I/O method to do the work, but the current IOM
1364 * interface doesn't match our current approach. So, do a regular
1365 * loop instead.
1366 */
1367 /** @todo Change the I/O manager interface to make use of
1368 * mapped buffers instead of leaving those bits to the
1369 * device implementation? */
1370 PGMPAGEMAPLOCK PgLockMem;
1371 OP_TYPE const *puMem;
1372 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1373 if (rcStrict == VINF_SUCCESS)
1374 {
1375 uint32_t off = 0;
1376 while (off < cLeftPage)
1377 {
1378 uint32_t u32Value = *puMem++;
1379 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1380 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, OP_SIZE / 8);
1381 else
1382 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1383 if (IOM_SUCCESS(rcStrict))
1384 {
1385 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1386 pCtx->ADDR_rCX = --uCounterReg;
1387 }
1388 if (rcStrict != VINF_SUCCESS)
1389 {
1390 if (IOM_SUCCESS(rcStrict))
1391 {
1392 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1393 if (uCounterReg == 0)
1394 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1395 }
1396 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1397 return rcStrict;
1398 }
1399 off++;
1400 }
1401 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1402
1403 /* If unaligned, we drop thru and do the page crossing access
1404 below. Otherwise, do the next page. */
1405 if (!(uVirtAddr & (OP_SIZE - 1)))
1406 continue;
1407 if (uCounterReg == 0)
1408 break;
1409 cLeftPage = 0;
1410 }
1411 }
1412
1413 /*
1414 * Fallback - slow processing till the end of the current page.
1415 * In the cross page boundary case we will end up here with cLeftPage
1416 * as 0; we then execute a single round of the loop.
1417 *
1418 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1419 * I/O port, otherwise it wouldn't really be restartable.
1420 */
1421 /** @todo investigate what the CPU actually does with \#PF/\#GP
1422 * during OUTS. */
1423 do
1424 {
1425 OP_TYPE uValue;
1426 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1427 if (rcStrict != VINF_SUCCESS)
1428 return rcStrict;
1429
1430 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1431 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1432 else
1433 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1434 if (IOM_SUCCESS(rcStrict))
1435 {
1436 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1437 pCtx->ADDR_rCX = --uCounterReg;
1438 cLeftPage--;
1439 }
1440 if (rcStrict != VINF_SUCCESS)
1441 {
1442 if (IOM_SUCCESS(rcStrict))
1443 {
1444 if (uCounterReg == 0)
1445 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1446 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1447 }
1448 return rcStrict;
1449 }
1450 } while ((int32_t)cLeftPage > 0);
1451 } while (uCounterReg != 0);
1452
1453 /*
1454 * Done.
1455 */
1456 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1457 return VINF_SUCCESS;
1458}
1459
1460#endif /* OP_SIZE != 64 */
1461
1462
1463#undef OP_rAX
1464#undef OP_SIZE
1465#undef ADDR_SIZE
1466#undef ADDR_rDI
1467#undef ADDR_rSI
1468#undef ADDR_rCX
1469#undef ADDR_rIP
1470#undef ADDR2_TYPE
1471#undef ADDR_TYPE
1472#undef ADDR2_TYPE
1473#undef IS_64_BIT_CODE
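/* Illustrative instantiation sketch (an assumption, not part of this file):
   the IEM core is expected to pull this template in once per size combination
   along these lines; the exact includer and the full size matrix are not
   shown here:

       #define OP_SIZE   16
       #define ADDR_SIZE 32
       #include "IEMAllCImplStrInstr.cpp.h"   // emits iemCImpl_repe_cmps_op16_addr32, iemCImpl_rep_movs_op16_addr32, ...

   The #undef block above is what makes the repeated inclusion possible. */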