VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@48126

Last change on this file since 48126 was 47740, checked in by vboxsync, 11 years ago

IEM: Implemented RF. Working on verification against VT-x.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 52.9 KB
1/* $Id: IEMAllCImplStrInstr.cpp.h 47740 2013-08-14 19:55:03Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50# define IS_64_BIT_CODE(a_pIemCpu) (true)
51#else
52# error "Bad ADDR_SIZE."
53#endif
54#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
55
56#if ADDR_SIZE == 64 || OP_SIZE == 64
57# define IS_64_BIT_CODE(a_pIemCpu) (true)
58#elif ADDR_SIZE == 32
59# define IS_64_BIT_CODE(a_pIemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)
60#else
61# define IS_64_BIT_CODE(a_pIemCpu) (false)
62#endif
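/*
 * Illustrative note: this template is #included with OP_SIZE and ADDR_SIZE
 * defined by the includer (hence the #error checks above and the #undef block
 * at the end of the file).  As an example expansion, OP_SIZE=16 and
 * ADDR_SIZE=32 give OP_rAX=ax, OP_TYPE=uint16_t, ADDR_rSI=esi, ADDR_rDI=edi,
 * ADDR_rCX=ecx, ADDR_TYPE=uint32_t and ADDR2_TYPE=uint32_t, so the first
 * worker below, RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE),
 * becomes iemCImpl_repe_cmps_op16_addr32, i.e. REPE CMPSW with a 32-bit
 * address size.
 */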
63
64
65/**
66 * Implements 'REPE CMPS'.
67 */
68IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
69{
70 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
71
72 /*
73 * Setup.
74 */
75 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
76 if (uCounterReg == 0)
77 {
78 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
79 return VINF_SUCCESS;
80 }
81
82 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
83 uint64_t uSrc1Base;
84 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
85 if (rcStrict != VINF_SUCCESS)
86 return rcStrict;
87
88 uint64_t uSrc2Base;
89 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
90 if (rcStrict != VINF_SUCCESS)
91 return rcStrict;
92
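 /* The direction flag (DF) selects whether rSI/rDI walk up or down memory,
    so the per-item increment below is plus or minus the operand size in bytes. */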
93 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
94 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
95 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
96 uint32_t uEFlags = pCtx->eflags.u;
97
98 /*
99 * The loop.
100 */
101 do
102 {
103 /*
104 * Do segmentation and virtual page stuff.
105 */
106 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
107 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
108 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
109 if (cLeftSrc1Page > uCounterReg)
110 cLeftSrc1Page = uCounterReg;
111 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
112 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
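 /* cLeftPage is now the number of items that can be processed before either
    operand runs off its current page, capped by the remaining iteration count. */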
113
114 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
115 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
116 && ( IS_64_BIT_CODE(pIemCpu)
117 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
118 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
119 && uSrc2AddrReg < pCtx->es.u32Limit
120 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
121 )
122 )
123 {
124 RTGCPHYS GCPhysSrc1Mem;
125 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
126 if (rcStrict != VINF_SUCCESS)
127 return rcStrict;
128
129 RTGCPHYS GCPhysSrc2Mem;
130 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
131 if (rcStrict != VINF_SUCCESS)
132 return rcStrict;
133
134 /*
135 * If we can map the page without trouble, do a block processing
136 * until the end of the current page.
137 */
138 PGMPAGEMAPLOCK PgLockSrc2Mem;
139 OP_TYPE const *puSrc2Mem;
140 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
141 if (rcStrict == VINF_SUCCESS)
142 {
143 PGMPAGEMAPLOCK PgLockSrc1Mem;
144 OP_TYPE const *puSrc1Mem;
145 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
146 if (rcStrict == VINF_SUCCESS)
147 {
148 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
149 {
150 /* All matches, only compare the last item to get the right eflags. */
151 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
152 uSrc1AddrReg += cLeftPage * cbIncr;
153 uSrc2AddrReg += cLeftPage * cbIncr;
154 uCounterReg -= cLeftPage;
155 }
156 else
157 {
158 /* Some mismatch, compare each item (and keep volatile
159 memory in mind). */
160 uint32_t off = 0;
161 do
162 {
163 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
164 off++;
165 } while ( off < cLeftPage
166 && (uEFlags & X86_EFL_ZF));
167 uSrc1AddrReg += cbIncr * off;
168 uSrc2AddrReg += cbIncr * off;
169 uCounterReg -= off;
170 }
171
172 /* Update the registers before looping. */
173 pCtx->ADDR_rCX = uCounterReg;
174 pCtx->ADDR_rSI = uSrc1AddrReg;
175 pCtx->ADDR_rDI = uSrc2AddrReg;
176 pCtx->eflags.u = uEFlags;
177
178 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
179 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
180 continue;
181 }
182 }
183 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
184 }
185
186 /*
187 * Fallback - slow processing till the end of the current page.
188 * In the cross page boundary case we will end up here with cLeftPage
189 * as 0, in which case we execute one round of the loop.
190 */
191 do
192 {
193 OP_TYPE uValue1;
194 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
195 if (rcStrict != VINF_SUCCESS)
196 return rcStrict;
197 OP_TYPE uValue2;
198 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
199 if (rcStrict != VINF_SUCCESS)
200 return rcStrict;
201 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
202
203 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
204 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
205 pCtx->ADDR_rCX = --uCounterReg;
206 pCtx->eflags.u = uEFlags;
207 cLeftPage--;
208 } while ( (int32_t)cLeftPage > 0
209 && (uEFlags & X86_EFL_ZF));
210 } while ( uCounterReg != 0
211 && (uEFlags & X86_EFL_ZF));
212
213 /*
214 * Done.
215 */
216 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
217 return VINF_SUCCESS;
218}
219
220
221/**
222 * Implements 'REPNE CMPS'.
223 */
224IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
225{
226 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
227
228 /*
229 * Setup.
230 */
231 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
232 if (uCounterReg == 0)
233 {
234 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
235 return VINF_SUCCESS;
236 }
237
238 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
239 uint64_t uSrc1Base;
240 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
241 if (rcStrict != VINF_SUCCESS)
242 return rcStrict;
243
244 uint64_t uSrc2Base;
245 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
246 if (rcStrict != VINF_SUCCESS)
247 return rcStrict;
248
249 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
250 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
251 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
252 uint32_t uEFlags = pCtx->eflags.u;
253
254 /*
255 * The loop.
256 */
257 do
258 {
259 /*
260 * Do segmentation and virtual page stuff.
261 */
262 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
263 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
264 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 if (cLeftSrc1Page > uCounterReg)
266 cLeftSrc1Page = uCounterReg;
267 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
268 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
269
270 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
271 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
272 && ( IS_64_BIT_CODE(pIemCpu)
273 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
274 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
275 && uSrc2AddrReg < pCtx->es.u32Limit
276 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
277 )
278 )
279 {
280 RTGCPHYS GCPhysSrc1Mem;
281 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
282 if (rcStrict != VINF_SUCCESS)
283 return rcStrict;
284
285 RTGCPHYS GCPhysSrc2Mem;
286 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
287 if (rcStrict != VINF_SUCCESS)
288 return rcStrict;
289
290 /*
291 * If we can map the page without trouble, do a block processing
292 * until the end of the current page.
293 */
294 OP_TYPE const *puSrc2Mem;
295 PGMPAGEMAPLOCK PgLockSrc2Mem;
296 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
297 if (rcStrict == VINF_SUCCESS)
298 {
299 OP_TYPE const *puSrc1Mem;
300 PGMPAGEMAPLOCK PgLockSrc1Mem;
301 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
302 if (rcStrict == VINF_SUCCESS)
303 {
304 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
305 {
306 /* At least one item differs; compare the last item to get the eflags for the block. */
307 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
308 uSrc1AddrReg += cLeftPage * cbIncr;
309 uSrc2AddrReg += cLeftPage * cbIncr;
310 uCounterReg -= cLeftPage;
311 }
312 else
313 {
314 /* The blocks are identical; compare item by item (the first compare
315 sets ZF and ends the loop; keep volatile memory in mind). */
316 uint32_t off = 0;
317 do
318 {
319 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
320 off++;
321 } while ( off < cLeftPage
322 && !(uEFlags & X86_EFL_ZF));
323 uSrc1AddrReg += cbIncr * off;
324 uSrc2AddrReg += cbIncr * off;
325 uCounterReg -= off;
326 }
327
328 /* Update the registers before looping. */
329 pCtx->ADDR_rCX = uCounterReg;
330 pCtx->ADDR_rSI = uSrc1AddrReg;
331 pCtx->ADDR_rDI = uSrc2AddrReg;
332 pCtx->eflags.u = uEFlags;
333
334 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 continue;
337 }
338 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
339 }
340 }
341
342 /*
343 * Fallback - slow processing till the end of the current page.
344 * In the cross page boundary case we will end up here with cLeftPage
345 * as 0, in which case we execute one round of the loop.
346 */
347 do
348 {
349 OP_TYPE uValue1;
350 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
351 if (rcStrict != VINF_SUCCESS)
352 return rcStrict;
353 OP_TYPE uValue2;
354 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
355 if (rcStrict != VINF_SUCCESS)
356 return rcStrict;
357 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
358
359 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
360 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
361 pCtx->ADDR_rCX = --uCounterReg;
362 pCtx->eflags.u = uEFlags;
363 cLeftPage--;
364 } while ( (int32_t)cLeftPage > 0
365 && !(uEFlags & X86_EFL_ZF));
366 } while ( uCounterReg != 0
367 && !(uEFlags & X86_EFL_ZF));
368
369 /*
370 * Done.
371 */
372 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
373 return VINF_SUCCESS;
374}
375
376
377/**
378 * Implements 'REPE SCAS'.
379 */
380IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
381{
382 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
383
384 /*
385 * Setup.
386 */
387 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
388 if (uCounterReg == 0)
389 {
390 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
391 return VINF_SUCCESS;
392 }
393
394 uint64_t uBaseAddr;
395 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
396 if (rcStrict != VINF_SUCCESS)
397 return rcStrict;
398
399 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
400 OP_TYPE const uValueReg = pCtx->OP_rAX;
401 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
402 uint32_t uEFlags = pCtx->eflags.u;
403
404 /*
405 * The loop.
406 */
407 do
408 {
409 /*
410 * Do segmentation and virtual page stuff.
411 */
412 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418 && ( IS_64_BIT_CODE(pIemCpu)
419 || ( uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
421 )
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do a block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
447
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
453 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross page boundary case we will end up here with cLeftPage
472 * as 0, in which case we execute one round of the loop.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 uint64_t uBaseAddr;
517 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
518 if (rcStrict != VINF_SUCCESS)
519 return rcStrict;
520
521 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
522 OP_TYPE const uValueReg = pCtx->OP_rAX;
523 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
524 uint32_t uEFlags = pCtx->eflags.u;
525
526 /*
527 * The loop.
528 */
529 do
530 {
531 /*
532 * Do segmentation and virtual page stuff.
533 */
534 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
535 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
536 if (cLeftPage > uCounterReg)
537 cLeftPage = uCounterReg;
538 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
539 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
540 && ( IS_64_BIT_CODE(pIemCpu)
541 || ( uAddrReg < pCtx->es.u32Limit
542 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
543 )
544 )
545 {
546 RTGCPHYS GCPhysMem;
547 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
548 if (rcStrict != VINF_SUCCESS)
549 return rcStrict;
550
551 /*
552 * If we can map the page without trouble, do a block processing
553 * until the end of the current page.
554 */
555 PGMPAGEMAPLOCK PgLockMem;
556 OP_TYPE const *puMem;
557 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
558 if (rcStrict == VINF_SUCCESS)
559 {
560 /* Search till we find a matching item. */
561 OP_TYPE uTmpValue;
562 bool fQuit;
563 uint32_t i = 0;
564 do
565 {
566 uTmpValue = puMem[i++];
567 fQuit = uTmpValue == uValueReg;
568 } while (i < cLeftPage && !fQuit);
569
570 /* Update the regs. */
571 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
572 pCtx->ADDR_rCX = uCounterReg -= i;
573 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
574 pCtx->eflags.u = uEFlags;
575 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
576 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
577 if (fQuit)
578 break;
579
580
581 /* If unaligned, we drop thru and do the page crossing access
582 below. Otherwise, do the next page. */
583 if (!(uVirtAddr & (OP_SIZE - 1)))
584 continue;
585 if (uCounterReg == 0)
586 break;
587 cLeftPage = 0;
588 }
589 }
590
591 /*
592 * Fallback - slow processing till the end of the current page.
593 * In the cross page boundary case we will end up here with cLeftPage
594 * as 0, in which case we execute one round of the loop.
595 */
596 do
597 {
598 OP_TYPE uTmpValue;
599 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
600 if (rcStrict != VINF_SUCCESS)
601 return rcStrict;
602 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
603 pCtx->ADDR_rDI = uAddrReg += cbIncr;
604 pCtx->ADDR_rCX = --uCounterReg;
605 pCtx->eflags.u = uEFlags;
606 cLeftPage--;
607 } while ( (int32_t)cLeftPage > 0
608 && !(uEFlags & X86_EFL_ZF));
609 } while ( uCounterReg != 0
610 && !(uEFlags & X86_EFL_ZF));
611
612 /*
613 * Done.
614 */
615 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
616 return VINF_SUCCESS;
617}
618
619
620
621
622/**
623 * Implements 'REP MOVS'.
624 */
625IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
626{
627 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
628
629 /*
630 * Setup.
631 */
632 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
633 if (uCounterReg == 0)
634 {
635 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
636 return VINF_SUCCESS;
637 }
638
639 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
640 uint64_t uSrcBase;
641 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase);
642 if (rcStrict != VINF_SUCCESS)
643 return rcStrict;
644
645 uint64_t uDstBase;
646 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uDstBase);
647 if (rcStrict != VINF_SUCCESS)
648 return rcStrict;
649
650 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
651 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
652 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
653
654 /*
655 * Be careful with handle bypassing.
656 */
657 if (pIemCpu->fBypassHandlers)
658 {
659 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
660 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
661 }
662
663 /*
664 * If we're reading back what we write, we have to let the verification code
665 * know about it so it can avoid a false positive.
666 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
667 */
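/* Illustrative example (hypothetical values): with DF=0, OP_SIZE=8, rSI=0x100,
   rDI=0x104 and rCX=8, the source range [0x100,0x108) covers rDI, so the copy
   would read back bytes it has already written. */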
668#ifdef IEM_VERIFICATION_MODE_FULL
669 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
670 && (cbIncr > 0
671 ? uSrcAddrReg <= uDstAddrReg
672 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
673 : uDstAddrReg <= uSrcAddrReg
674 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
675 pIemCpu->fOverlappingMovs = true;
676#endif
677
678 /*
679 * The loop.
680 */
681 do
682 {
683 /*
684 * Do segmentation and virtual page stuff.
685 */
686 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
687 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
688 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
689 if (cLeftSrcPage > uCounterReg)
690 cLeftSrcPage = uCounterReg;
691 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
692 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
693
694 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
695 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
696 && ( IS_64_BIT_CODE(pIemCpu)
697 || ( uSrcAddrReg < pSrcHid->u32Limit
698 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
699 && uDstAddrReg < pCtx->es.u32Limit
700 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
701 )
702 )
703 {
704 RTGCPHYS GCPhysSrcMem;
705 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
706 if (rcStrict != VINF_SUCCESS)
707 return rcStrict;
708
709 RTGCPHYS GCPhysDstMem;
710 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
711 if (rcStrict != VINF_SUCCESS)
712 return rcStrict;
713
714 /*
715 * If we can map the page without trouble, do a block processing
716 * until the end of the current page.
717 */
718 PGMPAGEMAPLOCK PgLockDstMem;
719 OP_TYPE *puDstMem;
720 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
721 if (rcStrict == VINF_SUCCESS)
722 {
723 PGMPAGEMAPLOCK PgLockSrcMem;
724 OP_TYPE const *puSrcMem;
725 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
726 if (rcStrict == VINF_SUCCESS)
727 {
728 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
729 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
730
731 /* Perform the operation exactly (don't use memcpy to avoid
732 having to consider how its implementation would affect
733 any overlapping source and destination area). */
734 OP_TYPE const *puSrcCur = puSrcMem;
735 OP_TYPE *puDstCur = puDstMem;
736 uint32_t cTodo = cLeftPage;
737 while (cTodo-- > 0)
738 *puDstCur++ = *puSrcCur++;
739
740 /* Update the registers. */
741 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
742 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
743 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
744
745 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
746 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
747 continue;
748 }
749 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
750 }
751 }
752
753 /*
754 * Fallback - slow processing till the end of the current page.
755 * In the cross page boundary case we will end up here with cLeftPage
756 * as 0, in which case we execute one round of the loop.
757 */
758 do
759 {
760 OP_TYPE uValue;
761 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
762 if (rcStrict != VINF_SUCCESS)
763 return rcStrict;
764 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
765 if (rcStrict != VINF_SUCCESS)
766 return rcStrict;
767
768 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
769 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
770 pCtx->ADDR_rCX = --uCounterReg;
771 cLeftPage--;
772 } while ((int32_t)cLeftPage > 0);
773 } while (uCounterReg != 0);
774
775 /*
776 * Done.
777 */
778 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
779 return VINF_SUCCESS;
780}
781
782
783/**
784 * Implements 'REP STOS'.
785 */
786IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
787{
788 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
789
790 /*
791 * Setup.
792 */
793 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
794 if (uCounterReg == 0)
795 {
796 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
797 return VINF_SUCCESS;
798 }
799
800 uint64_t uBaseAddr;
801 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
802 if (rcStrict != VINF_SUCCESS)
803 return rcStrict;
804
805 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
806 OP_TYPE const uValue = pCtx->OP_rAX;
807 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
808
809 /*
810 * Be careful with handle bypassing.
811 */
812 /** @todo Permit doing a page if correctly aligned. */
813 if (pIemCpu->fBypassHandlers)
814 {
815 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
816 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
817 }
818
819 /*
820 * The loop.
821 */
822 do
823 {
824 /*
825 * Do segmentation and virtual page stuff.
826 */
827 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
828 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
829 if (cLeftPage > uCounterReg)
830 cLeftPage = uCounterReg;
831 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
832 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
833 && ( IS_64_BIT_CODE(pIemCpu)
834 || ( uAddrReg < pCtx->es.u32Limit
835 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
836 )
837 )
838 {
839 RTGCPHYS GCPhysMem;
840 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
841 if (rcStrict != VINF_SUCCESS)
842 return rcStrict;
843
844 /*
845 * If we can map the page without trouble, do a block processing
846 * until the end of the current page.
847 */
848 PGMPAGEMAPLOCK PgLockMem;
849 OP_TYPE *puMem;
850 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
851 if (rcStrict == VINF_SUCCESS)
852 {
853 /* Update the regs first so we can loop on cLeftPage. */
854 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
855 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
856
857 /* Do the memsetting. */
858#if OP_SIZE == 8
859 memset(puMem, uValue, cLeftPage);
860/*#elif OP_SIZE == 32
861 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
862#else
863 while (cLeftPage-- > 0)
864 *puMem++ = uValue;
865#endif
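 /* (memset only fits the byte variant since it replicates a single byte; the
     wider variants store the whole OP_TYPE value item by item.) */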
866
867 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
868
869 /* If unaligned, we drop thru and do the page crossing access
870 below. Otherwise, do the next page. */
871 if (!(uVirtAddr & (OP_SIZE - 1)))
872 continue;
873 if (uCounterReg == 0)
874 break;
875 cLeftPage = 0;
876 }
877 }
878
879 /*
880 * Fallback - slow processing till the end of the current page.
881 * In the cross page boundary case we will end up here with cLeftPage
882 * as 0, in which case we execute one round of the loop.
883 */
884 do
885 {
886 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
887 if (rcStrict != VINF_SUCCESS)
888 return rcStrict;
889 pCtx->ADDR_rDI = uAddrReg += cbIncr;
890 pCtx->ADDR_rCX = --uCounterReg;
891 cLeftPage--;
892 } while ((int32_t)cLeftPage > 0);
893 } while (uCounterReg != 0);
894
895 /*
896 * Done.
897 */
898 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
899 return VINF_SUCCESS;
900}
901
902
903/**
904 * Implements 'REP LODS'.
905 */
906IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
907{
908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
909
910 /*
911 * Setup.
912 */
913 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
914 if (uCounterReg == 0)
915 {
916 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
917 return VINF_SUCCESS;
918 }
919
920 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
921 uint64_t uBaseAddr;
922 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr);
923 if (rcStrict != VINF_SUCCESS)
924 return rcStrict;
925
926 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
927 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
928
929 /*
930 * The loop.
931 */
932 do
933 {
934 /*
935 * Do segmentation and virtual page stuff.
936 */
937 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
938 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
939 if (cLeftPage > uCounterReg)
940 cLeftPage = uCounterReg;
941 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
942 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
943 && ( IS_64_BIT_CODE(pIemCpu)
944 || ( uAddrReg < pSrcHid->u32Limit
945 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
946 )
947 )
948 {
949 RTGCPHYS GCPhysMem;
950 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
951 if (rcStrict != VINF_SUCCESS)
952 return rcStrict;
953
954 /*
955 * If we can map the page without trouble, we can get away with
956 * just reading the last value on the page.
957 */
958 PGMPAGEMAPLOCK PgLockMem;
959 OP_TYPE const *puMem;
960 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
961 if (rcStrict == VINF_SUCCESS)
962 {
963 /* Only get the last item, the rest doesn't matter in direct access mode. */
964#if OP_SIZE == 32
965 pCtx->rax = puMem[cLeftPage - 1];
966#else
967 pCtx->OP_rAX = puMem[cLeftPage - 1];
968#endif
969 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
970 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
971 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
972
973 /* If unaligned, we drop thru and do the page crossing access
974 below. Otherwise, do the next page. */
975 if (!(uVirtAddr & (OP_SIZE - 1)))
976 continue;
977 if (uCounterReg == 0)
978 break;
979 cLeftPage = 0;
980 }
981 }
982
983 /*
984 * Fallback - slow processing till the end of the current page.
985 * In the cross page boundary case we will end up here with cLeftPage
986 * as 0, in which case we execute one round of the loop.
987 */
988 do
989 {
990 OP_TYPE uTmpValue;
991 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
992 if (rcStrict != VINF_SUCCESS)
993 return rcStrict;
994#if OP_SIZE == 32
995 pCtx->rax = uTmpValue;
996#else
997 pCtx->OP_rAX = uTmpValue;
998#endif
999 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1000 pCtx->ADDR_rCX = --uCounterReg;
1001 cLeftPage--;
1002 } while ((int32_t)cLeftPage > 0);
1003 if (rcStrict != VINF_SUCCESS)
1004 break;
1005 } while (uCounterReg != 0);
1006
1007 /*
1008 * Done.
1009 */
1010 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1011 return VINF_SUCCESS;
1012}
1013
1014
1015#if OP_SIZE != 64
1016
1017/**
1018 * Implements 'INS' (no rep)
1019 */
1020IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1021{
1022 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1023 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1024 VBOXSTRICTRC rcStrict;
1025
1026 /*
1027 * Be careful with handle bypassing.
1028 */
1029 if (pIemCpu->fBypassHandlers)
1030 {
1031 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1032 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1033 }
1034
1035 /*
1036 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1037 * segmentation and finally any #PF due to virtual address translation.
1038 * ASSUMES nothing is read from the I/O port before traps are taken.
1039 */
1040 if (!fIoChecked)
1041 {
1042 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1043 if (rcStrict != VINF_SUCCESS)
1044 return rcStrict;
1045 }
1046
1047 OP_TYPE *puMem;
1048 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1049 if (rcStrict != VINF_SUCCESS)
1050 return rcStrict;
1051
1052 uint32_t u32Value = 0;
1053 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1054 rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);
1055 else
1056 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1057 if (IOM_SUCCESS(rcStrict))
1058 {
1059 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1060 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1061 {
1062 if (!pCtx->eflags.Bits.u1DF)
1063 pCtx->ADDR_rDI += OP_SIZE / 8;
1064 else
1065 pCtx->ADDR_rDI -= OP_SIZE / 8;
1066 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1067 }
1068 /* iemMemMap already checked the permissions, so these can only be real errors
1069 or access handlers meddling. The access handler case is going to
1070 cause misbehavior if the instruction is re-interpreted or something, so
1071 we fail with an internal error here instead. */
1072 else
1073 AssertLogRelFailedReturn(VERR_IEM_IPE_1);
1074 }
1075 return rcStrict;
1076}
1077
1078
1079/**
1080 * Implements 'REP INS'.
1081 */
1082IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1083{
1084 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1085 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1086 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1087
1088 /*
1089 * Setup.
1090 */
1091 uint16_t const u16Port = pCtx->dx;
1092 VBOXSTRICTRC rcStrict;
1093 if (!fIoChecked)
1094 {
1095 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1096 if (rcStrict != VINF_SUCCESS)
1097 return rcStrict;
1098 }
1099
1100 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1101 if (uCounterReg == 0)
1102 {
1103 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1104 return VINF_SUCCESS;
1105 }
1106
1107 uint64_t uBaseAddr;
1108 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
1109 if (rcStrict != VINF_SUCCESS)
1110 return rcStrict;
1111
1112 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1113 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1114
1115 /*
1116 * Be careful with handle bypassing.
1117 */
1118 if (pIemCpu->fBypassHandlers)
1119 {
1120 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1121 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1122 }
1123
1124 /*
1125 * The loop.
1126 */
1127 do
1128 {
1129 /*
1130 * Do segmentation and virtual page stuff.
1131 */
1132 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1133 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1134 if (cLeftPage > uCounterReg)
1135 cLeftPage = uCounterReg;
1136 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1137 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1138 && ( IS_64_BIT_CODE(pIemCpu)
1139 || ( uAddrReg < pCtx->es.u32Limit
1140 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
1141 )
1142 )
1143 {
1144 RTGCPHYS GCPhysMem;
1145 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1146 if (rcStrict != VINF_SUCCESS)
1147 return rcStrict;
1148
1149 /*
1150 * If we can map the page without trouble, we would've liked to use
1151 * a string I/O method to do the work, but the current IOM
1152 * interface doesn't match our current approach. So, do a regular
1153 * loop instead.
1154 */
1155 /** @todo Change the I/O manager interface to make use of
1156 * mapped buffers instead of leaving those bits to the
1157 * device implementation! */
1158 PGMPAGEMAPLOCK PgLockMem;
1159 OP_TYPE *puMem;
1160 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1161 if (rcStrict == VINF_SUCCESS)
1162 {
1163 uint32_t off = 0;
1164 while (off < cLeftPage)
1165 {
1166 uint32_t u32Value;
1167 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1168 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1169 else
1170 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1171 if (IOM_SUCCESS(rcStrict))
1172 {
1173 puMem[off] = (OP_TYPE)u32Value;
1174 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1175 pCtx->ADDR_rCX = --uCounterReg;
1176 }
1177 if (rcStrict != VINF_SUCCESS)
1178 {
1179 if (IOM_SUCCESS(rcStrict))
1180 {
1181 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1182 if (uCounterReg == 0)
1183 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1184 }
1185 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1186 return rcStrict;
1187 }
1188 off++;
1189 }
1190 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1191
1192 /* If unaligned, we drop thru and do the page crossing access
1193 below. Otherwise, do the next page. */
1194 if (!(uVirtAddr & (OP_SIZE - 1)))
1195 continue;
1196 if (uCounterReg == 0)
1197 break;
1198 cLeftPage = 0;
1199 }
1200 }
1201
1202 /*
1203 * Fallback - slow processing till the end of the current page.
1204 * In the cross page boundary case we will end up here with cLeftPage
1205 * as 0, in which case we execute one round of the loop.
1206 *
1207 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1208 * I/O port, otherwise it wouldn't really be restartable.
1209 */
1210 /** @todo investigate what the CPU actually does with \#PF/\#GP
1211 * during INS. */
1212 do
1213 {
1214 OP_TYPE *puMem;
1215 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1216 if (rcStrict != VINF_SUCCESS)
1217 return rcStrict;
1218
1219 uint32_t u32Value = 0;
1220 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1221 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1222 else
1223 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1224 if (!IOM_SUCCESS(rcStrict))
1225 return rcStrict;
1226
1227 *puMem = (OP_TYPE)u32Value;
1228 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1229 AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */
1230
1231 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1232 pCtx->ADDR_rCX = --uCounterReg;
1233
1234 cLeftPage--;
1235 if (rcStrict != VINF_SUCCESS)
1236 {
1237 if (uCounterReg == 0)
1238 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1239 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1240 return rcStrict;
1241 }
1242 } while ((int32_t)cLeftPage > 0);
1243 } while (uCounterReg != 0);
1244
1245 /*
1246 * Done.
1247 */
1248 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1249 return VINF_SUCCESS;
1250}
1251
1252
1253/**
1254 * Implements 'OUTS' (no rep)
1255 */
1256IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1257{
1258 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1259 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1260 VBOXSTRICTRC rcStrict;
1261
1262 /*
1263 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1264 * segmentation and finally any #PF due to virtual address translation.
1265 * ASSUMES nothing is read from the I/O port before traps are taken.
1266 */
1267 if (!fIoChecked)
1268 {
1269 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1270 if (rcStrict != VINF_SUCCESS)
1271 return rcStrict;
1272 }
1273
1274 OP_TYPE uValue;
1275 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1276 if (rcStrict == VINF_SUCCESS)
1277 {
1278 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1279 rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);
1280 else
1281 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1282 if (IOM_SUCCESS(rcStrict))
1283 {
1284 if (!pCtx->eflags.Bits.u1DF)
1285 pCtx->ADDR_rSI += OP_SIZE / 8;
1286 else
1287 pCtx->ADDR_rSI -= OP_SIZE / 8;
1288 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1289 if (rcStrict != VINF_SUCCESS)
1290 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1291 }
1292 }
1293 return rcStrict;
1294}
1295
1296
1297/**
1298 * Implements 'REP OUTS'.
1299 */
1300IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1301{
1302 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1303 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1304 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1305
1306 /*
1307 * Setup.
1308 */
1309 uint16_t const u16Port = pCtx->dx;
1310 VBOXSTRICTRC rcStrict;
1311 if (!fIoChecked)
1312 {
1313 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1314 if (rcStrict != VINF_SUCCESS)
1315 return rcStrict;
1316 }
1317
1318 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1319 if (uCounterReg == 0)
1320 {
1321 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1322 return VINF_SUCCESS;
1323 }
1324
1325 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1326 uint64_t uBaseAddr;
1327 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr);
1328 if (rcStrict != VINF_SUCCESS)
1329 return rcStrict;
1330
1331 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1332 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1333
1334 /*
1335 * The loop.
1336 */
1337 do
1338 {
1339 /*
1340 * Do segmentation and virtual page stuff.
1341 */
1342 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1343 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1344 if (cLeftPage > uCounterReg)
1345 cLeftPage = uCounterReg;
1346 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1347 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1348 && ( IS_64_BIT_CODE(pIemCpu)
1349 || ( uAddrReg < pHid->u32Limit
1350 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1351 )
1352 )
1353 {
1354 RTGCPHYS GCPhysMem;
1355 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1356 if (rcStrict != VINF_SUCCESS)
1357 return rcStrict;
1358
1359 /*
1360 * If we can map the page without trouble, we would've liked to use
1361 * a string I/O method to do the work, but the current IOM
1362 * interface doesn't match our current approach. So, do a regular
1363 * loop instead.
1364 */
1365 /** @todo Change the I/O manager interface to make use of
1366 * mapped buffers instead of leaving those bits to the
1367 * device implementation? */
1368 PGMPAGEMAPLOCK PgLockMem;
1369 OP_TYPE const *puMem;
1370 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1371 if (rcStrict == VINF_SUCCESS)
1372 {
1373 uint32_t off = 0;
1374 while (off < cLeftPage)
1375 {
1376 uint32_t u32Value = *puMem++;
1377 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1378 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, OP_SIZE / 8);
1379 else
1380 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1381 if (IOM_SUCCESS(rcStrict))
1382 {
1383 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1384 pCtx->ADDR_rCX = --uCounterReg;
1385 }
1386 if (rcStrict != VINF_SUCCESS)
1387 {
1388 if (IOM_SUCCESS(rcStrict))
1389 {
1390 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1391 if (uCounterReg == 0)
1392 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1393 }
1394 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1395 return rcStrict;
1396 }
1397 off++;
1398 }
1399 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1400
1401 /* If unaligned, we drop thru and do the page crossing access
1402 below. Otherwise, do the next page. */
1403 if (!(uVirtAddr & (OP_SIZE - 1)))
1404 continue;
1405 if (uCounterReg == 0)
1406 break;
1407 cLeftPage = 0;
1408 }
1409 }
1410
1411 /*
1412 * Fallback - slow processing till the end of the current page.
1413 * In the cross page boundary case we will end up here with cLeftPage
1414 * as 0, in which case we execute one round of the loop.
1415 *
1416 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1417 * I/O port, otherwise it wouldn't really be restartable.
1418 */
1419 /** @todo investigate what the CPU actually does with \#PF/\#GP
1420 * during OUTS. */
1421 do
1422 {
1423 OP_TYPE uValue;
1424 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1425 if (rcStrict != VINF_SUCCESS)
1426 return rcStrict;
1427
1428 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1429 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1430 else
1431 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1432 if (IOM_SUCCESS(rcStrict))
1433 {
1434 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1435 pCtx->ADDR_rCX = --uCounterReg;
1436 cLeftPage--;
1437 }
1438 if (rcStrict != VINF_SUCCESS)
1439 {
1440 if (IOM_SUCCESS(rcStrict))
1441 {
1442 if (uCounterReg == 0)
1443 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1444 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1445 }
1446 return rcStrict;
1447 }
1448 } while ((int32_t)cLeftPage > 0);
1449 } while (uCounterReg != 0);
1450
1451 /*
1452 * Done.
1453 */
1454 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1455 return VINF_SUCCESS;
1456}
1457
1458#endif /* OP_SIZE != 64 */
1459
1460
1461#undef OP_rAX
1462#undef OP_SIZE
1463#undef ADDR_SIZE
1464#undef ADDR_rDI
1465#undef ADDR_rSI
1466#undef ADDR_rCX
1467#undef ADDR_rIP
1468#undef ADDR2_TYPE
1469#undef ADDR_TYPE
1470#undef ADDR2_TYPE
1471#undef IS_64_BIT_CODE