VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@ 73912

Last change on this file since 73912 was 72712, checked in by vboxsync, 6 years ago

IEM: Fixed regression in IEMExecStringIoRead from r123057 where writes postponed to ring-3 would accidentally be rolled back.

1/* $Id: IEMAllCImplStrInstr.cpp.h 72712 2018-06-28 08:47:47Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50# define IS_64_BIT_CODE(a_pVCpu) (true)
51#else
52# error "Bad ADDR_SIZE."
53#endif
54#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
55
56#if ADDR_SIZE == 64 || OP_SIZE == 64
57# define IS_64_BIT_CODE(a_pVCpu) (true)
58#elif ADDR_SIZE == 32
59# define IS_64_BIT_CODE(a_pVCpu) ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT)
60#else
61# define IS_64_BIT_CODE(a_pVCpu) (false)
62#endif
63
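/*
 * Usage sketch (illustrative assumption, not spelled out in this file): the
 * including source is expected to define OP_SIZE and ADDR_SIZE before each
 * inclusion and may include this template repeatedly to stamp out all the
 * operand/address size combinations, e.g.:
 *
 *     #define OP_SIZE   16
 *     #define ADDR_SIZE 32
 *     #include "IEMAllCImplStrInstr.cpp.h"
 *
 * which would instantiate iemCImpl_repe_cmps_op16_addr32 and friends through
 * the RT_CONCAT* name pasting used below; the #undef block at the end of the
 * file clears the parameters for the next inclusion.
 */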
64/** @def IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
 65 * Used in the outer (page-by-page) loop to check for reasons for returning
66 * before completing the instruction. In raw-mode we temporarily enable
67 * interrupts to let the host interrupt us. We cannot let big string operations
68 * hog the CPU, especially not in raw-mode.
69 */
70#ifdef IN_RC
71# define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \
72 do { \
73 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
74 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
75 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK) \
76 )) \
77 { \
78 RTCCUINTREG fSavedFlags = ASMGetFlags(); \
79 if (!(fSavedFlags & X86_EFL_IF)) \
80 { \
81 ASMSetFlags(fSavedFlags | X86_EFL_IF); \
82 ASMNopPause(); \
83 ASMSetFlags(fSavedFlags); \
84 } \
85 } \
86 else \
87 { \
88 LogFlow(("%s: Leaving early (outer)! ffcpu=%#x ffvm=%#x\n", \
89 __FUNCTION__, (a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
90 return VINF_SUCCESS; \
91 } \
92 } while (0)
93#else
94# define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \
95 do { \
96 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
97 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
98 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK) \
99 )) \
100 { /* probable */ } \
101 else \
102 { \
103 LogFlow(("%s: Leaving early (outer)! ffcpu=%#x ffvm=%#x\n", \
104 __FUNCTION__, (a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
105 return VINF_SUCCESS; \
106 } \
107 } while (0)
108#endif
109
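/*
 * Rough shape of what the non-raw-mode variant above amounts to (illustrative
 * only; the mask picked depends on whether X86_EFL_IF is set in a_fEflags):
 *
 *     if (   VMCPU_FF_IS_PENDING(pVCpu, fYieldMask)
 *         || VM_FF_IS_PENDING(pVM, VM_FF_YIELD_REPSTR_MASK))
 *         return VINF_SUCCESS;   // leave without advancing RIP
 *
 * Returning VINF_SUCCESS mid-instruction is fine because the loops below write
 * rCX/rSI/rDI (and EFLAGS where relevant) back to the guest context before
 * invoking this check, so re-executing the instruction resumes where it left
 * off.  (fYieldMask is a placeholder name for the selected VMCPU_FF_* mask.)
 */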
110/** @def IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
111 * This is used in some of the inner loops to make sure we respond immediately
112 * to VMCPU_FF_IOM as well as outside requests. Use this for expensive
113 * instructions. Use IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN for
114 * ones that are typically cheap. */
115#define IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
116 do { \
117 if (RT_LIKELY( ( !VMCPU_FF_IS_PENDING(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
118 && !VM_FF_IS_PENDING(a_pVM, VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK)) \
119 || (a_fExitExpr) )) \
120 { /* very likely */ } \
121 else \
122 { \
123 LogFlow(("%s: Leaving early (inner)! ffcpu=%#x ffvm=%#x\n", \
124 __FUNCTION__, (a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
125 return VINF_SUCCESS; \
126 } \
127 } while (0)
128
129
130/** @def IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
131 * This is used in the inner loops where
132 * IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN isn't used. It only
133 * checks the CPU FFs so that we respond immediately to the pending IOM FF
134 * (status code is hidden in IEMCPU::rcPassUp by IEM memory commit code).
135 */
136#define IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
137 do { \
138 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
139 || (a_fExitExpr) )) \
140 { /* very likely */ } \
141 else \
142 { \
143 LogFlow(("%s: Leaving early (inner)! ffcpu=%#x (ffvm=%#x)\n", \
144 __FUNCTION__, (a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
145 return VINF_SUCCESS; \
146 } \
147 } while (0)
148
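/*
 * Illustrative use of the two inner-loop checks above (both patterns appear
 * verbatim in the bodies below): the comparatively expensive REP MOVS and
 * INS/OUTS fallbacks use
 *
 *     IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
 *
 * while the cheap CMPS/SCAS/STOS/LODS fallbacks get away with the CPU-only
 *
 *     IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
 *
 * The last argument is the instruction's own exit condition; when it is true
 * the early return is skipped since the loop is about to terminate anyway.
 */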
149
150/**
151 * Implements 'REPE CMPS'.
152 */
153IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
154{
155 PVM pVM = pVCpu->CTX_SUFF(pVM);
156
157 /*
158 * Setup.
159 */
160 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
161 if (uCounterReg == 0)
162 {
163 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
164 return VINF_SUCCESS;
165 }
166
167 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
168
169 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
170 uint64_t uSrc1Base;
171 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
172 if (rcStrict != VINF_SUCCESS)
173 return rcStrict;
174
175 uint64_t uSrc2Base;
176 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
177 if (rcStrict != VINF_SUCCESS)
178 return rcStrict;
179
180 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
181 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
182 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
183 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
184
185 /*
186 * The loop.
187 */
188 for (;;)
189 {
190 /*
191 * Do segmentation and virtual page stuff.
192 */
193 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
194 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
195 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
196 if (cLeftSrc1Page > uCounterReg)
197 cLeftSrc1Page = uCounterReg;
198 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
199 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
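        /* Worked example (illustrative numbers): with OP_SIZE=16, uVirtSrc1Addr=0x10ffa
           and uVirtSrc2Addr=0x23004, the source page has (0x1000 - 0xffa)/2 = 3 items
           left and the destination page (0x1000 - 0x004)/2 = 2046, so cLeftPage = 3
           (cLeftSrc1Page is additionally capped by uCounterReg above); the block path
           below therefore handles at most 3 items before re-evaluating the page split. */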
200
201 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
202 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
203 && ( IS_64_BIT_CODE(pVCpu)
204 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
205 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
206 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
207 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
208 )
209 )
210 {
211 RTGCPHYS GCPhysSrc1Mem;
212 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
213 if (rcStrict != VINF_SUCCESS)
214 return rcStrict;
215
216 RTGCPHYS GCPhysSrc2Mem;
217 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
218 if (rcStrict != VINF_SUCCESS)
219 return rcStrict;
220
221 /*
222 * If we can map the page without trouble, do a block processing
223 * until the end of the current page.
224 */
225 PGMPAGEMAPLOCK PgLockSrc2Mem;
226 OP_TYPE const *puSrc2Mem;
227 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
228 if (rcStrict == VINF_SUCCESS)
229 {
230 PGMPAGEMAPLOCK PgLockSrc1Mem;
231 OP_TYPE const *puSrc1Mem;
232 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
233 if (rcStrict == VINF_SUCCESS)
234 {
235 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
236 {
 237 /* All matches, only compare the last item to get the right eflags. */
238 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
239 uSrc1AddrReg += cLeftPage * cbIncr;
240 uSrc2AddrReg += cLeftPage * cbIncr;
241 uCounterReg -= cLeftPage;
242 }
243 else
244 {
245 /* Some mismatch, compare each item (and keep volatile
246 memory in mind). */
247 uint32_t off = 0;
248 do
249 {
250 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
251 off++;
252 } while ( off < cLeftPage
253 && (uEFlags & X86_EFL_ZF));
254 uSrc1AddrReg += cbIncr * off;
255 uSrc2AddrReg += cbIncr * off;
256 uCounterReg -= off;
257 }
258
259 /* Update the registers before looping. */
260 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
261 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
262 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
263 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
264
265 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
266 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
267 if ( uCounterReg == 0
268 || !(uEFlags & X86_EFL_ZF))
269 break;
270 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
271 continue;
272 }
273 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
274 }
275 }
276
277 /*
278 * Fallback - slow processing till the end of the current page.
 279 * In the cross page boundary case we will end up here with cLeftPage
 280 * as 0; we then execute a single iteration.
281 */
282 do
283 {
284 OP_TYPE uValue1;
285 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
286 if (rcStrict != VINF_SUCCESS)
287 return rcStrict;
288 OP_TYPE uValue2;
289 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
290 if (rcStrict != VINF_SUCCESS)
291 return rcStrict;
292 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
293
294 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
295 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
296 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
297 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
298 cLeftPage--;
299 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
300 } while ( (int32_t)cLeftPage > 0
301 && (uEFlags & X86_EFL_ZF));
302
303 /*
304 * Next page? Must check for interrupts and stuff here.
305 */
306 if ( uCounterReg == 0
307 || !(uEFlags & X86_EFL_ZF))
308 break;
309 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
310 }
311
312 /*
313 * Done.
314 */
315 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
316 return VINF_SUCCESS;
317}
318
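/*
 * Architectural recap for both CMPS variants (standard x86 semantics, not
 * anything specific to this implementation), in C-like pseudo code:
 *
 *     do {
 *         cmp(*puSrc1, *puSrc2);              // sets EFLAGS like SUB, result discarded
 *         puSrc1 += cbIncr; puSrc2 += cbIncr; cx--;
 *     } while (cx != 0 && ZF == wantZF);      // REPE: wantZF=1, REPNE: wantZF=0
 *
 * This is why the REPE body above exits on !ZF while the REPNE body below
 * exits on ZF, with rSI/rDI/rCX/EFLAGS committed after every chunk.
 */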
319
320/**
321 * Implements 'REPNE CMPS'.
322 */
323IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
324{
325 PVM pVM = pVCpu->CTX_SUFF(pVM);
326
327 /*
328 * Setup.
329 */
330 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
331 if (uCounterReg == 0)
332 {
333 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
334 return VINF_SUCCESS;
335 }
336
337 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
338
339 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
340 uint64_t uSrc1Base;
341 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
342 if (rcStrict != VINF_SUCCESS)
343 return rcStrict;
344
345 uint64_t uSrc2Base;
346 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
347 if (rcStrict != VINF_SUCCESS)
348 return rcStrict;
349
350 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
351 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
352 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
353 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
354
355 /*
356 * The loop.
357 */
358 for (;;)
359 {
360 /*
361 * Do segmentation and virtual page stuff.
362 */
363 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
364 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
365 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
366 if (cLeftSrc1Page > uCounterReg)
367 cLeftSrc1Page = uCounterReg;
368 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
369 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
370
371 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
372 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
373 && ( IS_64_BIT_CODE(pVCpu)
374 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
375 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
376 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
377 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
378 )
379 )
380 {
381 RTGCPHYS GCPhysSrc1Mem;
382 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
383 if (rcStrict != VINF_SUCCESS)
384 return rcStrict;
385
386 RTGCPHYS GCPhysSrc2Mem;
387 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
388 if (rcStrict != VINF_SUCCESS)
389 return rcStrict;
390
391 /*
392 * If we can map the page without trouble, do a block processing
393 * until the end of the current page.
394 */
395 OP_TYPE const *puSrc2Mem;
396 PGMPAGEMAPLOCK PgLockSrc2Mem;
397 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
398 if (rcStrict == VINF_SUCCESS)
399 {
400 OP_TYPE const *puSrc1Mem;
401 PGMPAGEMAPLOCK PgLockSrc1Mem;
402 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
403 if (rcStrict == VINF_SUCCESS)
404 {
405 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
406 {
 407 /* All mismatches, only compare the last item to get the right eflags. */
408 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
409 uSrc1AddrReg += cLeftPage * cbIncr;
410 uSrc2AddrReg += cLeftPage * cbIncr;
411 uCounterReg -= cLeftPage;
412 }
413 else
414 {
 415 /* Some match, compare each item (and keep volatile
 416 memory in mind). */
417 uint32_t off = 0;
418 do
419 {
420 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
421 off++;
422 } while ( off < cLeftPage
423 && !(uEFlags & X86_EFL_ZF));
424 uSrc1AddrReg += cbIncr * off;
425 uSrc2AddrReg += cbIncr * off;
426 uCounterReg -= off;
427 }
428
429 /* Update the registers before looping. */
430 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
431 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
432 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
433 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
434
435 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
436 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
437 if ( uCounterReg == 0
438 || (uEFlags & X86_EFL_ZF))
439 break;
440 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
441 continue;
442 }
443 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
444 }
445 }
446
447 /*
448 * Fallback - slow processing till the end of the current page.
 449 * In the cross page boundary case we will end up here with cLeftPage
 450 * as 0; we then execute a single iteration.
451 */
452 do
453 {
454 OP_TYPE uValue1;
455 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
456 if (rcStrict != VINF_SUCCESS)
457 return rcStrict;
458 OP_TYPE uValue2;
459 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
460 if (rcStrict != VINF_SUCCESS)
461 return rcStrict;
462 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
463
464 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
465 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
466 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
467 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
468 cLeftPage--;
469 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
470 } while ( (int32_t)cLeftPage > 0
471 && !(uEFlags & X86_EFL_ZF));
472
473 /*
474 * Next page? Must check for interrupts and stuff here.
475 */
476 if ( uCounterReg == 0
477 || (uEFlags & X86_EFL_ZF))
478 break;
479 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
480 }
481
482 /*
483 * Done.
484 */
485 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
486 return VINF_SUCCESS;
487}
488
489
490/**
491 * Implements 'REPE SCAS'.
492 */
493IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
494{
495 PVM pVM = pVCpu->CTX_SUFF(pVM);
496
497 /*
498 * Setup.
499 */
500 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
501 if (uCounterReg == 0)
502 {
503 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
504 return VINF_SUCCESS;
505 }
506
507 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
508 uint64_t uBaseAddr;
509 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
510 if (rcStrict != VINF_SUCCESS)
511 return rcStrict;
512
513 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
514 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
515 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
516 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
517
518 /*
519 * The loop.
520 */
521 for (;;)
522 {
523 /*
524 * Do segmentation and virtual page stuff.
525 */
526 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
527 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
528 if (cLeftPage > uCounterReg)
529 cLeftPage = uCounterReg;
530 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
531 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
532 && ( IS_64_BIT_CODE(pVCpu)
533 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
534 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
535 )
536 )
537 {
538 RTGCPHYS GCPhysMem;
539 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
540 if (rcStrict != VINF_SUCCESS)
541 return rcStrict;
542
543 /*
544 * If we can map the page without trouble, do a block processing
545 * until the end of the current page.
546 */
547 PGMPAGEMAPLOCK PgLockMem;
548 OP_TYPE const *puMem;
549 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
550 if (rcStrict == VINF_SUCCESS)
551 {
552 /* Search till we find a mismatching item. */
553 OP_TYPE uTmpValue;
554 bool fQuit;
555 uint32_t i = 0;
556 do
557 {
558 uTmpValue = puMem[i++];
559 fQuit = uTmpValue != uValueReg;
560 } while (i < cLeftPage && !fQuit);
561
562 /* Update the regs. */
563 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
564 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
565 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
566 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
567 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
568 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
569 if ( fQuit
570 || uCounterReg == 0)
571 break;
572
573 /* If unaligned, we drop thru and do the page crossing access
574 below. Otherwise, do the next page. */
575 if (!(uVirtAddr & (OP_SIZE - 1)))
576 {
577 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
578 continue;
579 }
580 cLeftPage = 0;
581 }
582 }
583
584 /*
585 * Fallback - slow processing till the end of the current page.
 586 * In the cross page boundary case we will end up here with cLeftPage
 587 * as 0; we then execute a single iteration.
588 */
589 do
590 {
591 OP_TYPE uTmpValue;
592 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
593 if (rcStrict != VINF_SUCCESS)
594 return rcStrict;
595 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
596
597 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
598 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
599 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
600 cLeftPage--;
601 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
602 } while ( (int32_t)cLeftPage > 0
603 && (uEFlags & X86_EFL_ZF));
604
605 /*
606 * Next page? Must check for interrupts and stuff here.
607 */
608 if ( uCounterReg == 0
609 || !(uEFlags & X86_EFL_ZF))
610 break;
611 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
612 }
613
614 /*
615 * Done.
616 */
617 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
618 return VINF_SUCCESS;
619}
620
621
622/**
623 * Implements 'REPNE SCAS'.
624 */
625IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
626{
627 PVM pVM = pVCpu->CTX_SUFF(pVM);
628
629 /*
630 * Setup.
631 */
632 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
633 if (uCounterReg == 0)
634 {
635 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
636 return VINF_SUCCESS;
637 }
638
639 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
640 uint64_t uBaseAddr;
641 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
642 if (rcStrict != VINF_SUCCESS)
643 return rcStrict;
644
645 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
646 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
647 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
648 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
649
650 /*
651 * The loop.
652 */
653 for (;;)
654 {
655 /*
656 * Do segmentation and virtual page stuff.
657 */
658 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
659 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
660 if (cLeftPage > uCounterReg)
661 cLeftPage = uCounterReg;
662 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
663 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
664 && ( IS_64_BIT_CODE(pVCpu)
665 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
666 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
667 )
668 )
669 {
670 RTGCPHYS GCPhysMem;
671 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
672 if (rcStrict != VINF_SUCCESS)
673 return rcStrict;
674
675 /*
676 * If we can map the page without trouble, do a block processing
677 * until the end of the current page.
678 */
679 PGMPAGEMAPLOCK PgLockMem;
680 OP_TYPE const *puMem;
681 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
682 if (rcStrict == VINF_SUCCESS)
683 {
 684 /* Search till we find a matching item. */
685 OP_TYPE uTmpValue;
686 bool fQuit;
687 uint32_t i = 0;
688 do
689 {
690 uTmpValue = puMem[i++];
691 fQuit = uTmpValue == uValueReg;
692 } while (i < cLeftPage && !fQuit);
693
694 /* Update the regs. */
695 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
696 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
697 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
698 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
699 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
700 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
701 if ( fQuit
702 || uCounterReg == 0)
703 break;
704
705 /* If unaligned, we drop thru and do the page crossing access
706 below. Otherwise, do the next page. */
707 if (!(uVirtAddr & (OP_SIZE - 1)))
708 {
709 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
710 continue;
711 }
712 cLeftPage = 0;
713 }
714 }
715
716 /*
717 * Fallback - slow processing till the end of the current page.
 718 * In the cross page boundary case we will end up here with cLeftPage
 719 * as 0; we then execute a single iteration.
720 */
721 do
722 {
723 OP_TYPE uTmpValue;
724 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
725 if (rcStrict != VINF_SUCCESS)
726 return rcStrict;
727 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
728 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
729 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
730 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
731 cLeftPage--;
732 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
733 } while ( (int32_t)cLeftPage > 0
734 && !(uEFlags & X86_EFL_ZF));
735
736 /*
737 * Next page? Must check for interrupts and stuff here.
738 */
739 if ( uCounterReg == 0
740 || (uEFlags & X86_EFL_ZF))
741 break;
742 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
743 }
744
745 /*
746 * Done.
747 */
748 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
749 return VINF_SUCCESS;
750}
751
752
753
754
755/**
756 * Implements 'REP MOVS'.
757 */
758IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
759{
760 PVM pVM = pVCpu->CTX_SUFF(pVM);
761
762 /*
763 * Setup.
764 */
765 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
766 if (uCounterReg == 0)
767 {
768 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
769 return VINF_SUCCESS;
770 }
771
772 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
773
774 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
775 uint64_t uSrcBase;
776 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uSrcBase);
777 if (rcStrict != VINF_SUCCESS)
778 return rcStrict;
779
780 uint64_t uDstBase;
781 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uDstBase);
782 if (rcStrict != VINF_SUCCESS)
783 return rcStrict;
784
785 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
786 ADDR_TYPE uSrcAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
787 ADDR_TYPE uDstAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
788
789 /*
790 * Be careful with handle bypassing.
791 */
792 if (pVCpu->iem.s.fBypassHandlers)
793 {
794 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
795 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
796 }
797
798 /*
799 * The loop.
800 */
801 for (;;)
802 {
803 /*
804 * Do segmentation and virtual page stuff.
805 */
806 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
807 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
808 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
809 if (cLeftSrcPage > uCounterReg)
810 cLeftSrcPage = uCounterReg;
811 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
812 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
813
814 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
815 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
816 && ( IS_64_BIT_CODE(pVCpu)
817 || ( uSrcAddrReg < pSrcHid->u32Limit
818 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
819 && uDstAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
820 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
821 )
822 )
823 {
824 RTGCPHYS GCPhysSrcMem;
825 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
826 if (rcStrict != VINF_SUCCESS)
827 return rcStrict;
828
829 RTGCPHYS GCPhysDstMem;
830 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
831 if (rcStrict != VINF_SUCCESS)
832 return rcStrict;
833
834 /*
835 * If we can map the page without trouble, do a block processing
836 * until the end of the current page.
837 */
838 PGMPAGEMAPLOCK PgLockDstMem;
839 OP_TYPE *puDstMem;
840 rcStrict = iemMemPageMap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
841 if (rcStrict == VINF_SUCCESS)
842 {
843 PGMPAGEMAPLOCK PgLockSrcMem;
844 OP_TYPE const *puSrcMem;
845 rcStrict = iemMemPageMap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
846 if (rcStrict == VINF_SUCCESS)
847 {
848 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
849 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
850
851 /* Perform the operation exactly (don't use memcpy to avoid
852 having to consider how its implementation would affect
853 any overlapping source and destination area). */
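                    /* Illustrative case (assumed, not taken from this file): a guest doing
                       'rep movsb' with ES:rDI = DS:rSI + 1 depends on this element-by-element
                       order to replicate the first byte across the whole block; a libc memcpy
                       may copy in any order and could break that. */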
854 OP_TYPE const *puSrcCur = puSrcMem;
855 OP_TYPE *puDstCur = puDstMem;
856 uint32_t cTodo = cLeftPage;
857 while (cTodo-- > 0)
858 *puDstCur++ = *puSrcCur++;
859
860 /* Update the registers. */
861 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
862 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
863 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
864
865 iemMemPageUnmap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
866 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
867
868 if (uCounterReg == 0)
869 break;
870 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
871 continue;
872 }
873 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
874 }
875 }
876
877 /*
878 * Fallback - slow processing till the end of the current page.
 879 * In the cross page boundary case we will end up here with cLeftPage
 880 * as 0; we then execute a single iteration.
881 */
882 do
883 {
884 OP_TYPE uValue;
885 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uSrcAddrReg);
886 if (rcStrict != VINF_SUCCESS)
887 return rcStrict;
888 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uDstAddrReg, uValue);
889 if (rcStrict != VINF_SUCCESS)
890 return rcStrict;
891
892 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cbIncr;
893 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cbIncr;
894 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
895 cLeftPage--;
896 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
897 } while ((int32_t)cLeftPage > 0);
898
899 /*
900 * Next page. Must check for interrupts and stuff here.
901 */
902 if (uCounterReg == 0)
903 break;
904 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
905 }
906
907 /*
908 * Done.
909 */
910 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
911 return VINF_SUCCESS;
912}
913
914
915/**
916 * Implements 'REP STOS'.
917 */
918IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
919{
920 PVM pVM = pVCpu->CTX_SUFF(pVM);
921
922 /*
923 * Setup.
924 */
925 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
926 if (uCounterReg == 0)
927 {
928 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
929 return VINF_SUCCESS;
930 }
931
932 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
933
934 uint64_t uBaseAddr;
935 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
936 if (rcStrict != VINF_SUCCESS)
937 return rcStrict;
938
939 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
940 OP_TYPE const uValue = pVCpu->cpum.GstCtx.OP_rAX;
941 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
942
943 /*
944 * Be careful with handle bypassing.
945 */
946 /** @todo Permit doing a page if correctly aligned. */
947 if (pVCpu->iem.s.fBypassHandlers)
948 {
949 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
950 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
951 }
952
953 /*
954 * The loop.
955 */
956 for (;;)
957 {
958 /*
959 * Do segmentation and virtual page stuff.
960 */
961 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
962 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
963 if (cLeftPage > uCounterReg)
964 cLeftPage = uCounterReg;
965 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
966 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
967 && ( IS_64_BIT_CODE(pVCpu)
968 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
969 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
970 )
971 )
972 {
973 RTGCPHYS GCPhysMem;
974 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
975 if (rcStrict != VINF_SUCCESS)
976 return rcStrict;
977
978 /*
979 * If we can map the page without trouble, do a block processing
980 * until the end of the current page.
981 */
982 PGMPAGEMAPLOCK PgLockMem;
983 OP_TYPE *puMem;
984 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
985 if (rcStrict == VINF_SUCCESS)
986 {
987 /* Update the regs first so we can loop on cLeftPage. */
988 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
989 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
990
991 /* Do the memsetting. */
992#if OP_SIZE == 8
993 memset(puMem, uValue, cLeftPage);
994/*#elif OP_SIZE == 32
995 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
996#else
997 while (cLeftPage-- > 0)
998 *puMem++ = uValue;
999#endif
1000
1001 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1002
1003 if (uCounterReg == 0)
1004 break;
1005
1006 /* If unaligned, we drop thru and do the page crossing access
1007 below. Otherwise, do the next page. */
1008 if (!(uVirtAddr & (OP_SIZE - 1)))
1009 {
1010 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1011 continue;
1012 }
1013 cLeftPage = 0;
1014 }
1015 }
1016
1017 /*
1018 * Fallback - slow processing till the end of the current page.
 1019 * In the cross page boundary case we will end up here with cLeftPage
 1020 * as 0; we then execute a single iteration.
1021 */
1022 do
1023 {
1024 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uAddrReg, uValue);
1025 if (rcStrict != VINF_SUCCESS)
1026 return rcStrict;
1027 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1028 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1029 cLeftPage--;
1030 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1031 } while ((int32_t)cLeftPage > 0);
1032
1033 /*
1034 * Next page. Must check for interrupts and stuff here.
1035 */
1036 if (uCounterReg == 0)
1037 break;
1038 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1039 }
1040
1041 /*
1042 * Done.
1043 */
1044 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1045 return VINF_SUCCESS;
1046}
1047
1048
1049/**
1050 * Implements 'REP LODS'.
1051 */
1052IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
1053{
1054 PVM pVM = pVCpu->CTX_SUFF(pVM);
1055
1056 /*
1057 * Setup.
1058 */
1059 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1060 if (uCounterReg == 0)
1061 {
1062 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1063 return VINF_SUCCESS;
1064 }
1065
1066 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg));
1067 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
1068 uint64_t uBaseAddr;
1069 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uBaseAddr);
1070 if (rcStrict != VINF_SUCCESS)
1071 return rcStrict;
1072
1073 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1074 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1075
1076 /*
1077 * The loop.
1078 */
1079 for (;;)
1080 {
1081 /*
1082 * Do segmentation and virtual page stuff.
1083 */
1084 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1085 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1086 if (cLeftPage > uCounterReg)
1087 cLeftPage = uCounterReg;
1088 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1089 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1090 && ( IS_64_BIT_CODE(pVCpu)
1091 || ( uAddrReg < pSrcHid->u32Limit
1092 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
1093 )
1094 )
1095 {
1096 RTGCPHYS GCPhysMem;
1097 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1098 if (rcStrict != VINF_SUCCESS)
1099 return rcStrict;
1100
1101 /*
1102 * If we can map the page without trouble, we can get away with
1103 * just reading the last value on the page.
1104 */
1105 PGMPAGEMAPLOCK PgLockMem;
1106 OP_TYPE const *puMem;
1107 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1108 if (rcStrict == VINF_SUCCESS)
1109 {
1110 /* Only get the last byte, the rest doesn't matter in direct access mode. */
1111#if OP_SIZE == 32
1112 pVCpu->cpum.GstCtx.rax = puMem[cLeftPage - 1];
1113#else
1114 pVCpu->cpum.GstCtx.OP_rAX = puMem[cLeftPage - 1];
1115#endif
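                /* Example of why only the last element matters here (plain x86 semantics):
                   'rep lodsw' with CX=5 loads five words in turn but leaves AX holding only
                   the fifth, so reading puMem[cLeftPage - 1] reproduces the architecturally
                   visible end state for this chunk. */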
1116 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
1117 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
1118 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1119
1120 if (uCounterReg == 0)
1121 break;
1122
1123 /* If unaligned, we drop thru and do the page crossing access
1124 below. Otherwise, do the next page. */
1125 if (!(uVirtAddr & (OP_SIZE - 1)))
1126 {
1127 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1128 continue;
1129 }
1130 cLeftPage = 0;
1131 }
1132 }
1133
1134 /*
1135 * Fallback - slow processing till the end of the current page.
 1136 * In the cross page boundary case we will end up here with cLeftPage
 1137 * as 0; we then execute a single iteration.
1138 */
1139 do
1140 {
1141 OP_TYPE uTmpValue;
1142 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, iEffSeg, uAddrReg);
1143 if (rcStrict != VINF_SUCCESS)
1144 return rcStrict;
1145#if OP_SIZE == 32
1146 pVCpu->cpum.GstCtx.rax = uTmpValue;
1147#else
1148 pVCpu->cpum.GstCtx.OP_rAX = uTmpValue;
1149#endif
1150 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1151 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1152 cLeftPage--;
1153 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1154 } while ((int32_t)cLeftPage > 0);
1155
1156 if (rcStrict != VINF_SUCCESS)
1157 break;
1158
1159 /*
1160 * Next page. Must check for interrupts and stuff here.
1161 */
1162 if (uCounterReg == 0)
1163 break;
1164 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1165 }
1166
1167 /*
1168 * Done.
1169 */
1170 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1171 return VINF_SUCCESS;
1172}
1173
1174
1175#if OP_SIZE != 64
1176
1177/**
1178 * Implements 'INS' (no rep)
1179 */
1180IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1181{
1182 PVM pVM = pVCpu->CTX_SUFF(pVM);
1183 VBOXSTRICTRC rcStrict;
1184
1185 /*
1186 * Be careful with handle bypassing.
1187 */
1188 if (pVCpu->iem.s.fBypassHandlers)
1189 {
1190 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1191 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1192 }
1193
1194 /*
1195 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1196 * segmentation and finally any #PF due to virtual address translation.
1197 * ASSUMES nothing is read from the I/O port before traps are taken.
1198 */
1199 if (!fIoChecked)
1200 {
1201 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1202 if (rcStrict != VINF_SUCCESS)
1203 return rcStrict;
1204 }
1205
1206#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1207 /*
1208 * Check SVM nested-guest IO intercept.
1209 */
1210 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1211 {
1212 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, false /* fRep */,
1213 true /* fStrIo */, cbInstr);
1214 if (rcStrict == VINF_SVM_VMEXIT)
1215 return VINF_SUCCESS;
1216 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
1217 {
1218 Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx, OP_SIZE / 8,
1219 VBOXSTRICTRC_VAL(rcStrict)));
1220 return rcStrict;
1221 }
1222 }
1223#endif
1224
1225 OP_TYPE *puMem;
1226 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI, IEM_ACCESS_DATA_W);
1227 if (rcStrict != VINF_SUCCESS)
1228 return rcStrict;
1229
1230 uint32_t u32Value = 0;
1231 rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, &u32Value, OP_SIZE / 8);
1232 if (IOM_SUCCESS(rcStrict))
1233 {
1234 *puMem = (OP_TYPE)u32Value;
1235# ifdef IN_RING3
1236 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1237# else
1238 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1239# endif
1240 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1241 {
1242 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1243 pVCpu->cpum.GstCtx.ADDR_rDI += OP_SIZE / 8;
1244 else
1245 pVCpu->cpum.GstCtx.ADDR_rDI -= OP_SIZE / 8;
1246 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1247 }
1248 else
1249 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), RT_FAILURE_NP(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1250 }
1251 return rcStrict;
1252}
1253
1254
1255/**
1256 * Implements 'REP INS'.
1257 */
1258IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1259{
1260 PVM pVM = pVCpu->CTX_SUFF(pVM);
1261
1262 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_TR);
1263
1264 /*
1265 * Setup.
1266 */
1267 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1268 VBOXSTRICTRC rcStrict;
1269 if (!fIoChecked)
1270 {
1271/** @todo check if this is too early for ecx=0. */
1272 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1273 if (rcStrict != VINF_SUCCESS)
1274 return rcStrict;
1275 }
1276
1277#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1278 /*
1279 * Check SVM nested-guest IO intercept.
1280 */
1281 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1282 {
1283 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, true /* fRep */,
1284 true /* fStrIo */, cbInstr);
1285 if (rcStrict == VINF_SVM_VMEXIT)
1286 return VINF_SUCCESS;
1287 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
1288 {
1289 Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1290 VBOXSTRICTRC_VAL(rcStrict)));
1291 return rcStrict;
1292 }
1293 }
1294#endif
1295
1296 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1297 if (uCounterReg == 0)
1298 {
1299 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1300 return VINF_SUCCESS;
1301 }
1302
1303 uint64_t uBaseAddr;
1304 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
1305 if (rcStrict != VINF_SUCCESS)
1306 return rcStrict;
1307
1308 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1309 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
1310
1311 /*
1312 * Be careful with handle bypassing.
1313 */
1314 if (pVCpu->iem.s.fBypassHandlers)
1315 {
1316 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1317 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1318 }
1319
1320 /*
1321 * The loop.
1322 */
1323 for (;;)
1324 {
1325 /*
1326 * Do segmentation and virtual page stuff.
1327 */
1328 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1329 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1330 if (cLeftPage > uCounterReg)
1331 cLeftPage = uCounterReg;
1332 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1333 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1334 && ( IS_64_BIT_CODE(pVCpu)
1335 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
1336 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
1337 )
1338 )
1339 {
1340 RTGCPHYS GCPhysMem;
1341 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1342 if (rcStrict != VINF_SUCCESS)
1343 return rcStrict;
1344
1345 /*
1346 * If we can map the page without trouble, use the IOM
1347 * string I/O interface to do the work.
1348 */
1349 PGMPAGEMAPLOCK PgLockMem;
1350 OP_TYPE *puMem;
1351 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1352 if (rcStrict == VINF_SUCCESS)
1353 {
1354 uint32_t cTransfers = cLeftPage;
1355 rcStrict = IOMIOPortReadString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1356
1357 uint32_t cActualTransfers = cLeftPage - cTransfers;
1358 Assert(cActualTransfers <= cLeftPage);
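                /* Worked example of the bookkeeping (made-up numbers): requesting
                   cLeftPage = 100 items and getting cTransfers back as 40 means the device
                   completed 60, so rDI/rCX advance by 60 below and puMem is bumped past the
                   60 items already written; that cTransfers counts the items still left on
                   return is inferred from this arithmetic, not from IOM documentation. */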
1359 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
1360 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1361 puMem += cActualTransfers;
1362
1363 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1364
1365 if (rcStrict != VINF_SUCCESS)
1366 {
1367 if (IOM_SUCCESS(rcStrict))
1368 {
1369 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1370 if (uCounterReg == 0)
1371 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1372 }
1373 return rcStrict;
1374 }
1375
1376 /* If unaligned, we drop thru and do the page crossing access
1377 below. Otherwise, do the next page. */
1378 if (uCounterReg == 0)
1379 break;
1380 if (!(uVirtAddr & (OP_SIZE - 1)))
1381 {
1382 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1383 continue;
1384 }
1385 cLeftPage = 0;
1386 }
1387 }
1388
1389 /*
1390 * Fallback - slow processing till the end of the current page.
 1391 * In the cross page boundary case we will end up here with cLeftPage
 1392 * as 0; we then execute a single iteration.
 1393 *
 1394 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1395 * I/O port, otherwise it wouldn't really be restartable.
1396 */
1397 /** @todo investigate what the CPU actually does with \#PF/\#GP
1398 * during INS. */
1399 do
1400 {
1401 OP_TYPE *puMem;
1402 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1403 if (rcStrict != VINF_SUCCESS)
1404 return rcStrict;
1405
1406 uint32_t u32Value = 0;
1407 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1408 if (!IOM_SUCCESS(rcStrict))
1409 {
1410 iemMemRollback(pVCpu);
1411 return rcStrict;
1412 }
1413
1414 *puMem = (OP_TYPE)u32Value;
1415# ifdef IN_RING3
1416 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1417# else
1418 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1419# endif
1420 if (rcStrict2 == VINF_SUCCESS)
1421 { /* likely */ }
1422 else
1423 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1424 RT_FAILURE(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1425
1426 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1427 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1428
1429 cLeftPage--;
1430 if (rcStrict != VINF_SUCCESS)
1431 {
1432 if (uCounterReg == 0)
1433 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1434 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1435 return rcStrict;
1436 }
1437
1438 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1439 } while ((int32_t)cLeftPage > 0);
1440
1441
1442 /*
1443 * Next page. Must check for interrupts and stuff here.
1444 */
1445 if (uCounterReg == 0)
1446 break;
1447 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1448 }
1449
1450 /*
1451 * Done.
1452 */
1453 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1454 return VINF_SUCCESS;
1455}
1456
1457
1458/**
1459 * Implements 'OUTS' (no rep)
1460 */
1461IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1462{
1463 PVM pVM = pVCpu->CTX_SUFF(pVM);
1464 VBOXSTRICTRC rcStrict;
1465
1466 /*
1467 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1468 * segmentation and finally any #PF due to virtual address translation.
1469 * ASSUMES nothing is read from the I/O port before traps are taken.
1470 */
1471 if (!fIoChecked)
1472 {
1473 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1474 if (rcStrict != VINF_SUCCESS)
1475 return rcStrict;
1476 }
1477
1478#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1479 /*
1480 * Check SVM nested-guest IO intercept.
1481 */
1482 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1483 {
1484 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, false /* fRep */,
1485 true /* fStrIo */, cbInstr);
1486 if (rcStrict == VINF_SVM_VMEXIT)
1487 return VINF_SUCCESS;
1488 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
1489 {
1490 Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx, OP_SIZE / 8,
1491 VBOXSTRICTRC_VAL(rcStrict)));
1492 return rcStrict;
1493 }
1494 }
1495#endif
1496
1497 OP_TYPE uValue;
1498 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pVCpu->cpum.GstCtx.ADDR_rSI);
1499 if (rcStrict == VINF_SUCCESS)
1500 {
1501 rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, uValue, OP_SIZE / 8);
1502 if (IOM_SUCCESS(rcStrict))
1503 {
1504 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1505 pVCpu->cpum.GstCtx.ADDR_rSI += OP_SIZE / 8;
1506 else
1507 pVCpu->cpum.GstCtx.ADDR_rSI -= OP_SIZE / 8;
1508 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1509 if (rcStrict != VINF_SUCCESS)
1510 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1511 }
1512 }
1513 return rcStrict;
1514}
1515
1516
1517/**
1518 * Implements 'REP OUTS'.
1519 */
1520IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1521{
1522 PVM pVM = pVCpu->CTX_SUFF(pVM);
1523
1524 /*
1525 * Setup.
1526 */
1527 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1528 VBOXSTRICTRC rcStrict;
1529 if (!fIoChecked)
1530 {
1531/** @todo check if this is too early for ecx=0. */
1532 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1533 if (rcStrict != VINF_SUCCESS)
1534 return rcStrict;
1535 }
1536
1537#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1538 /*
1539 * Check SVM nested-guest IO intercept.
1540 */
1541 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1542 {
1543 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, true /* fRep */,
1544 true /* fStrIo */, cbInstr);
1545 if (rcStrict == VINF_SVM_VMEXIT)
1546 return VINF_SUCCESS;
1547 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
1548 {
1549 Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1550 VBOXSTRICTRC_VAL(rcStrict)));
1551 return rcStrict;
1552 }
1553 }
1554#endif
1555
1556 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1557 if (uCounterReg == 0)
1558 {
1559 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1560 return VINF_SUCCESS;
1561 }
1562
1563 PCCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iEffSeg);
1564 uint64_t uBaseAddr;
1565 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pHid, iEffSeg, &uBaseAddr);
1566 if (rcStrict != VINF_SUCCESS)
1567 return rcStrict;
1568
1569 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1570 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1571
1572 /*
1573 * The loop.
1574 */
1575 for (;;)
1576 {
1577 /*
1578 * Do segmentation and virtual page stuff.
1579 */
1580 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1581 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1582 if (cLeftPage > uCounterReg)
1583 cLeftPage = uCounterReg;
1584 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1585 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1586 && ( IS_64_BIT_CODE(pVCpu)
1587 || ( uAddrReg < pHid->u32Limit
1588 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1589 )
1590 )
1591 {
1592 RTGCPHYS GCPhysMem;
1593 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1594 if (rcStrict != VINF_SUCCESS)
1595 return rcStrict;
1596
1597 /*
1598 * If we can map the page without trouble, we use the IOM
1599 * string I/O interface to do the job.
1600 */
1601 PGMPAGEMAPLOCK PgLockMem;
1602 OP_TYPE const *puMem;
1603 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1604 if (rcStrict == VINF_SUCCESS)
1605 {
1606 uint32_t cTransfers = cLeftPage;
1607 rcStrict = IOMIOPortWriteString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1608
1609 uint32_t cActualTransfers = cLeftPage - cTransfers;
1610 Assert(cActualTransfers <= cLeftPage);
1611 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
1612 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1613 puMem += cActualTransfers;
1614
1615 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1616
1617 if (rcStrict != VINF_SUCCESS)
1618 {
1619 if (IOM_SUCCESS(rcStrict))
1620 {
1621 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1622 if (uCounterReg == 0)
1623 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1624 }
1625 return rcStrict;
1626 }
1627
1628 if (uCounterReg == 0)
1629 break;
1630
1631 /* If unaligned, we drop thru and do the page crossing access
1632 below. Otherwise, do the next page. */
1633 if (!(uVirtAddr & (OP_SIZE - 1)))
1634 {
1635 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1636 continue;
1637 }
1638 cLeftPage = 0;
1639 }
1640 }
1641
1642 /*
1643 * Fallback - slow processing till the end of the current page.
 1644 * In the cross page boundary case we will end up here with cLeftPage
 1645 * as 0; we then execute a single iteration.
 1646 *
 1647 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1648 * I/O port, otherwise it wouldn't really be restartable.
1649 */
1650 /** @todo investigate what the CPU actually does with \#PF/\#GP
1651 * during INS. */
1652 do
1653 {
1654 OP_TYPE uValue;
1655 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uAddrReg);
1656 if (rcStrict != VINF_SUCCESS)
1657 return rcStrict;
1658
1659 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1660 if (IOM_SUCCESS(rcStrict))
1661 {
1662 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1663 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1664 cLeftPage--;
1665 }
1666 if (rcStrict != VINF_SUCCESS)
1667 {
1668 if (IOM_SUCCESS(rcStrict))
1669 {
1670 if (uCounterReg == 0)
1671 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1672 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1673 }
1674 return rcStrict;
1675 }
1676 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1677 } while ((int32_t)cLeftPage > 0);
1678
1679
1680 /*
1681 * Next page. Must check for interrupts and stuff here.
1682 */
1683 if (uCounterReg == 0)
1684 break;
1685 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1686 }
1687
1688 /*
1689 * Done.
1690 */
1691 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1692 return VINF_SUCCESS;
1693}
1694
1695#endif /* OP_SIZE != 64-bit */
1696
1697
1698#undef OP_rAX
1699#undef OP_SIZE
1700#undef ADDR_SIZE
1701#undef ADDR_rDI
1702#undef ADDR_rSI
1703#undef ADDR_rCX
1704#undef ADDR_rIP
1705#undef ADDR2_TYPE
1706#undef ADDR_TYPE
1707#undef ADDR2_TYPE
1708#undef IS_64_BIT_CODE
1709#undef IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
1710#undef IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1711#undef IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1712