VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h @ 77844

Last change on this file since 77844 was 77450, checked in by vboxsync on 2019-02-25

VMM/IEM: Nested VMX: bugref:9180 Fix passing operand size for string-IO instruction VM-exits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.7 KB
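
This file is a code template rather than an ordinary header: it is included several times in a row with different OP_SIZE and ADDR_SIZE values, and each inclusion stamps out one operand/address-size family of string-instruction helpers (CMPS, SCAS, MOVS, STOS, LODS, INS, OUTS). A minimal sketch of that instantiation pattern follows; the name of the including file and the exact set of size combinations used by the build are assumptions for illustration, not taken from this page. Note that the template #undef's its own parameter macros at the bottom, so the includer only has to redefine them before each inclusion.

    /* Hypothetical includer driving the multiple-inclusion template. */
    #define OP_SIZE   16
    #define ADDR_SIZE 16
    #include "IEMAllCImplStrInstr.cpp.h"   /* emits iemCImpl_..._op16_addr16 */

    #define OP_SIZE   32
    #define ADDR_SIZE 32
    #include "IEMAllCImplStrInstr.cpp.h"   /* emits iemCImpl_..._op32_addr32 */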
1/* $Id: IEMAllCImplStrInstr.cpp.h 77450 2019-02-25 06:01:34Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40# define ADDR_VMXSTRIO 0
41#elif ADDR_SIZE == 32
42# define ADDR_rDI edi
43# define ADDR_rSI esi
44# define ADDR_rCX ecx
45# define ADDR2_TYPE uint32_t
46# define ADDR_VMXSTRIO 1
47#elif ADDR_SIZE == 64
48# define ADDR_rDI rdi
49# define ADDR_rSI rsi
50# define ADDR_rCX rcx
51# define ADDR2_TYPE uint64_t
52# define ADDR_VMXSTRIO 2
53# define IS_64_BIT_CODE(a_pVCpu) (true)
54#else
55# error "Bad ADDR_SIZE."
56#endif
57#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
58
59#if ADDR_SIZE == 64 || OP_SIZE == 64
60# define IS_64_BIT_CODE(a_pVCpu) (true)
61#elif ADDR_SIZE == 32
62# define IS_64_BIT_CODE(a_pVCpu) ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT)
63#else
64# define IS_64_BIT_CODE(a_pVCpu) (false)
65#endif
66
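/*
 * Editor's note (not part of the original file): for one concrete
 * instantiation, say OP_SIZE=32 and ADDR_SIZE=16, the parameter macros above
 * resolve as follows (illustrative expansion only):
 *
 *   OP_rAX    -> eax        so pVCpu->cpum.GstCtx.OP_rAX reads GstCtx.eax
 *   OP_TYPE   -> uint32_t   via RT_CONCAT3(uint,32,_t)
 *   ADDR_rSI  -> si,  ADDR_rDI -> di,  ADDR_rCX -> cx
 *   ADDR_TYPE -> uint16_t,  ADDR2_TYPE -> uint32_t
 *   IS_64_BIT_CODE(a_pVCpu) -> (false)
 *
 * and a definition head such as
 *   IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
 * produces the function iemCImpl_repe_cmps_op32_addr16.
 */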
67/** @def IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
68 * Used in the outer (page-by-page) loop to check for reasons for returning
69 * before completing the instruction. In raw-mode we temporarily enable
70 * interrupts to let the host interrupt us. We cannot let big string operations
71 * hog the CPU, especially not in raw-mode.
72 */
73#ifdef IN_RC
74# define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \
75 do { \
76 if (RT_LIKELY( !VMCPU_FF_IS_ANY_SET(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
77 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
78 && !VM_FF_IS_ANY_SET(a_pVM, VM_FF_YIELD_REPSTR_MASK) \
79 )) \
80 { \
81 RTCCUINTREG fSavedFlags = ASMGetFlags(); \
82 if (!(fSavedFlags & X86_EFL_IF)) \
83 { \
84 ASMSetFlags(fSavedFlags | X86_EFL_IF); \
85 ASMNopPause(); \
86 ASMSetFlags(fSavedFlags); \
87 } \
88 } \
89 else \
90 { \
91 LogFlow(("%s: Leaving early (outer)! ffcpu=%#RX64 ffvm=%#x\n", \
92 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
93 return VINF_SUCCESS; \
94 } \
95 } while (0)
96#else
97# define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \
98 do { \
99 if (RT_LIKELY( !VMCPU_FF_IS_ANY_SET(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
100 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
101 && !VM_FF_IS_ANY_SET(a_pVM, VM_FF_YIELD_REPSTR_MASK) \
102 )) \
103 { /* probable */ } \
104 else \
105 { \
106 LogFlow(("%s: Leaving early (outer)! ffcpu=%#RX64 ffvm=%#x\n", \
107 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
108 return VINF_SUCCESS; \
109 } \
110 } while (0)
111#endif
112
113/** @def IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
114 * This is used in some of the inner loops to make sure we respond immediately
115 * to VMCPU_FF_IOM as well as outside requests. Use this for expensive
116 * instructions. Use IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN for
117 * ones that are typically cheap. */
118#define IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
119 do { \
120 if (RT_LIKELY( ( !VMCPU_FF_IS_ANY_SET(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
121 && !VM_FF_IS_ANY_SET(a_pVM, VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK)) \
122 || (a_fExitExpr) )) \
123 { /* very likely */ } \
124 else \
125 { \
126 LogFlow(("%s: Leaving early (inner)! ffcpu=%#RX64 ffvm=%#x\n", \
127 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
128 return VINF_SUCCESS; \
129 } \
130 } while (0)
131
132
133/** @def IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
134 * This is used in the inner loops where
135 * IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN isn't used. It only
136 * checks the CPU FFs so that we respond immediately to the pending IOM FF
137 * (status code is hidden in IEMCPU::rcPassUp by IEM memory commit code).
138 */
139#define IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
140 do { \
141 if (RT_LIKELY( !VMCPU_FF_IS_ANY_SET(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
142 || (a_fExitExpr) )) \
143 { /* very likely */ } \
144 else \
145 { \
146 LogFlow(("%s: Leaving early (inner)! ffcpu=%#RX64 (ffvm=%#x)\n", \
147 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
148 return VINF_SUCCESS; \
149 } \
150 } while (0)
151
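/*
 * Editor's sketch (not part of the original file): the three FF-check macros
 * above share one pattern -- the REP loops commit guest register state after
 * every chunk and then poll the VM/VMCPU force-action flags; if anything is
 * pending they return VINF_SUCCESS, and since RIP has not been advanced yet
 * the instruction simply restarts later.  The standalone sketch below mimics
 * that shape with made-up names; it is not VirtualBox API.
 */
#if 0 /* illustrative only */
#include <stdint.h>

typedef struct DEMOCPU { volatile uint32_t fForcedActions; uint64_t rcx; } DEMOCPU;
#define DEMO_FF_YIELD_MASK  UINT32_C(0x0000000f)

static int demoRepLoop(DEMOCPU *pCpu)
{
    while (pCpu->rcx != 0)
    {
        /* ... process one chunk, updating the architectural registers ... */
        pCpu->rcx -= 1;

        /* State is already consistent, so leaving early is safe: return
           "success" and let the caller service the request and re-dispatch
           the still-unfinished instruction. */
        if (pCpu->fForcedActions & DEMO_FF_YIELD_MASK)
            return 0;
    }
    return 0;
}
#endif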
152
153/**
154 * Implements 'REPE CMPS'.
155 */
156IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
157{
158 PVM pVM = pVCpu->CTX_SUFF(pVM);
159
160 /*
161 * Setup.
162 */
163 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
164 if (uCounterReg == 0)
165 {
166 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
167 return VINF_SUCCESS;
168 }
169
170 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
171
172 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
173 uint64_t uSrc1Base;
174 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
175 if (rcStrict != VINF_SUCCESS)
176 return rcStrict;
177
178 uint64_t uSrc2Base;
179 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
180 if (rcStrict != VINF_SUCCESS)
181 return rcStrict;
182
183 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
184 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
185 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
186 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
187
188 /*
189 * The loop.
190 */
191 for (;;)
192 {
193 /*
194 * Do segmentation and virtual page stuff.
195 */
196 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
197 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
198 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
199 if (cLeftSrc1Page > uCounterReg)
200 cLeftSrc1Page = uCounterReg;
201 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
202 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
203
204 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
205 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
206 && ( IS_64_BIT_CODE(pVCpu)
207 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
208 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
209 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
210 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
211 )
212 )
213 {
214 RTGCPHYS GCPhysSrc1Mem;
215 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
216 if (rcStrict != VINF_SUCCESS)
217 return rcStrict;
218
219 RTGCPHYS GCPhysSrc2Mem;
220 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
221 if (rcStrict != VINF_SUCCESS)
222 return rcStrict;
223
224 /*
225 * If we can map the page without trouble, do a block processing
226 * until the end of the current page.
227 */
228 PGMPAGEMAPLOCK PgLockSrc2Mem;
229 OP_TYPE const *puSrc2Mem;
230 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
231 if (rcStrict == VINF_SUCCESS)
232 {
233 PGMPAGEMAPLOCK PgLockSrc1Mem;
234 OP_TYPE const *puSrc1Mem;
235 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
236 if (rcStrict == VINF_SUCCESS)
237 {
238 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
239 {
240 /* All matches, only compare the last item to get the right eflags. */
241 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
242 uSrc1AddrReg += cLeftPage * cbIncr;
243 uSrc2AddrReg += cLeftPage * cbIncr;
244 uCounterReg -= cLeftPage;
245 }
246 else
247 {
248 /* Some mismatch, compare each item (and keep volatile
249 memory in mind). */
250 uint32_t off = 0;
251 do
252 {
253 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
254 off++;
255 } while ( off < cLeftPage
256 && (uEFlags & X86_EFL_ZF));
257 uSrc1AddrReg += cbIncr * off;
258 uSrc2AddrReg += cbIncr * off;
259 uCounterReg -= off;
260 }
261
262 /* Update the registers before looping. */
263 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
264 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
265 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
266 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
267
268 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
269 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
270 if ( uCounterReg == 0
271 || !(uEFlags & X86_EFL_ZF))
272 break;
273 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
274 continue;
275 }
276 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
277 }
278 }
279
280 /*
281 * Fallback - slow processing till the end of the current page.
282 * In the cross-page boundary case we will end up here with cLeftPage
283 * as 0, in which case we execute a single round of the loop.
284 */
285 do
286 {
287 OP_TYPE uValue1;
288 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
289 if (rcStrict != VINF_SUCCESS)
290 return rcStrict;
291 OP_TYPE uValue2;
292 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
293 if (rcStrict != VINF_SUCCESS)
294 return rcStrict;
295 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
296
297 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
298 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
299 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
300 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
301 cLeftPage--;
302 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
303 } while ( (int32_t)cLeftPage > 0
304 && (uEFlags & X86_EFL_ZF));
305
306 /*
307 * Next page? Must check for interrupts and stuff here.
308 */
309 if ( uCounterReg == 0
310 || !(uEFlags & X86_EFL_ZF))
311 break;
312 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
313 }
314
315 /*
316 * Done.
317 */
318 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
319 return VINF_SUCCESS;
320}
321
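/*
 * Editor's sketch (not from the original file): the cLeftSrc1Page /
 * cLeftSrc2Page / cLeftPage arithmetic used by the loop above, in plain C
 * and assuming 4 KiB pages.  It answers "how many OP_SIZE-sized elements fit
 * before the operand crosses its page boundary?", clamped to the remaining
 * element count; for two-operand instructions the chunk is the minimum of
 * the two per-operand values, and a result of 0 (an element straddling the
 * boundary) forces one round of the slow fallback loop.
 */
#if 0 /* illustrative only */
#include <stdint.h>

#define DEMO_PAGE_SIZE        UINT32_C(4096)
#define DEMO_PAGE_OFFSET_MASK (DEMO_PAGE_SIZE - 1)

static uint32_t demoElemsLeftInPage(uint64_t uVirtAddr, uint32_t cbElem, uint64_t cElemsLeft)
{
    uint32_t cLeft = (uint32_t)((DEMO_PAGE_SIZE - (uVirtAddr & DEMO_PAGE_OFFSET_MASK)) / cbElem);
    return cLeft > cElemsLeft ? (uint32_t)cElemsLeft : cLeft;
}
#endif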
322
323/**
324 * Implements 'REPNE CMPS'.
325 */
326IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
327{
328 PVM pVM = pVCpu->CTX_SUFF(pVM);
329
330 /*
331 * Setup.
332 */
333 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
334 if (uCounterReg == 0)
335 {
336 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
337 return VINF_SUCCESS;
338 }
339
340 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
341
342 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
343 uint64_t uSrc1Base;
344 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
345 if (rcStrict != VINF_SUCCESS)
346 return rcStrict;
347
348 uint64_t uSrc2Base;
349 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
350 if (rcStrict != VINF_SUCCESS)
351 return rcStrict;
352
353 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
354 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
355 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
356 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
357
358 /*
359 * The loop.
360 */
361 for (;;)
362 {
363 /*
364 * Do segmentation and virtual page stuff.
365 */
366 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
367 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
368 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
369 if (cLeftSrc1Page > uCounterReg)
370 cLeftSrc1Page = uCounterReg;
371 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
372 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
373
374 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
375 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
376 && ( IS_64_BIT_CODE(pVCpu)
377 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
378 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
379 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
380 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
381 )
382 )
383 {
384 RTGCPHYS GCPhysSrc1Mem;
385 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
386 if (rcStrict != VINF_SUCCESS)
387 return rcStrict;
388
389 RTGCPHYS GCPhysSrc2Mem;
390 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
391 if (rcStrict != VINF_SUCCESS)
392 return rcStrict;
393
394 /*
395 * If we can map the page without trouble, do a block processing
396 * until the end of the current page.
397 */
398 OP_TYPE const *puSrc2Mem;
399 PGMPAGEMAPLOCK PgLockSrc2Mem;
400 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
401 if (rcStrict == VINF_SUCCESS)
402 {
403 OP_TYPE const *puSrc1Mem;
404 PGMPAGEMAPLOCK PgLockSrc1Mem;
405 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
406 if (rcStrict == VINF_SUCCESS)
407 {
408 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
409 {
410 /* The blocks differ; assume no element matched and only compare the last item to get the right eflags. */
411 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
412 uSrc1AddrReg += cLeftPage * cbIncr;
413 uSrc2AddrReg += cLeftPage * cbIncr;
414 uCounterReg -= cLeftPage;
415 }
416 else
417 {
418 /* Some mismatch, compare each item (and keep volatile
419 memory in mind). */
420 uint32_t off = 0;
421 do
422 {
423 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
424 off++;
425 } while ( off < cLeftPage
426 && !(uEFlags & X86_EFL_ZF));
427 uSrc1AddrReg += cbIncr * off;
428 uSrc2AddrReg += cbIncr * off;
429 uCounterReg -= off;
430 }
431
432 /* Update the registers before looping. */
433 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
434 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
435 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
436 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
437
438 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
439 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
440 if ( uCounterReg == 0
441 || (uEFlags & X86_EFL_ZF))
442 break;
443 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
444 continue;
445 }
446 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
447 }
448 }
449
450 /*
451 * Fallback - slow processing till the end of the current page.
452 * In the cross-page boundary case we will end up here with cLeftPage
453 * as 0, in which case we execute a single round of the loop.
454 */
455 do
456 {
457 OP_TYPE uValue1;
458 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
459 if (rcStrict != VINF_SUCCESS)
460 return rcStrict;
461 OP_TYPE uValue2;
462 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
463 if (rcStrict != VINF_SUCCESS)
464 return rcStrict;
465 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
466
467 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
468 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
469 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
470 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
471 cLeftPage--;
472 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
473 } while ( (int32_t)cLeftPage > 0
474 && !(uEFlags & X86_EFL_ZF));
475
476 /*
477 * Next page? Must check for interrupts and stuff here.
478 */
479 if ( uCounterReg == 0
480 || (uEFlags & X86_EFL_ZF))
481 break;
482 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
483 }
484
485 /*
486 * Done.
487 */
488 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
489 return VINF_SUCCESS;
490}
491
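/*
 * Editor's note (not part of the original file): the REPE and REPNE CMPS
 * bodies above are otherwise identical; the only difference is the sense of
 * ZF in the termination tests.  A minimal sketch of the two continue
 * predicates, with hypothetical names:
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stdbool.h>

#define DEMO_EFL_ZF  UINT32_C(0x40)   /* same bit position as X86_EFL_ZF */

static bool demoRepeContinue(uint64_t cx, uint32_t fEfl)
{
    return cx != 0 && (fEfl & DEMO_EFL_ZF) != 0;   /* keep going while equal */
}

static bool demoRepneContinue(uint64_t cx, uint32_t fEfl)
{
    return cx != 0 && (fEfl & DEMO_EFL_ZF) == 0;   /* keep going while not equal */
}
#endif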
492
493/**
494 * Implements 'REPE SCAS'.
495 */
496IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
497{
498 PVM pVM = pVCpu->CTX_SUFF(pVM);
499
500 /*
501 * Setup.
502 */
503 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
504 if (uCounterReg == 0)
505 {
506 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
507 return VINF_SUCCESS;
508 }
509
510 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
511 uint64_t uBaseAddr;
512 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
513 if (rcStrict != VINF_SUCCESS)
514 return rcStrict;
515
516 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
517 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
518 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
519 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
520
521 /*
522 * The loop.
523 */
524 for (;;)
525 {
526 /*
527 * Do segmentation and virtual page stuff.
528 */
529 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
530 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
531 if (cLeftPage > uCounterReg)
532 cLeftPage = uCounterReg;
533 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
534 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
535 && ( IS_64_BIT_CODE(pVCpu)
536 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
537 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
538 )
539 )
540 {
541 RTGCPHYS GCPhysMem;
542 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
543 if (rcStrict != VINF_SUCCESS)
544 return rcStrict;
545
546 /*
547 * If we can map the page without trouble, do a block processing
548 * until the end of the current page.
549 */
550 PGMPAGEMAPLOCK PgLockMem;
551 OP_TYPE const *puMem;
552 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
553 if (rcStrict == VINF_SUCCESS)
554 {
555 /* Search till we find a mismatching item. */
556 OP_TYPE uTmpValue;
557 bool fQuit;
558 uint32_t i = 0;
559 do
560 {
561 uTmpValue = puMem[i++];
562 fQuit = uTmpValue != uValueReg;
563 } while (i < cLeftPage && !fQuit);
564
565 /* Update the regs. */
566 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
567 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
568 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
569 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
570 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
571 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
572 if ( fQuit
573 || uCounterReg == 0)
574 break;
575
576 /* If unaligned, we drop thru and do the page crossing access
577 below. Otherwise, do the next page. */
578 if (!(uVirtAddr & (OP_SIZE - 1)))
579 {
580 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
581 continue;
582 }
583 cLeftPage = 0;
584 }
585 }
586
587 /*
588 * Fallback - slow processing till the end of the current page.
589 * In the cross-page boundary case we will end up here with cLeftPage
590 * as 0, in which case we execute a single round of the loop.
591 */
592 do
593 {
594 OP_TYPE uTmpValue;
595 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
596 if (rcStrict != VINF_SUCCESS)
597 return rcStrict;
598 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
599
600 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
601 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
602 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
603 cLeftPage--;
604 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
605 } while ( (int32_t)cLeftPage > 0
606 && (uEFlags & X86_EFL_ZF));
607
608 /*
609 * Next page? Must check for interrupts and stuff here.
610 */
611 if ( uCounterReg == 0
612 || !(uEFlags & X86_EFL_ZF))
613 break;
614 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
615 }
616
617 /*
618 * Done.
619 */
620 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
621 return VINF_SUCCESS;
622}
623
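/*
 * Editor's sketch (not from the original file): the SCAS fast path above
 * scans the mapped page with plain loads and runs the flag-producing compare
 * (iemAImpl_cmp_uXX) only once, on the element that ended the scan; the
 * earlier elements are known to leave ZF in the "keep going" state, so their
 * flag results are architecturally invisible.  Standalone sketch of the REPE
 * variant with hypothetical names:
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stdbool.h>

/* Returns the number of elements consumed; *pfStop is set when the scan hit
   a terminating (non-matching) element. */
static uint32_t demoRepeScasPage(const uint32_t *puMem, uint32_t cElems, uint32_t uValue, bool *pfStop)
{
    uint32_t i = 0;
    bool fStop;
    do
        fStop = puMem[i++] != uValue;
    while (i < cElems && !fStop);
    *pfStop = fStop;
    return i;   /* caller: rCX -= i, rDI += i * cbIncr, then one real CMP on element i-1 */
}
#endif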
624
625/**
626 * Implements 'REPNE SCAS'.
627 */
628IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
629{
630 PVM pVM = pVCpu->CTX_SUFF(pVM);
631
632 /*
633 * Setup.
634 */
635 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
636 if (uCounterReg == 0)
637 {
638 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
639 return VINF_SUCCESS;
640 }
641
642 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
643 uint64_t uBaseAddr;
644 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
645 if (rcStrict != VINF_SUCCESS)
646 return rcStrict;
647
648 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
649 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
650 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
651 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
652
653 /*
654 * The loop.
655 */
656 for (;;)
657 {
658 /*
659 * Do segmentation and virtual page stuff.
660 */
661 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
662 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
663 if (cLeftPage > uCounterReg)
664 cLeftPage = uCounterReg;
665 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
666 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
667 && ( IS_64_BIT_CODE(pVCpu)
668 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
669 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
670 )
671 )
672 {
673 RTGCPHYS GCPhysMem;
674 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
675 if (rcStrict != VINF_SUCCESS)
676 return rcStrict;
677
678 /*
679 * If we can map the page without trouble, do a block processing
680 * until the end of the current page.
681 */
682 PGMPAGEMAPLOCK PgLockMem;
683 OP_TYPE const *puMem;
684 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
685 if (rcStrict == VINF_SUCCESS)
686 {
687 /* Search till we find a mismatching item. */
688 OP_TYPE uTmpValue;
689 bool fQuit;
690 uint32_t i = 0;
691 do
692 {
693 uTmpValue = puMem[i++];
694 fQuit = uTmpValue == uValueReg;
695 } while (i < cLeftPage && !fQuit);
696
697 /* Update the regs. */
698 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
699 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
700 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
701 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
702 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
703 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
704 if ( fQuit
705 || uCounterReg == 0)
706 break;
707
708 /* If unaligned, we drop thru and do the page crossing access
709 below. Otherwise, do the next page. */
710 if (!(uVirtAddr & (OP_SIZE - 1)))
711 {
712 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
713 continue;
714 }
715 cLeftPage = 0;
716 }
717 }
718
719 /*
720 * Fallback - slow processing till the end of the current page.
721 * In the cross-page boundary case we will end up here with cLeftPage
722 * as 0, in which case we execute a single round of the loop.
723 */
724 do
725 {
726 OP_TYPE uTmpValue;
727 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
728 if (rcStrict != VINF_SUCCESS)
729 return rcStrict;
730 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
731 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
732 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
733 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
734 cLeftPage--;
735 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
736 } while ( (int32_t)cLeftPage > 0
737 && !(uEFlags & X86_EFL_ZF));
738
739 /*
740 * Next page? Must check for interrupts and stuff here.
741 */
742 if ( uCounterReg == 0
743 || (uEFlags & X86_EFL_ZF))
744 break;
745 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
746 }
747
748 /*
749 * Done.
750 */
751 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
752 return VINF_SUCCESS;
753}
754
755
756
757
758/**
759 * Implements 'REP MOVS'.
760 */
761IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
762{
763 PVM pVM = pVCpu->CTX_SUFF(pVM);
764
765 /*
766 * Setup.
767 */
768 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
769 if (uCounterReg == 0)
770 {
771 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
772 return VINF_SUCCESS;
773 }
774
775 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
776
777 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
778 uint64_t uSrcBase;
779 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uSrcBase);
780 if (rcStrict != VINF_SUCCESS)
781 return rcStrict;
782
783 uint64_t uDstBase;
784 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uDstBase);
785 if (rcStrict != VINF_SUCCESS)
786 return rcStrict;
787
788 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
789 ADDR_TYPE uSrcAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
790 ADDR_TYPE uDstAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
791
792 /*
793 * Be careful with handle bypassing.
794 */
795 if (pVCpu->iem.s.fBypassHandlers)
796 {
797 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
798 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
799 }
800
801 /*
802 * The loop.
803 */
804 for (;;)
805 {
806 /*
807 * Do segmentation and virtual page stuff.
808 */
809 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
810 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
811 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
812 if (cLeftSrcPage > uCounterReg)
813 cLeftSrcPage = uCounterReg;
814 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
815 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
816
817 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
818 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
819 && ( IS_64_BIT_CODE(pVCpu)
820 || ( uSrcAddrReg < pSrcHid->u32Limit
821 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
822 && uDstAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
823 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
824 )
825 )
826 {
827 RTGCPHYS GCPhysSrcMem;
828 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
829 if (rcStrict != VINF_SUCCESS)
830 return rcStrict;
831
832 RTGCPHYS GCPhysDstMem;
833 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
834 if (rcStrict != VINF_SUCCESS)
835 return rcStrict;
836
837 /*
838 * If we can map the page without trouble, do a block processing
839 * until the end of the current page.
840 */
841 PGMPAGEMAPLOCK PgLockDstMem;
842 OP_TYPE *puDstMem;
843 rcStrict = iemMemPageMap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
844 if (rcStrict == VINF_SUCCESS)
845 {
846 PGMPAGEMAPLOCK PgLockSrcMem;
847 OP_TYPE const *puSrcMem;
848 rcStrict = iemMemPageMap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
849 if (rcStrict == VINF_SUCCESS)
850 {
851 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
852 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
853
854 /* Perform the operation exactly (don't use memcpy to avoid
855 having to consider how its implementation would affect
856 any overlapping source and destination area). */
857 OP_TYPE const *puSrcCur = puSrcMem;
858 OP_TYPE *puDstCur = puDstMem;
859 uint32_t cTodo = cLeftPage;
860 while (cTodo-- > 0)
861 *puDstCur++ = *puSrcCur++;
862
863 /* Update the registers. */
864 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
865 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
866 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
867
868 iemMemPageUnmap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
869 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
870
871 if (uCounterReg == 0)
872 break;
873 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
874 continue;
875 }
876 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
877 }
878 }
879
880 /*
881 * Fallback - slow processing till the end of the current page.
882 * In the cross-page boundary case we will end up here with cLeftPage
883 * as 0, in which case we execute a single round of the loop.
884 */
885 do
886 {
887 OP_TYPE uValue;
888 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uSrcAddrReg);
889 if (rcStrict != VINF_SUCCESS)
890 return rcStrict;
891 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uDstAddrReg, uValue);
892 if (rcStrict != VINF_SUCCESS)
893 return rcStrict;
894
895 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cbIncr;
896 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cbIncr;
897 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
898 cLeftPage--;
899 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
900 } while ((int32_t)cLeftPage > 0);
901
902 /*
903 * Next page. Must check for interrupts and stuff here.
904 */
905 if (uCounterReg == 0)
906 break;
907 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
908 }
909
910 /*
911 * Done.
912 */
913 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
914 return VINF_SUCCESS;
915}
916
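/*
 * Editor's note (not part of the original file): the element-by-element copy
 * above deliberately avoids memcpy(), as the comment in the block says.  With
 * DF=0 a guest may use an overlapping forward copy on purpose (e.g. rdi equal
 * to rsi + 1, which smears the first element across the destination), and the
 * architectural result depends on copying low-to-high one element at a time;
 * memcpy() is free to copy in any order.  Minimal sketch:
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stddef.h>

static void demoForwardElementCopy(uint32_t *puDst, const uint32_t *puSrc, size_t cElems)
{
    /* Strictly ascending, one element at a time -- the order REP MOVS guarantees. */
    while (cElems-- > 0)
        *puDst++ = *puSrc++;
}
#endif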
917
918/**
919 * Implements 'REP STOS'.
920 */
921IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
922{
923 PVM pVM = pVCpu->CTX_SUFF(pVM);
924
925 /*
926 * Setup.
927 */
928 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
929 if (uCounterReg == 0)
930 {
931 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
932 return VINF_SUCCESS;
933 }
934
935 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
936
937 uint64_t uBaseAddr;
938 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
939 if (rcStrict != VINF_SUCCESS)
940 return rcStrict;
941
942 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
943 OP_TYPE const uValue = pVCpu->cpum.GstCtx.OP_rAX;
944 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
945
946 /*
947 * Be careful with handle bypassing.
948 */
949 /** @todo Permit doing a page if correctly aligned. */
950 if (pVCpu->iem.s.fBypassHandlers)
951 {
952 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
953 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
954 }
955
956 /*
957 * The loop.
958 */
959 for (;;)
960 {
961 /*
962 * Do segmentation and virtual page stuff.
963 */
964 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
965 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
966 if (cLeftPage > uCounterReg)
967 cLeftPage = uCounterReg;
968 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
969 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
970 && ( IS_64_BIT_CODE(pVCpu)
971 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
972 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
973 )
974 )
975 {
976 RTGCPHYS GCPhysMem;
977 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
978 if (rcStrict != VINF_SUCCESS)
979 return rcStrict;
980
981 /*
982 * If we can map the page without trouble, do a block processing
983 * until the end of the current page.
984 */
985 PGMPAGEMAPLOCK PgLockMem;
986 OP_TYPE *puMem;
987 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
988 if (rcStrict == VINF_SUCCESS)
989 {
990 /* Update the regs first so we can loop on cLeftPage. */
991 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
992 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
993
994 /* Do the memsetting. */
995#if OP_SIZE == 8
996 memset(puMem, uValue, cLeftPage);
997/*#elif OP_SIZE == 32
998 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
999#else
1000 while (cLeftPage-- > 0)
1001 *puMem++ = uValue;
1002#endif
1003
1004 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1005
1006 if (uCounterReg == 0)
1007 break;
1008
1009 /* If unaligned, we drop thru and do the page crossing access
1010 below. Otherwise, do the next page. */
1011 if (!(uVirtAddr & (OP_SIZE - 1)))
1012 {
1013 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1014 continue;
1015 }
1016 cLeftPage = 0;
1017 }
1018 }
1019
1020 /*
1021 * Fallback - slow processing till the end of the current page.
1022 * In the cross-page boundary case we will end up here with cLeftPage
1023 * as 0, in which case we execute a single round of the loop.
1024 */
1025 do
1026 {
1027 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uAddrReg, uValue);
1028 if (rcStrict != VINF_SUCCESS)
1029 return rcStrict;
1030 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1031 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1032 cLeftPage--;
1033 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1034 } while ((int32_t)cLeftPage > 0);
1035
1036 /*
1037 * Next page. Must check for interrupts and stuff here.
1038 */
1039 if (uCounterReg == 0)
1040 break;
1041 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1042 }
1043
1044 /*
1045 * Done.
1046 */
1047 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1048 return VINF_SUCCESS;
1049}
1050
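/*
 * Editor's note (not part of the original file): the block store above can
 * hand the whole chunk to memset() only in the byte variant, because memset()
 * replicates a single byte; the 16/32/64-bit instantiations fall back to an
 * explicit element loop so the full AX/EAX/RAX pattern is replicated (the
 * commented-out ASMMemFill32 line hints at a dedicated 32-bit fill helper).
 * Minimal sketch of the two cases:
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <string.h>
#include <stddef.h>

static void demoStosFill8(uint8_t *puMem, uint8_t uValue, size_t cElems)
{
    memset(puMem, uValue, cElems);          /* byte case: memset is exact */
}

static void demoStosFill32(uint32_t *puMem, uint32_t uValue, size_t cElems)
{
    while (cElems-- > 0)                    /* wider cases: replicate per element */
        *puMem++ = uValue;
}
#endif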
1051
1052/**
1053 * Implements 'REP LODS'.
1054 */
1055IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
1056{
1057 PVM pVM = pVCpu->CTX_SUFF(pVM);
1058
1059 /*
1060 * Setup.
1061 */
1062 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1063 if (uCounterReg == 0)
1064 {
1065 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1066 return VINF_SUCCESS;
1067 }
1068
1069 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg));
1070 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
1071 uint64_t uBaseAddr;
1072 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uBaseAddr);
1073 if (rcStrict != VINF_SUCCESS)
1074 return rcStrict;
1075
1076 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1077 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1078
1079 /*
1080 * The loop.
1081 */
1082 for (;;)
1083 {
1084 /*
1085 * Do segmentation and virtual page stuff.
1086 */
1087 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1088 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1089 if (cLeftPage > uCounterReg)
1090 cLeftPage = uCounterReg;
1091 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1092 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1093 && ( IS_64_BIT_CODE(pVCpu)
1094 || ( uAddrReg < pSrcHid->u32Limit
1095 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
1096 )
1097 )
1098 {
1099 RTGCPHYS GCPhysMem;
1100 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1101 if (rcStrict != VINF_SUCCESS)
1102 return rcStrict;
1103
1104 /*
1105 * If we can map the page without trouble, we can get away with
1106 * just reading the last value on the page.
1107 */
1108 PGMPAGEMAPLOCK PgLockMem;
1109 OP_TYPE const *puMem;
1110 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1111 if (rcStrict == VINF_SUCCESS)
1112 {
1113 /* Only get the last byte, the rest doesn't matter in direct access mode. */
1114#if OP_SIZE == 32
1115 pVCpu->cpum.GstCtx.rax = puMem[cLeftPage - 1];
1116#else
1117 pVCpu->cpum.GstCtx.OP_rAX = puMem[cLeftPage - 1];
1118#endif
1119 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
1120 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
1121 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1122
1123 if (uCounterReg == 0)
1124 break;
1125
1126 /* If unaligned, we drop thru and do the page crossing access
1127 below. Otherwise, do the next page. */
1128 if (!(uVirtAddr & (OP_SIZE - 1)))
1129 {
1130 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1131 continue;
1132 }
1133 cLeftPage = 0;
1134 }
1135 }
1136
1137 /*
1138 * Fallback - slow processing till the end of the current page.
1139 * In the cross-page boundary case we will end up here with cLeftPage
1140 * as 0, in which case we execute a single round of the loop.
1141 */
1142 do
1143 {
1144 OP_TYPE uTmpValue;
1145 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, iEffSeg, uAddrReg);
1146 if (rcStrict != VINF_SUCCESS)
1147 return rcStrict;
1148#if OP_SIZE == 32
1149 pVCpu->cpum.GstCtx.rax = uTmpValue;
1150#else
1151 pVCpu->cpum.GstCtx.OP_rAX = uTmpValue;
1152#endif
1153 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1154 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1155 cLeftPage--;
1156 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1157 } while ((int32_t)cLeftPage > 0);
1158
1159 if (rcStrict != VINF_SUCCESS)
1160 break;
1161
1162 /*
1163 * Next page. Must check for interrupts and stuff here.
1164 */
1165 if (uCounterReg == 0)
1166 break;
1167 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1168 }
1169
1170 /*
1171 * Done.
1172 */
1173 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1174 return VINF_SUCCESS;
1175}
1176
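/*
 * Editor's note (not part of the original file): REP LODS only leaves the
 * last element's value in rAX, which is why the mapped fast path above loads
 * just puMem[cLeftPage - 1] instead of iterating.  Minimal sketch:
 */
#if 0 /* illustrative only */
#include <stdint.h>

static uint32_t demoRepLodsPage(const uint32_t *puMem, uint32_t cElems)
{
    /* Architecturally equivalent to loading every element in turn: only the
       final load is observable in the accumulator. */
    return puMem[cElems - 1];
}
#endif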
1177
1178#if OP_SIZE != 64
1179
1180/**
1181 * Implements 'INS' (no rep)
1182 */
1183IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1184{
1185 PVM pVM = pVCpu->CTX_SUFF(pVM);
1186 VBOXSTRICTRC rcStrict;
1187
1188 /*
1189 * Be careful with handle bypassing.
1190 */
1191 if (pVCpu->iem.s.fBypassHandlers)
1192 {
1193 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1194 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1195 }
1196
1197 /*
1198 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1199 * segmentation and finally any #PF due to virtual address translation.
1200 * ASSUMES nothing is read from the I/O port before traps are taken.
1201 */
1202 if (!fIoChecked)
1203 {
1204 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1205 if (rcStrict != VINF_SUCCESS)
1206 return rcStrict;
1207 }
1208
1209 /*
1210 * Check nested-guest I/O intercepts.
1211 */
1212#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1213 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1214 {
1215 VMXEXITINSTRINFO ExitInstrInfo;
1216 ExitInstrInfo.u = 0;
1217 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1218 ExitInstrInfo.StrIo.iSegReg = X86_SREG_ES;
1219 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_INS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, false /* fRep */,
1220 ExitInstrInfo, cbInstr);
1221 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1222 return rcStrict;
1223 }
1224#endif
1225
1226#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1227 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1228 {
1229 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES,
1230 false /* fRep */, true /* fStrIo */, cbInstr);
1231 if (rcStrict == VINF_SVM_VMEXIT)
1232 return VINF_SUCCESS;
1233 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1234 {
1235 Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx,
1236 OP_SIZE / 8, VBOXSTRICTRC_VAL(rcStrict)));
1237 return rcStrict;
1238 }
1239 }
1240#endif
1241
1242 OP_TYPE *puMem;
1243 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI, IEM_ACCESS_DATA_W);
1244 if (rcStrict != VINF_SUCCESS)
1245 return rcStrict;
1246
1247 uint32_t u32Value = 0;
1248 rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, &u32Value, OP_SIZE / 8);
1249 if (IOM_SUCCESS(rcStrict))
1250 {
1251 *puMem = (OP_TYPE)u32Value;
1252# ifdef IN_RING3
1253 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1254# else
1255 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1256# endif
1257 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1258 {
1259 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1260 pVCpu->cpum.GstCtx.ADDR_rDI += OP_SIZE / 8;
1261 else
1262 pVCpu->cpum.GstCtx.ADDR_rDI -= OP_SIZE / 8;
1263 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1264 }
1265 else
1266 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), RT_FAILURE_NP(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1267 }
1268 return rcStrict;
1269}
1270
1271
1272/**
1273 * Implements 'REP INS'.
1274 */
1275IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1276{
1277 PVM pVM = pVCpu->CTX_SUFF(pVM);
1278
1279 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_TR);
1280
1281 /*
1282 * Setup.
1283 */
1284 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1285 VBOXSTRICTRC rcStrict;
1286 if (!fIoChecked)
1287 {
1288/** @todo check if this is too early for ecx=0. */
1289 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1290 if (rcStrict != VINF_SUCCESS)
1291 return rcStrict;
1292 }
1293
1294 /*
1295 * Check nested-guest I/O intercepts.
1296 */
1297#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1298 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1299 {
1300 VMXEXITINSTRINFO ExitInstrInfo;
1301 ExitInstrInfo.u = 0;
1302 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1303 ExitInstrInfo.StrIo.iSegReg = X86_SREG_ES;
1304 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_INS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, true /* fRep */,
1305 ExitInstrInfo, cbInstr);
1306 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1307 return rcStrict;
1308 }
1309#endif
1310
1311#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1312 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1313 {
1314 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, true /* fRep */,
1315 true /* fStrIo */, cbInstr);
1316 if (rcStrict == VINF_SVM_VMEXIT)
1317 return VINF_SUCCESS;
1318 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1319 {
1320 Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1321 VBOXSTRICTRC_VAL(rcStrict)));
1322 return rcStrict;
1323 }
1324 }
1325#endif
1326
1327 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1328 if (uCounterReg == 0)
1329 {
1330 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1331 return VINF_SUCCESS;
1332 }
1333
1334 uint64_t uBaseAddr;
1335 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
1336 if (rcStrict != VINF_SUCCESS)
1337 return rcStrict;
1338
1339 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1340 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
1341
1342 /*
1343 * Be careful with handle bypassing.
1344 */
1345 if (pVCpu->iem.s.fBypassHandlers)
1346 {
1347 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1348 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1349 }
1350
1351 /*
1352 * The loop.
1353 */
1354 for (;;)
1355 {
1356 /*
1357 * Do segmentation and virtual page stuff.
1358 */
1359 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1360 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1361 if (cLeftPage > uCounterReg)
1362 cLeftPage = uCounterReg;
1363 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1364 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1365 && ( IS_64_BIT_CODE(pVCpu)
1366 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
1367 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
1368 )
1369 )
1370 {
1371 RTGCPHYS GCPhysMem;
1372 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1373 if (rcStrict != VINF_SUCCESS)
1374 return rcStrict;
1375
1376 /*
1377 * If we can map the page without trouble, use the IOM
1378 * string I/O interface to do the work.
1379 */
1380 PGMPAGEMAPLOCK PgLockMem;
1381 OP_TYPE *puMem;
1382 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1383 if (rcStrict == VINF_SUCCESS)
1384 {
1385 uint32_t cTransfers = cLeftPage;
1386 rcStrict = IOMIOPortReadString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1387
1388 uint32_t cActualTransfers = cLeftPage - cTransfers;
1389 Assert(cActualTransfers <= cLeftPage);
1390 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
1391 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1392 puMem += cActualTransfers;
1393
1394 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1395
1396 if (rcStrict != VINF_SUCCESS)
1397 {
1398 if (IOM_SUCCESS(rcStrict))
1399 {
1400 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1401 if (uCounterReg == 0)
1402 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1403 }
1404 return rcStrict;
1405 }
1406
1407 /* If unaligned, we drop thru and do the page crossing access
1408 below. Otherwise, do the next page. */
1409 if (uCounterReg == 0)
1410 break;
1411 if (!(uVirtAddr & (OP_SIZE - 1)))
1412 {
1413 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1414 continue;
1415 }
1416 cLeftPage = 0;
1417 }
1418 }
1419
1420 /*
1421 * Fallback - slow processing till the end of the current page.
1422 * In the cross-page boundary case we will end up here with cLeftPage
1423 * as 0, in which case we execute a single round of the loop.
1424 *
1425 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1426 * I/O port, otherwise it wouldn't really be restartable.
1427 */
1428 /** @todo investigate what the CPU actually does with \#PF/\#GP
1429 * during INS. */
1430 do
1431 {
1432 OP_TYPE *puMem;
1433 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1434 if (rcStrict != VINF_SUCCESS)
1435 return rcStrict;
1436
1437 uint32_t u32Value = 0;
1438 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1439 if (!IOM_SUCCESS(rcStrict))
1440 {
1441 iemMemRollback(pVCpu);
1442 return rcStrict;
1443 }
1444
1445 *puMem = (OP_TYPE)u32Value;
1446# ifdef IN_RING3
1447 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1448# else
1449 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1450# endif
1451 if (rcStrict2 == VINF_SUCCESS)
1452 { /* likely */ }
1453 else
1454 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1455 RT_FAILURE(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1456
1457 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1458 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1459
1460 cLeftPage--;
1461 if (rcStrict != VINF_SUCCESS)
1462 {
1463 if (uCounterReg == 0)
1464 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1465 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1466 return rcStrict;
1467 }
1468
1469 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1470 } while ((int32_t)cLeftPage > 0);
1471
1472
1473 /*
1474 * Next page. Must check for interrupts and stuff here.
1475 */
1476 if (uCounterReg == 0)
1477 break;
1478 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1479 }
1480
1481 /*
1482 * Done.
1483 */
1484 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1485 return VINF_SUCCESS;
1486}
1487
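/*
 * Editor's sketch (not from the original file): the REP INS/OUTS fallback
 * loops above commit rDI/rSI and rCX after every single transfer before they
 * look at the I/O status.  If the port access has to be completed elsewhere
 * (for instance in ring-3), the function can return at once and the
 * instruction later resumes with the counter already reflecting the transfers
 * that did complete; RIP is only advanced once the counter reaches zero.  The
 * standalone sketch below mimics that restartable shape with a hypothetical
 * port-read callback; it is not VirtualBox API.
 */
#if 0 /* illustrative only */
#include <stdint.h>

typedef struct DEMOGUEST { uint64_t rdi, rcx; } DEMOGUEST;

static int demoRepInsFallback(DEMOGUEST *pGst, int (*pfnPortRead)(uint32_t *puValue),
                              uint32_t *puDst, int iDirElems /* +1 or -1, follows DF */)
{
    while (pGst->rcx != 0)
    {
        uint32_t uValue;
        int rc = pfnPortRead(&uValue);      /* may report "finish this elsewhere" */
        if (rc != 0)
            return rc;                      /* registers are already consistent for a restart */
        *puDst  = uValue;
        puDst  += iDirElems;
        pGst->rdi = (uint64_t)((int64_t)pGst->rdi + iDirElems * (int64_t)sizeof(uint32_t));
        pGst->rcx -= 1;                     /* committed per element, so a resume is exact */
    }
    return 0;
}
#endif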
1488
1489/**
1490 * Implements 'OUTS' (no rep)
1491 */
1492IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1493{
1494 PVM pVM = pVCpu->CTX_SUFF(pVM);
1495 VBOXSTRICTRC rcStrict;
1496
1497 /*
1498 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1499 * segmentation and finally any #PF due to virtual address translation.
1500 * ASSUMES nothing is written to the I/O port before traps are taken.
1501 */
1502 if (!fIoChecked)
1503 {
1504 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1505 if (rcStrict != VINF_SUCCESS)
1506 return rcStrict;
1507 }
1508
1509 /*
1510 * Check nested-guest I/O intercepts.
1511 */
1512#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1513 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1514 {
1515 VMXEXITINSTRINFO ExitInstrInfo;
1516 ExitInstrInfo.u = 0;
1517 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1518 ExitInstrInfo.StrIo.iSegReg = iEffSeg;
1519 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_OUTS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, false /* fRep */,
1520 ExitInstrInfo, cbInstr);
1521 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1522 return rcStrict;
1523 }
1524#endif
1525
1526#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1527 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1528 {
1529 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg,
1530 false /* fRep */, true /* fStrIo */, cbInstr);
1531 if (rcStrict == VINF_SVM_VMEXIT)
1532 return VINF_SUCCESS;
1533 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1534 {
1535 Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx,
1536 OP_SIZE / 8, VBOXSTRICTRC_VAL(rcStrict)));
1537 return rcStrict;
1538 }
1539 }
1540#endif
1541
1542 OP_TYPE uValue;
1543 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pVCpu->cpum.GstCtx.ADDR_rSI);
1544 if (rcStrict == VINF_SUCCESS)
1545 {
1546 rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, uValue, OP_SIZE / 8);
1547 if (IOM_SUCCESS(rcStrict))
1548 {
1549 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1550 pVCpu->cpum.GstCtx.ADDR_rSI += OP_SIZE / 8;
1551 else
1552 pVCpu->cpum.GstCtx.ADDR_rSI -= OP_SIZE / 8;
1553 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1554 if (rcStrict != VINF_SUCCESS)
1555 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1556 }
1557 }
1558 return rcStrict;
1559}
1560
1561
1562/**
1563 * Implements 'REP OUTS'.
1564 */
1565IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1566{
1567 PVM pVM = pVCpu->CTX_SUFF(pVM);
1568
1569 /*
1570 * Setup.
1571 */
1572 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1573 VBOXSTRICTRC rcStrict;
1574 if (!fIoChecked)
1575 {
1576/** @todo check if this is too early for ecx=0. */
1577 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1578 if (rcStrict != VINF_SUCCESS)
1579 return rcStrict;
1580 }
1581
1582 /*
1583 * Check nested-guest I/O intercepts.
1584 */
1585#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1586 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1587 {
1588 VMXEXITINSTRINFO ExitInstrInfo;
1589 ExitInstrInfo.u = 0;
1590 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1591 ExitInstrInfo.StrIo.iSegReg = iEffSeg;
1592 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_OUTS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, true /* fRep */,
1593 ExitInstrInfo, cbInstr);
1594 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1595 return rcStrict;
1596 }
1597#endif
1598
1599#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1600 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1601 {
1602 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, true /* fRep */,
1603 true /* fStrIo */, cbInstr);
1604 if (rcStrict == VINF_SVM_VMEXIT)
1605 return VINF_SUCCESS;
1606 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1607 {
1608 Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1609 VBOXSTRICTRC_VAL(rcStrict)));
1610 return rcStrict;
1611 }
1612 }
1613#endif
1614
1615 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1616 if (uCounterReg == 0)
1617 {
1618 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1619 return VINF_SUCCESS;
1620 }
1621
1622 PCCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iEffSeg);
1623 uint64_t uBaseAddr;
1624 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pHid, iEffSeg, &uBaseAddr);
1625 if (rcStrict != VINF_SUCCESS)
1626 return rcStrict;
1627
1628 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1629 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1630
1631 /*
1632 * The loop.
1633 */
1634 for (;;)
1635 {
1636 /*
1637 * Do segmentation and virtual page stuff.
1638 */
1639 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1640 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1641 if (cLeftPage > uCounterReg)
1642 cLeftPage = uCounterReg;
1643 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1644 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1645 && ( IS_64_BIT_CODE(pVCpu)
1646 || ( uAddrReg < pHid->u32Limit
1647 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1648 )
1649 )
1650 {
1651 RTGCPHYS GCPhysMem;
1652 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1653 if (rcStrict != VINF_SUCCESS)
1654 return rcStrict;
1655
1656 /*
1657 * If we can map the page without trouble, we use the IOM
1658 * string I/O interface to do the job.
1659 */
1660 PGMPAGEMAPLOCK PgLockMem;
1661 OP_TYPE const *puMem;
1662 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1663 if (rcStrict == VINF_SUCCESS)
1664 {
1665 uint32_t cTransfers = cLeftPage;
1666 rcStrict = IOMIOPortWriteString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1667
1668 uint32_t cActualTransfers = cLeftPage - cTransfers;
1669 Assert(cActualTransfers <= cLeftPage);
1670 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
1671 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1672 puMem += cActualTransfers;
1673
1674 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1675
1676 if (rcStrict != VINF_SUCCESS)
1677 {
1678 if (IOM_SUCCESS(rcStrict))
1679 {
1680 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1681 if (uCounterReg == 0)
1682 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1683 }
1684 return rcStrict;
1685 }
1686
1687 if (uCounterReg == 0)
1688 break;
1689
1690 /* If unaligned, we drop thru and do the page crossing access
1691 below. Otherwise, do the next page. */
1692 if (!(uVirtAddr & (OP_SIZE - 1)))
1693 {
1694 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1695 continue;
1696 }
1697 cLeftPage = 0;
1698 }
1699 }
1700
1701 /*
1702 * Fallback - slow processing till the end of the current page.
1703 * In the cross-page boundary case we will end up here with cLeftPage
1704 * as 0, in which case we execute a single round of the loop.
1705 *
1706 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1707 * I/O port, otherwise it wouldn't really be restartable.
1708 */
1709 /** @todo investigate what the CPU actually does with \#PF/\#GP
1710 * during OUTS. */
1711 do
1712 {
1713 OP_TYPE uValue;
1714 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uAddrReg);
1715 if (rcStrict != VINF_SUCCESS)
1716 return rcStrict;
1717
1718 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1719 if (IOM_SUCCESS(rcStrict))
1720 {
1721 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1722 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1723 cLeftPage--;
1724 }
1725 if (rcStrict != VINF_SUCCESS)
1726 {
1727 if (IOM_SUCCESS(rcStrict))
1728 {
1729 if (uCounterReg == 0)
1730 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1731 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1732 }
1733 return rcStrict;
1734 }
1735 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1736 } while ((int32_t)cLeftPage > 0);
1737
1738
1739 /*
1740 * Next page. Must check for interrupts and stuff here.
1741 */
1742 if (uCounterReg == 0)
1743 break;
1744 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1745 }
1746
1747 /*
1748 * Done.
1749 */
1750 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1751 return VINF_SUCCESS;
1752}
1753
1754#endif /* OP_SIZE != 64 */
1755
1756
1757#undef OP_rAX
1758#undef OP_SIZE
1759#undef ADDR_SIZE
1760#undef ADDR_rDI
1761#undef ADDR_rSI
1762#undef ADDR_rCX
1763#undef ADDR_rIP
1764#undef ADDR2_TYPE
1765#undef ADDR_TYPE
1766#undef ADDR2_TYPE
1767#undef ADDR_VMXSTRIO
1768#undef IS_64_BIT_CODE
1769#undef IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
1770#undef IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1771#undef IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1772