VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@96789

Last change on this file since 96789 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

1/* $Id: IEMAllCImplStrInstr.cpp.h 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*******************************************************************************
30* Defined Constants And Macros *
31*******************************************************************************/
32#if OP_SIZE == 8
33# define OP_rAX al
34#elif OP_SIZE == 16
35# define OP_rAX ax
36#elif OP_SIZE == 32
37# define OP_rAX eax
38#elif OP_SIZE == 64
39# define OP_rAX rax
40#else
41# error "Bad OP_SIZE."
42#endif
43#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
44
45#if ADDR_SIZE == 16
46# define ADDR_rDI di
47# define ADDR_rSI si
48# define ADDR_rCX cx
49# define ADDR2_TYPE uint32_t
50# define ADDR_VMXSTRIO 0
51#elif ADDR_SIZE == 32
52# define ADDR_rDI edi
53# define ADDR_rSI esi
54# define ADDR_rCX ecx
55# define ADDR2_TYPE uint32_t
56# define ADDR_VMXSTRIO 1
57#elif ADDR_SIZE == 64
58# define ADDR_rDI rdi
59# define ADDR_rSI rsi
60# define ADDR_rCX rcx
61# define ADDR2_TYPE uint64_t
62# define ADDR_VMXSTRIO 2
63# define IS_64_BIT_CODE(a_pVCpu) (true)
64#else
65# error "Bad ADDR_SIZE."
66#endif
67#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
68
69#if ADDR_SIZE == 64 || OP_SIZE == 64
70# define IS_64_BIT_CODE(a_pVCpu) (true)
71#elif ADDR_SIZE == 32
72# define IS_64_BIT_CODE(a_pVCpu) ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT)
73#else
74# define IS_64_BIT_CODE(a_pVCpu) (false)
75#endif
76
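/*
 * Usage sketch (an assumption drawn from the template structure, not text from
 * the original file): the source file that includes this template defines
 * OP_SIZE and ADDR_SIZE and then includes it once per operand/address size
 * combination, along the lines of:
 *
 *     #define OP_SIZE   16
 *     #define ADDR_SIZE 32
 *     #include "IEMAllCImplStrInstr.cpp.h"
 *
 * Each inclusion instantiates the iemCImpl_* functions below with the
 * OP_rAX/OP_TYPE and ADDR_rSI/ADDR_rDI/ADDR_TYPE macros set up above; the
 * #undef block at the end of the file clears them again for the next inclusion.
 */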
77/** @def IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
78 * Used in the outer (page-by-page) loop to check for reasons for returning
79 * before completing the instruction. In raw-mode we temporarily enable
80 * interrupts to let the host interrupt us. We cannot let big string operations
81 * hog the CPU, especially not in raw-mode.
82 */
83#define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \
84 do { \
85 if (RT_LIKELY( !VMCPU_FF_IS_ANY_SET(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
86 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
87 && !VM_FF_IS_ANY_SET(a_pVM, VM_FF_YIELD_REPSTR_MASK) \
88 )) \
89 { /* probable */ } \
90 else \
91 { \
92 LogFlow(("%s: Leaving early (outer)! ffcpu=%#RX64 ffvm=%#x\n", \
93 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
94 return VINF_SUCCESS; \
95 } \
96 } while (0)
97
98/** @def IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
99 * This is used in some of the inner loops to make sure we respond immediately
100 * to VMCPU_FF_IOM as well as outside requests. Use this for expensive
101 * instructions. Use IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN for
102 * ones that are typically cheap. */
103#define IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
104 do { \
105 if (RT_LIKELY( ( !VMCPU_FF_IS_ANY_SET(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
106 && !VM_FF_IS_ANY_SET(a_pVM, VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK)) \
107 || (a_fExitExpr) )) \
108 { /* very likely */ } \
109 else \
110 { \
111 LogFlow(("%s: Leaving early (inner)! ffcpu=%#RX64 ffvm=%#x\n", \
112 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
113 return VINF_SUCCESS; \
114 } \
115 } while (0)
116
117
118/** @def IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
119 * This is used in the inner loops where
120 * IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN isn't used. It only
121 * checks the CPU FFs so that we respond immediately to the pending IOM FF
122 * (status code is hidden in IEMCPU::rcPassUp by IEM memory commit code).
123 */
124#define IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
125 do { \
126 if (RT_LIKELY( !VMCPU_FF_IS_ANY_SET(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
127 || (a_fExitExpr) )) \
128 { /* very likely */ } \
129 else \
130 { \
131 LogFlow(("%s: Leaving early (inner)! ffcpu=%#RX64 (ffvm=%#x)\n", \
132 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
133 return VINF_SUCCESS; \
134 } \
135 } while (0)
136
137
138/**
139 * Implements 'REPE CMPS'.
140 */
141IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
142{
143 PVM pVM = pVCpu->CTX_SUFF(pVM);
144
145 /*
146 * Setup.
147 */
148 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
149 if (uCounterReg == 0)
150 {
151 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
152 return VINF_SUCCESS;
153 }
154
155 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
156
157 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
158 uint64_t uSrc1Base = 0; /* gcc: silence 'may be used uninitialized' warning */
159 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
160 if (rcStrict != VINF_SUCCESS)
161 return rcStrict;
162
163 uint64_t uSrc2Base = 0; /* gcc: silence 'may be used uninitialized' warning */
164 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
165 if (rcStrict != VINF_SUCCESS)
166 return rcStrict;
167
168 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
169 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
170 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
171 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
172
173 /*
174 * The loop.
175 */
176 for (;;)
177 {
178 /*
179 * Do segmentation and virtual page stuff.
180 */
181 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
182 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
183 uint32_t cLeftSrc1Page = (GUEST_PAGE_SIZE - (uVirtSrc1Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
184 if (cLeftSrc1Page > uCounterReg)
185 cLeftSrc1Page = uCounterReg;
186 uint32_t cLeftSrc2Page = (GUEST_PAGE_SIZE - (uVirtSrc2Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
187 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
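        /* Worked example (illustrative only, assuming GUEST_PAGE_SIZE == 4096 and
           OP_SIZE == 32, with uCounterReg == 100): uVirtSrc1Addr == 0x10ff8 leaves
           4096 - 0xff8 = 8 bytes in its page -> cLeftSrc1Page = 2 items, while
           uVirtSrc2Addr == 0x22000 starts a fresh page -> cLeftSrc2Page = 1024 items;
           thus cLeftPage = RT_MIN(2, 1024) = 2 and at most two items are processed
           before the addresses are re-evaluated on the next loop iteration. */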
188
189 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
190 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
191 && ( IS_64_BIT_CODE(pVCpu)
192 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
193 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
194 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
195 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
196 )
197 )
198 {
199 RTGCPHYS GCPhysSrc1Mem;
200 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
201 if (rcStrict != VINF_SUCCESS)
202 return rcStrict;
203
204 RTGCPHYS GCPhysSrc2Mem;
205 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
206 if (rcStrict != VINF_SUCCESS)
207 return rcStrict;
208
209 /*
210 * If we can map the page without trouble, do a block processing
211 * until the end of the current page.
212 */
213 PGMPAGEMAPLOCK PgLockSrc2Mem;
214 OP_TYPE const *puSrc2Mem;
215 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
216 if (rcStrict == VINF_SUCCESS)
217 {
218 PGMPAGEMAPLOCK PgLockSrc1Mem;
219 OP_TYPE const *puSrc1Mem;
220 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
221 if (rcStrict == VINF_SUCCESS)
222 {
223 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
224 {
225 /* All matches, only compare the last item to get the right eflags. */
226 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
227 uSrc1AddrReg += cLeftPage * cbIncr;
228 uSrc2AddrReg += cLeftPage * cbIncr;
229 uCounterReg -= cLeftPage;
230 }
231 else
232 {
233 /* Some mismatch, compare each item (and keep volatile
234 memory in mind). */
235 uint32_t off = 0;
236 do
237 {
238 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
239 off++;
240 } while ( off < cLeftPage
241 && (uEFlags & X86_EFL_ZF));
242 uSrc1AddrReg += cbIncr * off;
243 uSrc2AddrReg += cbIncr * off;
244 uCounterReg -= off;
245 }
246
247 /* Update the registers before looping. */
248 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
249 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
250 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
251 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
252
253 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
254 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
255 if ( uCounterReg == 0
256 || !(uEFlags & X86_EFL_ZF))
257 break;
258 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
259 continue;
260 }
261 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
262 }
263 }
264
265 /*
266 * Fallback - slow processing till the end of the current page.
267 * In the cross page boundary case we will end up here with cLeftPage
268 * as 0, we execute one loop then.
269 */
270 do
271 {
272 OP_TYPE uValue1;
273 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
274 if (rcStrict != VINF_SUCCESS)
275 return rcStrict;
276 OP_TYPE uValue2;
277 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
278 if (rcStrict != VINF_SUCCESS)
279 return rcStrict;
280 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
281
282 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
283 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
284 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
285 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
286 cLeftPage--;
287 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
288 } while ( (int32_t)cLeftPage > 0
289 && (uEFlags & X86_EFL_ZF));
290
291 /*
292 * Next page? Must check for interrupts and stuff here.
293 */
294 if ( uCounterReg == 0
295 || !(uEFlags & X86_EFL_ZF))
296 break;
297 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
298 }
299
300 /*
301 * Done.
302 */
303 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
304 return VINF_SUCCESS;
305}
306
307
308/**
309 * Implements 'REPNE CMPS'.
310 */
311IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
312{
313 PVM pVM = pVCpu->CTX_SUFF(pVM);
314
315 /*
316 * Setup.
317 */
318 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
319 if (uCounterReg == 0)
320 {
321 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
322 return VINF_SUCCESS;
323 }
324
325 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
326
327 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
328 uint64_t uSrc1Base = 0; /* gcc: silence 'may be used uninitialized' warning */
329 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
330 if (rcStrict != VINF_SUCCESS)
331 return rcStrict;
332
333 uint64_t uSrc2Base = 0; /* gcc: silence 'may be used uninitialized' warning */
334 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
335 if (rcStrict != VINF_SUCCESS)
336 return rcStrict;
337
338 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
339 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
340 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
341 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
342
343 /*
344 * The loop.
345 */
346 for (;;)
347 {
348 /*
349 * Do segmentation and virtual page stuff.
350 */
351 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
352 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
353 uint32_t cLeftSrc1Page = (GUEST_PAGE_SIZE - (uVirtSrc1Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
354 if (cLeftSrc1Page > uCounterReg)
355 cLeftSrc1Page = uCounterReg;
356 uint32_t cLeftSrc2Page = (GUEST_PAGE_SIZE - (uVirtSrc2Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
357 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
358
359 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
360 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
361 && ( IS_64_BIT_CODE(pVCpu)
362 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
363 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
364 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
365 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
366 )
367 )
368 {
369 RTGCPHYS GCPhysSrc1Mem;
370 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
371 if (rcStrict != VINF_SUCCESS)
372 return rcStrict;
373
374 RTGCPHYS GCPhysSrc2Mem;
375 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
376 if (rcStrict != VINF_SUCCESS)
377 return rcStrict;
378
379 /*
380 * If we can map the page without trouble, do a block processing
381 * until the end of the current page.
382 */
383 OP_TYPE const *puSrc2Mem;
384 PGMPAGEMAPLOCK PgLockSrc2Mem;
385 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
386 if (rcStrict == VINF_SUCCESS)
387 {
388 OP_TYPE const *puSrc1Mem;
389 PGMPAGEMAPLOCK PgLockSrc1Mem;
390 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
391 if (rcStrict == VINF_SUCCESS)
392 {
393 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
394 {
395 /* All matches, only compare the last item to get the right eflags. */
396 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
397 uSrc1AddrReg += cLeftPage * cbIncr;
398 uSrc2AddrReg += cLeftPage * cbIncr;
399 uCounterReg -= cLeftPage;
400 }
401 else
402 {
403 /* Some mismatch, compare each item (and keep volatile
404 memory in mind). */
405 uint32_t off = 0;
406 do
407 {
408 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
409 off++;
410 } while ( off < cLeftPage
411 && !(uEFlags & X86_EFL_ZF));
412 uSrc1AddrReg += cbIncr * off;
413 uSrc2AddrReg += cbIncr * off;
414 uCounterReg -= off;
415 }
416
417 /* Update the registers before looping. */
418 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
419 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
420 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
421 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
422
423 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
424 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
425 if ( uCounterReg == 0
426 || (uEFlags & X86_EFL_ZF))
427 break;
428 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
429 continue;
430 }
431 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
432 }
433 }
434
435 /*
436 * Fallback - slow processing till the end of the current page.
437 * In the cross page boundary case we will end up here with cLeftPage
438 * as 0, we execute one loop then.
439 */
440 do
441 {
442 OP_TYPE uValue1;
443 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
444 if (rcStrict != VINF_SUCCESS)
445 return rcStrict;
446 OP_TYPE uValue2;
447 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
448 if (rcStrict != VINF_SUCCESS)
449 return rcStrict;
450 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
451
452 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
453 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
454 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
455 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
456 cLeftPage--;
457 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
458 } while ( (int32_t)cLeftPage > 0
459 && !(uEFlags & X86_EFL_ZF));
460
461 /*
462 * Next page? Must check for interrupts and stuff here.
463 */
464 if ( uCounterReg == 0
465 || (uEFlags & X86_EFL_ZF))
466 break;
467 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
468 }
469
470 /*
471 * Done.
472 */
473 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
474 return VINF_SUCCESS;
475}
476
477
478/**
479 * Implements 'REPE SCAS'.
480 */
481IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
482{
483 PVM pVM = pVCpu->CTX_SUFF(pVM);
484
485 /*
486 * Setup.
487 */
488 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
489 if (uCounterReg == 0)
490 {
491 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
492 return VINF_SUCCESS;
493 }
494
495 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
496 uint64_t uBaseAddr = 0; /* gcc: silence 'may be used uninitialized' warning */
497 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
498 if (rcStrict != VINF_SUCCESS)
499 return rcStrict;
500
501 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
502 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
503 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
504 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
505
506 /*
507 * The loop.
508 */
509 for (;;)
510 {
511 /*
512 * Do segmentation and virtual page stuff.
513 */
514 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
515 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
516 if (cLeftPage > uCounterReg)
517 cLeftPage = uCounterReg;
518 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
519 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
520 && ( IS_64_BIT_CODE(pVCpu)
521 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
522 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
523 )
524 )
525 {
526 RTGCPHYS GCPhysMem;
527 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
528 if (rcStrict != VINF_SUCCESS)
529 return rcStrict;
530
531 /*
532 * If we can map the page without trouble, do a block processing
533 * until the end of the current page.
534 */
535 PGMPAGEMAPLOCK PgLockMem;
536 OP_TYPE const *puMem;
537 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
538 if (rcStrict == VINF_SUCCESS)
539 {
540 /* Search till we find a mismatching item. */
541 OP_TYPE uTmpValue;
542 bool fQuit;
543 uint32_t i = 0;
544 do
545 {
546 uTmpValue = puMem[i++];
547 fQuit = uTmpValue != uValueReg;
548 } while (i < cLeftPage && !fQuit);
549
550 /* Update the regs. */
551 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
552 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
553 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
554 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
555 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
556 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
557 if ( fQuit
558 || uCounterReg == 0)
559 break;
560
561 /* If unaligned, we drop thru and do the page crossing access
562 below. Otherwise, do the next page. */
563 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
564 {
565 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
566 continue;
567 }
568 cLeftPage = 0;
569 }
570 }
571
572 /*
573 * Fallback - slow processing till the end of the current page.
574 * In the cross page boundary case we will end up here with cLeftPage
575 * as 0, we execute one loop then.
576 */
577 do
578 {
579 OP_TYPE uTmpValue;
580 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
581 if (rcStrict != VINF_SUCCESS)
582 return rcStrict;
583 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
584
585 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
586 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
587 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
588 cLeftPage--;
589 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
590 } while ( (int32_t)cLeftPage > 0
591 && (uEFlags & X86_EFL_ZF));
592
593 /*
594 * Next page? Must check for interrupts and stuff here.
595 */
596 if ( uCounterReg == 0
597 || !(uEFlags & X86_EFL_ZF))
598 break;
599 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
600 }
601
602 /*
603 * Done.
604 */
605 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
606 return VINF_SUCCESS;
607}
608
609
610/**
611 * Implements 'REPNE SCAS'.
612 */
613IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
614{
615 PVM pVM = pVCpu->CTX_SUFF(pVM);
616
617 /*
618 * Setup.
619 */
620 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
621 if (uCounterReg == 0)
622 {
623 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
624 return VINF_SUCCESS;
625 }
626
627 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
628 uint64_t uBaseAddr = 0; /* gcc: silence 'may be used uninitialized' warning */
629 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
630 if (rcStrict != VINF_SUCCESS)
631 return rcStrict;
632
633 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
634 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
635 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
636 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
637
638 /*
639 * The loop.
640 */
641 for (;;)
642 {
643 /*
644 * Do segmentation and virtual page stuff.
645 */
646 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
647 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
648 if (cLeftPage > uCounterReg)
649 cLeftPage = uCounterReg;
650 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
651 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
652 && ( IS_64_BIT_CODE(pVCpu)
653 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
654 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
655 )
656 )
657 {
658 RTGCPHYS GCPhysMem;
659 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
660 if (rcStrict != VINF_SUCCESS)
661 return rcStrict;
662
663 /*
664 * If we can map the page without trouble, do a block processing
665 * until the end of the current page.
666 */
667 PGMPAGEMAPLOCK PgLockMem;
668 OP_TYPE const *puMem;
669 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
670 if (rcStrict == VINF_SUCCESS)
671 {
672 /* Search till we find a mismatching item. */
673 OP_TYPE uTmpValue;
674 bool fQuit;
675 uint32_t i = 0;
676 do
677 {
678 uTmpValue = puMem[i++];
679 fQuit = uTmpValue == uValueReg;
680 } while (i < cLeftPage && !fQuit);
681
682 /* Update the regs. */
683 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
684 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
685 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
686 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
687 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
688 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
689 if ( fQuit
690 || uCounterReg == 0)
691 break;
692
693 /* If unaligned, we drop thru and do the page crossing access
694 below. Otherwise, do the next page. */
695 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
696 {
697 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
698 continue;
699 }
700 cLeftPage = 0;
701 }
702 }
703
704 /*
705 * Fallback - slow processing till the end of the current page.
706 * In the cross page boundary case we will end up here with cLeftPage
707 * as 0, we execute one loop then.
708 */
709 do
710 {
711 OP_TYPE uTmpValue;
712 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
713 if (rcStrict != VINF_SUCCESS)
714 return rcStrict;
715 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
716 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
717 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
718 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
719 cLeftPage--;
720 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
721 } while ( (int32_t)cLeftPage > 0
722 && !(uEFlags & X86_EFL_ZF));
723
724 /*
725 * Next page? Must check for interrupts and stuff here.
726 */
727 if ( uCounterReg == 0
728 || (uEFlags & X86_EFL_ZF))
729 break;
730 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
731 }
732
733 /*
734 * Done.
735 */
736 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
737 return VINF_SUCCESS;
738}
739
740
741
742
743/**
744 * Implements 'REP MOVS'.
745 */
746IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
747{
748 PVM pVM = pVCpu->CTX_SUFF(pVM);
749
750 /*
751 * Setup.
752 */
753 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
754 if (uCounterReg == 0)
755 {
756 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
757 return VINF_SUCCESS;
758 }
759
760 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
761
762 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
763 uint64_t uSrcBase = 0; /* gcc: silence 'may be used uninitialized' warning */
764 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uSrcBase);
765 if (rcStrict != VINF_SUCCESS)
766 return rcStrict;
767
768 uint64_t uDstBase = 0; /* gcc: silence 'may be used uninitialized' warning */
769 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uDstBase);
770 if (rcStrict != VINF_SUCCESS)
771 return rcStrict;
772
773 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
774 ADDR_TYPE uSrcAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
775 ADDR_TYPE uDstAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
776
777 /*
778 * Be careful with handle bypassing.
779 */
780 if (pVCpu->iem.s.fBypassHandlers)
781 {
782 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
783 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
784 }
785
786 /*
787 * The loop.
788 */
789 for (;;)
790 {
791 /*
792 * Do segmentation and virtual page stuff.
793 */
794 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
795 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
796 uint32_t cLeftSrcPage = (GUEST_PAGE_SIZE - (uVirtSrcAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
797 if (cLeftSrcPage > uCounterReg)
798 cLeftSrcPage = uCounterReg;
799 uint32_t cLeftDstPage = (GUEST_PAGE_SIZE - (uVirtDstAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
800 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
801
802 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
803 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
804 && ( IS_64_BIT_CODE(pVCpu)
805 || ( uSrcAddrReg < pSrcHid->u32Limit
806 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
807 && uDstAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
808 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
809 )
810 )
811 {
812 RTGCPHYS GCPhysSrcMem;
813 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
814 if (rcStrict != VINF_SUCCESS)
815 return rcStrict;
816
817 RTGCPHYS GCPhysDstMem;
818 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
819 if (rcStrict != VINF_SUCCESS)
820 return rcStrict;
821
822 /*
823 * If we can map the page without trouble, do a block processing
824 * until the end of the current page.
825 */
826 PGMPAGEMAPLOCK PgLockDstMem;
827 OP_TYPE *puDstMem;
828 rcStrict = iemMemPageMap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
829 if (rcStrict == VINF_SUCCESS)
830 {
831 PGMPAGEMAPLOCK PgLockSrcMem;
832 OP_TYPE const *puSrcMem;
833 rcStrict = iemMemPageMap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
834 if (rcStrict == VINF_SUCCESS)
835 {
836 Assert( (GCPhysSrcMem >> GUEST_PAGE_SHIFT) != (GCPhysDstMem >> GUEST_PAGE_SHIFT)
837 || ((uintptr_t)puSrcMem >> GUEST_PAGE_SHIFT) == ((uintptr_t)puDstMem >> GUEST_PAGE_SHIFT));
838
839 /* Perform the operation exactly (don't use memcpy to avoid
840 having to consider how its implementation would affect
841 any overlapping source and destination area). */
842 OP_TYPE const *puSrcCur = puSrcMem;
843 OP_TYPE *puDstCur = puDstMem;
844 uint32_t cTodo = cLeftPage;
845 while (cTodo-- > 0)
846 *puDstCur++ = *puSrcCur++;
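                /* Note added for clarity (standard x86 semantics, not from the
                   original file): when the guest's source and destination areas
                   overlap -- e.g. REP MOVSB with ES:rDI == DS:rSI + 1, the classic
                   trick for smearing one byte across a buffer -- this forward
                   element-by-element copy reproduces the architectural result
                   because each store feeds a subsequent load, whereas memcpy()
                   has undefined behaviour for overlapping regions. */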
847
848 /* Update the registers. */
849 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
850 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
851 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
852
853 iemMemPageUnmap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
854 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
855
856 if (uCounterReg == 0)
857 break;
858 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
859 continue;
860 }
861 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
862 }
863 }
864
865 /*
866 * Fallback - slow processing till the end of the current page.
867 * In the cross page boundary case we will end up here with cLeftPage
868 * as 0, we execute one loop then.
869 */
870 do
871 {
872 OP_TYPE uValue;
873 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uSrcAddrReg);
874 if (rcStrict != VINF_SUCCESS)
875 return rcStrict;
876 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uDstAddrReg, uValue);
877 if (rcStrict != VINF_SUCCESS)
878 return rcStrict;
879
880 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cbIncr;
881 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cbIncr;
882 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
883 cLeftPage--;
884 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
885 } while ((int32_t)cLeftPage > 0);
886
887 /*
888 * Next page. Must check for interrupts and stuff here.
889 */
890 if (uCounterReg == 0)
891 break;
892 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
893 }
894
895 /*
896 * Done.
897 */
898 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
899 return VINF_SUCCESS;
900}
901
902
903/**
904 * Implements 'REP STOS'.
905 */
906IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
907{
908 PVM pVM = pVCpu->CTX_SUFF(pVM);
909
910 /*
911 * Setup.
912 */
913 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
914 if (uCounterReg == 0)
915 {
916 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
917 return VINF_SUCCESS;
918 }
919
920 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
921
922 uint64_t uBaseAddr = 0; /* gcc: silence 'may be used uninitialized' warning */
923 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
924 if (rcStrict != VINF_SUCCESS)
925 return rcStrict;
926
927 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
928 OP_TYPE const uValue = pVCpu->cpum.GstCtx.OP_rAX;
929 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
930
931 /*
932 * Be careful with handle bypassing.
933 */
934 /** @todo Permit doing a page if correctly aligned. */
935 if (pVCpu->iem.s.fBypassHandlers)
936 {
937 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
938 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
939 }
940
941 /*
942 * The loop.
943 */
944 for (;;)
945 {
946 /*
947 * Do segmentation and virtual page stuff.
948 */
949 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
950 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
951 if (cLeftPage > uCounterReg)
952 cLeftPage = uCounterReg;
953 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
954 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
955 && ( IS_64_BIT_CODE(pVCpu)
956 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
957 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
958 )
959 )
960 {
961 RTGCPHYS GCPhysMem;
962 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
963 if (rcStrict != VINF_SUCCESS)
964 return rcStrict;
965
966 /*
967 * If we can map the page without trouble, do a block processing
968 * until the end of the current page.
969 */
970 PGMPAGEMAPLOCK PgLockMem;
971 OP_TYPE *puMem;
972 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
973 if (rcStrict == VINF_SUCCESS)
974 {
975 /* Update the regs first so we can loop on cLeftPage. */
976 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
977 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
978
979 /* Do the memsetting. */
980#if OP_SIZE == 8
981 memset(puMem, uValue, cLeftPage);
982/*#elif OP_SIZE == 32
983 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
984#else
985 while (cLeftPage-- > 0)
986 *puMem++ = uValue;
987#endif
988
989 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
990
991 if (uCounterReg == 0)
992 break;
993
994 /* If unaligned, we drop thru and do the page crossing access
995 below. Otherwise, do the next page. */
996 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
997 {
998 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
999 continue;
1000 }
1001 cLeftPage = 0;
1002 }
1003 /* If we got an invalid physical address in the page table, just skip
1004 ahead to the next page or the counter reaches zero. This crazy
1005 optimization is for a buggy EFI firmware that's driving me nuts. */
1006 else if (rcStrict == VERR_PGM_PHYS_TLB_UNASSIGNED)
1007 {
1008 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
1009 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
1010 if (uCounterReg == 0)
1011 break;
1012 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1013 {
1014 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1015 continue;
1016 }
1017 }
1018 }
1019
1020 /*
1021 * Fallback - slow processing till the end of the current page.
1022 * In the cross page boundary case we will end up here with cLeftPage
1023 * as 0, we execute one loop then.
1024 */
1025 do
1026 {
1027 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uAddrReg, uValue);
1028 if (rcStrict != VINF_SUCCESS)
1029 return rcStrict;
1030 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1031 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1032 cLeftPage--;
1033 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1034 } while ((int32_t)cLeftPage > 0);
1035
1036 /*
1037 * Next page. Must check for interrupts and stuff here.
1038 */
1039 if (uCounterReg == 0)
1040 break;
1041 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1042 }
1043
1044 /*
1045 * Done.
1046 */
1047 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1048 return VINF_SUCCESS;
1049}
1050
1051
1052/**
1053 * Implements 'REP LODS'.
1054 */
1055IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
1056{
1057 PVM pVM = pVCpu->CTX_SUFF(pVM);
1058
1059 /*
1060 * Setup.
1061 */
1062 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1063 if (uCounterReg == 0)
1064 {
1065 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1066 return VINF_SUCCESS;
1067 }
1068
1069 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg));
1070 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
1071 uint64_t uBaseAddr = 0; /* gcc: silence 'may be used uninitialized' warning */
1072 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uBaseAddr);
1073 if (rcStrict != VINF_SUCCESS)
1074 return rcStrict;
1075
1076 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1077 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1078
1079 /*
1080 * The loop.
1081 */
1082 for (;;)
1083 {
1084 /*
1085 * Do segmentation and virtual page stuff.
1086 */
1087 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1088 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1089 if (cLeftPage > uCounterReg)
1090 cLeftPage = uCounterReg;
1091 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1092 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1093 && ( IS_64_BIT_CODE(pVCpu)
1094 || ( uAddrReg < pSrcHid->u32Limit
1095 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
1096 )
1097 )
1098 {
1099 RTGCPHYS GCPhysMem;
1100 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1101 if (rcStrict != VINF_SUCCESS)
1102 return rcStrict;
1103
1104 /*
1105 * If we can map the page without trouble, we can get away with
1106 * just reading the last value on the page.
1107 */
1108 PGMPAGEMAPLOCK PgLockMem;
1109 OP_TYPE const *puMem;
1110 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1111 if (rcStrict == VINF_SUCCESS)
1112 {
1113 /* Only get the last byte, the rest doesn't matter in direct access mode. */
1114#if OP_SIZE == 32
1115 pVCpu->cpum.GstCtx.rax = puMem[cLeftPage - 1];
1116#else
1117 pVCpu->cpum.GstCtx.OP_rAX = puMem[cLeftPage - 1];
1118#endif
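            /* Note added for clarity: the OP_SIZE == 32 case above deliberately
               writes the full 64-bit rax field -- a 32-bit register write
               zero-extends into bits 63:32 on x86-64, so 'lodsd' must clear the
               upper half of RAX, while the 8-, 16- and 64-bit forms can use the
               OP_rAX field directly (partial writes to AL/AX leave the upper
               bits untouched, matching the architecture). */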
1119 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
1120 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
1121 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1122
1123 if (uCounterReg == 0)
1124 break;
1125
1126 /* If unaligned, we drop thru and do the page crossing access
1127 below. Otherwise, do the next page. */
1128 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1129 {
1130 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1131 continue;
1132 }
1133 cLeftPage = 0;
1134 }
1135 }
1136
1137 /*
1138 * Fallback - slow processing till the end of the current page.
1139 * In the cross page boundary case we will end up here with cLeftPage
1140 * as 0, we execute one loop then.
1141 */
1142 do
1143 {
1144 OP_TYPE uTmpValue;
1145 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, iEffSeg, uAddrReg);
1146 if (rcStrict != VINF_SUCCESS)
1147 return rcStrict;
1148#if OP_SIZE == 32
1149 pVCpu->cpum.GstCtx.rax = uTmpValue;
1150#else
1151 pVCpu->cpum.GstCtx.OP_rAX = uTmpValue;
1152#endif
1153 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1154 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1155 cLeftPage--;
1156 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1157 } while ((int32_t)cLeftPage > 0);
1158
1159 if (rcStrict != VINF_SUCCESS)
1160 break;
1161
1162 /*
1163 * Next page. Must check for interrupts and stuff here.
1164 */
1165 if (uCounterReg == 0)
1166 break;
1167 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1168 }
1169
1170 /*
1171 * Done.
1172 */
1173 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1174 return VINF_SUCCESS;
1175}
1176
1177
1178#if OP_SIZE != 64
1179
1180/**
1181 * Implements 'INS' (no rep)
1182 */
1183IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1184{
1185 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1186 VBOXSTRICTRC rcStrict;
1187
1188 /*
1189 * Be careful with handle bypassing.
1190 */
1191 if (pVCpu->iem.s.fBypassHandlers)
1192 {
1193 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1194 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1195 }
1196
1197 /*
1198 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1199 * segmentation and finally any #PF due to virtual address translation.
1200 * ASSUMES nothing is read from the I/O port before traps are taken.
1201 */
1202 if (!fIoChecked)
1203 {
1204 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1205 if (rcStrict != VINF_SUCCESS)
1206 return rcStrict;
1207 }
1208
1209 /*
1210 * Check nested-guest I/O intercepts.
1211 */
1212#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1213 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1214 {
1215 VMXEXITINSTRINFO ExitInstrInfo;
1216 ExitInstrInfo.u = 0;
1217 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1218 ExitInstrInfo.StrIo.iSegReg = X86_SREG_ES;
1219 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_INS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, false /* fRep */,
1220 ExitInstrInfo, cbInstr);
1221 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1222 return rcStrict;
1223 }
1224#endif
1225
1226#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1227 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1228 {
1229 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES,
1230 false /* fRep */, true /* fStrIo */, cbInstr);
1231 if (rcStrict == VINF_SVM_VMEXIT)
1232 return VINF_SUCCESS;
1233 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1234 {
1235 Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx,
1236 OP_SIZE / 8, VBOXSTRICTRC_VAL(rcStrict)));
1237 return rcStrict;
1238 }
1239 }
1240#endif
1241
1242 OP_TYPE *puMem;
1243 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI,
1244 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
1245 if (rcStrict != VINF_SUCCESS)
1246 return rcStrict;
1247
1248 uint32_t u32Value = 0;
1249 rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, &u32Value, OP_SIZE / 8);
1250 if (IOM_SUCCESS(rcStrict))
1251 {
1252 *puMem = (OP_TYPE)u32Value;
1253# ifdef IN_RING3
1254 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1255# else
1256 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1257# endif
1258 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1259 {
1260 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1261 pVCpu->cpum.GstCtx.ADDR_rDI += OP_SIZE / 8;
1262 else
1263 pVCpu->cpum.GstCtx.ADDR_rDI -= OP_SIZE / 8;
1264 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1265 }
1266 else
1267 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), RT_FAILURE_NP(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1268 }
1269 return rcStrict;
1270}
1271
1272
1273/**
1274 * Implements 'REP INS'.
1275 */
1276IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1277{
1278 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1279
1280 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_TR);
1281
1282 /*
1283 * Setup.
1284 */
1285 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1286 VBOXSTRICTRC rcStrict;
1287 if (!fIoChecked)
1288 {
1289/** @todo check if this is too early for ecx=0. */
1290 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1291 if (rcStrict != VINF_SUCCESS)
1292 return rcStrict;
1293 }
1294
1295 /*
1296 * Check nested-guest I/O intercepts.
1297 */
1298#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1299 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1300 {
1301 VMXEXITINSTRINFO ExitInstrInfo;
1302 ExitInstrInfo.u = 0;
1303 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1304 ExitInstrInfo.StrIo.iSegReg = X86_SREG_ES;
1305 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_INS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, true /* fRep */,
1306 ExitInstrInfo, cbInstr);
1307 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1308 return rcStrict;
1309 }
1310#endif
1311
1312#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1313 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1314 {
1315 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, true /* fRep */,
1316 true /* fStrIo */, cbInstr);
1317 if (rcStrict == VINF_SVM_VMEXIT)
1318 return VINF_SUCCESS;
1319 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1320 {
1321 Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1322 VBOXSTRICTRC_VAL(rcStrict)));
1323 return rcStrict;
1324 }
1325 }
1326#endif
1327
1328 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1329 if (uCounterReg == 0)
1330 {
1331 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1332 return VINF_SUCCESS;
1333 }
1334
1335 uint64_t uBaseAddr = 0; /* gcc: silence 'may be used uninitialized' warning */
1336 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
1337 if (rcStrict != VINF_SUCCESS)
1338 return rcStrict;
1339
1340 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1341 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
1342
1343 /*
1344 * Be careful with handle bypassing.
1345 */
1346 if (pVCpu->iem.s.fBypassHandlers)
1347 {
1348 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1349 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1350 }
1351
1352 /*
1353 * The loop.
1354 */
1355 for (;;)
1356 {
1357 /*
1358 * Do segmentation and virtual page stuff.
1359 */
1360 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1361 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1362 if (cLeftPage > uCounterReg)
1363 cLeftPage = uCounterReg;
1364 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1365 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1366 && ( IS_64_BIT_CODE(pVCpu)
1367 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
1368 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
1369 )
1370 )
1371 {
1372 RTGCPHYS GCPhysMem;
1373 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1374 if (rcStrict != VINF_SUCCESS)
1375 return rcStrict;
1376
1377 /*
1378 * If we can map the page without trouble, use the IOM
1379 * string I/O interface to do the work.
1380 */
1381 PGMPAGEMAPLOCK PgLockMem;
1382 OP_TYPE *puMem;
1383 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1384 if (rcStrict == VINF_SUCCESS)
1385 {
1386 uint32_t cTransfers = cLeftPage;
1387 rcStrict = IOMIOPortReadString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1388
1389 uint32_t cActualTransfers = cLeftPage - cTransfers;
1390 Assert(cActualTransfers <= cLeftPage);
1391 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
1392 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1393 puMem += cActualTransfers;
1394
1395 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1396
1397 if (rcStrict != VINF_SUCCESS)
1398 {
1399 if (IOM_SUCCESS(rcStrict))
1400 {
1401 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1402 if (uCounterReg == 0)
1403 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1404 }
1405 return rcStrict;
1406 }
1407
1408 /* If unaligned, we drop thru and do the page crossing access
1409 below. Otherwise, do the next page. */
1410 if (uCounterReg == 0)
1411 break;
1412 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1413 {
1414 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1415 continue;
1416 }
1417 cLeftPage = 0;
1418 }
1419 }
1420
1421 /*
1422 * Fallback - slow processing till the end of the current page.
1423 * In the cross page boundary case we will end up here with cLeftPage
1424 * as 0, we execute one loop then.
1425 *
1426 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1427 * I/O port, otherwise it wouldn't really be restartable.
1428 */
1429 /** @todo investigate what the CPU actually does with \#PF/\#GP
1430 * during INS. */
1431 do
1432 {
1433 OP_TYPE *puMem;
1434 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg,
1435 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
1436 if (rcStrict != VINF_SUCCESS)
1437 return rcStrict;
1438
1439 uint32_t u32Value = 0;
1440 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1441 if (!IOM_SUCCESS(rcStrict))
1442 {
1443 iemMemRollback(pVCpu);
1444 return rcStrict;
1445 }
1446
1447 *puMem = (OP_TYPE)u32Value;
1448# ifdef IN_RING3
1449 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1450# else
1451 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1452# endif
1453 if (rcStrict2 == VINF_SUCCESS)
1454 { /* likely */ }
1455 else
1456 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1457 RT_FAILURE(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1458
1459 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1460 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1461
1462 cLeftPage--;
1463 if (rcStrict != VINF_SUCCESS)
1464 {
1465 if (uCounterReg == 0)
1466 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1467 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1468 return rcStrict;
1469 }
1470
1471 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1472 } while ((int32_t)cLeftPage > 0);
1473
1474
1475 /*
1476 * Next page. Must check for interrupts and stuff here.
1477 */
1478 if (uCounterReg == 0)
1479 break;
1480 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1481 }
1482
1483 /*
1484 * Done.
1485 */
1486 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1487 return VINF_SUCCESS;
1488}
1489
1490
1491/**
1492 * Implements 'OUTS' (no rep)
1493 */
1494IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1495{
1496 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1497 VBOXSTRICTRC rcStrict;
1498
1499 /*
1500 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1501 * segmentation and finally any #PF due to virtual address translation.
1502 * ASSUMES nothing is read from the I/O port before traps are taken.
1503 */
1504 if (!fIoChecked)
1505 {
1506 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1507 if (rcStrict != VINF_SUCCESS)
1508 return rcStrict;
1509 }
1510
1511 /*
1512 * Check nested-guest I/O intercepts.
1513 */
1514#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1515 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1516 {
1517 VMXEXITINSTRINFO ExitInstrInfo;
1518 ExitInstrInfo.u = 0;
1519 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1520 ExitInstrInfo.StrIo.iSegReg = iEffSeg;
1521 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_OUTS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, false /* fRep */,
1522 ExitInstrInfo, cbInstr);
1523 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1524 return rcStrict;
1525 }
1526#endif
1527
1528#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1529 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1530 {
1531 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg,
1532 false /* fRep */, true /* fStrIo */, cbInstr);
1533 if (rcStrict == VINF_SVM_VMEXIT)
1534 return VINF_SUCCESS;
1535 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1536 {
1537 Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx,
1538 OP_SIZE / 8, VBOXSTRICTRC_VAL(rcStrict)));
1539 return rcStrict;
1540 }
1541 }
1542#endif
1543
1544 OP_TYPE uValue;
1545 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pVCpu->cpum.GstCtx.ADDR_rSI);
1546 if (rcStrict == VINF_SUCCESS)
1547 {
1548 rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, uValue, OP_SIZE / 8);
1549 if (IOM_SUCCESS(rcStrict))
1550 {
1551 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1552 pVCpu->cpum.GstCtx.ADDR_rSI += OP_SIZE / 8;
1553 else
1554 pVCpu->cpum.GstCtx.ADDR_rSI -= OP_SIZE / 8;
1555 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1556 if (rcStrict != VINF_SUCCESS)
1557 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1558 }
1559 }
1560 return rcStrict;
1561}
1562
1563
1564/**
1565 * Implements 'REP OUTS'.
1566 */
1567IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1568{
1569 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1570
1571 /*
1572 * Setup.
1573 */
1574 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1575 VBOXSTRICTRC rcStrict;
1576 if (!fIoChecked)
1577 {
1578/** @todo check if this is too early for ecx=0. */
1579 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1580 if (rcStrict != VINF_SUCCESS)
1581 return rcStrict;
1582 }
1583
1584 /*
1585 * Check nested-guest I/O intercepts.
1586 */
1587#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1588 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1589 {
1590 VMXEXITINSTRINFO ExitInstrInfo;
1591 ExitInstrInfo.u = 0;
1592 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1593 ExitInstrInfo.StrIo.iSegReg = iEffSeg;
1594 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_OUTS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, true /* fRep */,
1595 ExitInstrInfo, cbInstr);
1596 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1597 return rcStrict;
1598 }
1599#endif
1600
1601#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1602 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1603 {
1604 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, true /* fRep */,
1605 true /* fStrIo */, cbInstr);
1606 if (rcStrict == VINF_SVM_VMEXIT)
1607 return VINF_SUCCESS;
1608 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1609 {
1610 Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1611 VBOXSTRICTRC_VAL(rcStrict)));
1612 return rcStrict;
1613 }
1614 }
1615#endif
1616
1617 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1618 if (uCounterReg == 0)
1619 {
1620 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1621 return VINF_SUCCESS;
1622 }
1623
1624 PCCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iEffSeg);
1625 uint64_t uBaseAddr = 0; /* gcc: silence 'may be used uninitialized' warning */
1626 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pHid, iEffSeg, &uBaseAddr);
1627 if (rcStrict != VINF_SUCCESS)
1628 return rcStrict;
1629
1630 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1631 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1632
1633 /*
1634 * The loop.
1635 */
1636 for (;;)
1637 {
1638 /*
1639 * Do segmentation and virtual page stuff.
1640 */
1641 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1642 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1643 if (cLeftPage > uCounterReg)
1644 cLeftPage = uCounterReg;
1645 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1646 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1647 && ( IS_64_BIT_CODE(pVCpu)
1648 || ( uAddrReg < pHid->u32Limit
1649 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1650 )
1651 )
1652 {
1653 RTGCPHYS GCPhysMem;
1654 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1655 if (rcStrict != VINF_SUCCESS)
1656 return rcStrict;
1657
1658 /*
1659 * If we can map the page without trouble, we use the IOM
1660 * string I/O interface to do the job.
1661 */
1662 PGMPAGEMAPLOCK PgLockMem;
1663 OP_TYPE const *puMem;
1664 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1665 if (rcStrict == VINF_SUCCESS)
1666 {
1667 uint32_t cTransfers = cLeftPage;
1668 rcStrict = IOMIOPortWriteString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1669
1670 uint32_t cActualTransfers = cLeftPage - cTransfers;
1671 Assert(cActualTransfers <= cLeftPage);
1672 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
1673 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1674 puMem += cActualTransfers;
1675
1676 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1677
1678 if (rcStrict != VINF_SUCCESS)
1679 {
1680 if (IOM_SUCCESS(rcStrict))
1681 {
1682 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1683 if (uCounterReg == 0)
1684 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1685 }
1686 return rcStrict;
1687 }
1688
1689 if (uCounterReg == 0)
1690 break;
1691
1692 /* If unaligned, we drop thru and do the page crossing access
1693 below. Otherwise, do the next page. */
1694 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1695 {
1696 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1697 continue;
1698 }
1699 cLeftPage = 0;
1700 }
1701 }
1702
1703 /*
1704 * Fallback - slow processing till the end of the current page.
1705 * In the cross page boundary case we will end up here with cLeftPage
1706 * as 0, we execute one loop then.
1707 *
1708 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1709 * I/O port, otherwise it wouldn't really be restartable.
1710 */
1711 /** @todo investigate what the CPU actually does with \#PF/\#GP
1712 * during OUTS. */
1713 do
1714 {
1715 OP_TYPE uValue;
1716 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uAddrReg);
1717 if (rcStrict != VINF_SUCCESS)
1718 return rcStrict;
1719
1720 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1721 if (IOM_SUCCESS(rcStrict))
1722 {
1723 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1724 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1725 cLeftPage--;
1726 }
1727 if (rcStrict != VINF_SUCCESS)
1728 {
1729 if (IOM_SUCCESS(rcStrict))
1730 {
1731 if (uCounterReg == 0)
1732 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1733 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1734 }
1735 return rcStrict;
1736 }
1737 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1738 } while ((int32_t)cLeftPage > 0);
1739
1740
1741 /*
1742 * Next page. Must check for interrupts and stuff here.
1743 */
1744 if (uCounterReg == 0)
1745 break;
1746 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1747 }
1748
1749 /*
1750 * Done.
1751 */
1752 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1753 return VINF_SUCCESS;
1754}
1755
1756#endif /* OP_SIZE != 64-bit */
1757
1758
1759#undef OP_rAX
1760#undef OP_SIZE
1761#undef ADDR_SIZE
1762#undef ADDR_rDI
1763#undef ADDR_rSI
1764#undef ADDR_rCX
1765#undef ADDR_rIP
1766#undef ADDR2_TYPE
1767#undef ADDR_TYPE
1768#undef ADDR2_TYPE
1769#undef ADDR_VMXSTRIO
1770#undef IS_64_BIT_CODE
1771#undef IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
1772#undef IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1773#undef IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1774