VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h@37084

Last change on this file was r37084, checked in by vboxsync, 14 years ago

IEM: xadd

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 392.1 KB
1/* $Id: IEMAllInstructions.cpp.h 37084 2011-05-13 19:53:02Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Global Variables *
21*******************************************************************************/
22extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
23
24
25/**
26 * Common worker for instructions like ADD, AND, OR, ++ with a byte
27 * memory/register as the destination.
28 *
29 * @param pImpl Pointer to the instruction implementation (assembly).
30 */
31FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
32{
33 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
34
35 /*
36 * If rm denotes a register, there are no more instruction bytes.
37 */
38 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
39 {
40 IEMOP_HLP_NO_LOCK_PREFIX();
41
42 IEM_MC_BEGIN(3, 0);
43 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
44 IEM_MC_ARG(uint8_t, u8Src, 1);
45 IEM_MC_ARG(uint32_t *, pEFlags, 2);
46
47 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
48 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
49 IEM_MC_REF_EFLAGS(pEFlags);
50 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
51
52 IEM_MC_ADVANCE_RIP();
53 IEM_MC_END();
54 }
55 else
56 {
57 /*
58 * We're accessing memory.
59 * Note! We're putting the eflags on the stack here so we can commit them
60 * after the memory access.
61 */
62 uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
63 IEM_MC_BEGIN(3, 2);
64 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
65 IEM_MC_ARG(uint8_t, u8Src, 1);
66 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
67 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
68
69 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
70 IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
71 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
72 IEM_MC_FETCH_EFLAGS(EFlags);
73 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
74 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
75 else
76 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);
77
78 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
79 IEM_MC_COMMIT_EFLAGS(EFlags);
80 IEM_MC_ADVANCE_RIP();
81 IEM_MC_END();
82 }
83 return VINF_SUCCESS;
84}
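/*
 * A minimal sketch of what a pfnNormalU8 worker such as the one invoked
 * above might look like - the real VBox workers live in assembly and
 * compute the full flag set; the MY_EFL_* masks and the mySketchAddU8
 * name are illustrative only (PF/AF/OF handling omitted for brevity).
 */
#include <stdint.h>

#define MY_EFL_CF 0x0001 /* carry, matches X86_EFL_CF */
#define MY_EFL_ZF 0x0040 /* zero,  matches X86_EFL_ZF */
#define MY_EFL_SF 0x0080 /* sign,  matches X86_EFL_SF */

static void mySketchAddU8(uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags)
{
    uint16_t const uResult = (uint16_t)*pu8Dst + u8Src;
    *pu8Dst = (uint8_t)uResult;

    uint32_t fEfl = *pEFlags & ~(uint32_t)(MY_EFL_CF | MY_EFL_ZF | MY_EFL_SF);
    if (uResult & 0x100)
        fEfl |= MY_EFL_CF;              /* carry out of bit 7 */
    if (!(uint8_t)uResult)
        fEfl |= MY_EFL_ZF;              /* result is zero */
    if (uResult & 0x80)
        fEfl |= MY_EFL_SF;              /* sign bit of the result */
    *pEFlags = fEfl;
}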
85
86
87/**
88 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
89 * memory/register as the destination.
90 *
91 * @param pImpl Pointer to the instruction implementation (assembly).
92 */
93FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
94{
95 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
96
97 /*
98 * If rm denotes a register, there are no more instruction bytes.
99 */
100 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
101 {
102 IEMOP_HLP_NO_LOCK_PREFIX();
103
104 switch (pIemCpu->enmEffOpSize)
105 {
106 case IEMMODE_16BIT:
107 IEM_MC_BEGIN(3, 0);
108 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
109 IEM_MC_ARG(uint16_t, u16Src, 1);
110 IEM_MC_ARG(uint32_t *, pEFlags, 2);
111
112 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
113 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
114 IEM_MC_REF_EFLAGS(pEFlags);
115 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
116
117 IEM_MC_ADVANCE_RIP();
118 IEM_MC_END();
119 break;
120
121 case IEMMODE_32BIT:
122 IEM_MC_BEGIN(3, 0);
123 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
124 IEM_MC_ARG(uint32_t, u32Src, 1);
125 IEM_MC_ARG(uint32_t *, pEFlags, 2);
126
127 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
128 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
129 IEM_MC_REF_EFLAGS(pEFlags);
130 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
131
132 IEM_MC_ADVANCE_RIP();
133 IEM_MC_END();
134 break;
135
136 case IEMMODE_64BIT:
137 IEM_MC_BEGIN(3, 0);
138 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
139 IEM_MC_ARG(uint64_t, u64Src, 1);
140 IEM_MC_ARG(uint32_t *, pEFlags, 2);
141
142 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
143 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
144 IEM_MC_REF_EFLAGS(pEFlags);
145 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
146
147 IEM_MC_ADVANCE_RIP();
148 IEM_MC_END();
149 break;
150 }
151 }
152 else
153 {
154 /*
155 * We're accessing memory.
156 * Note! We're putting the eflags on the stack here so we can commit them
157 * after the memory access.
158 */
159 uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
160 switch (pIemCpu->enmEffOpSize)
161 {
162 case IEMMODE_16BIT:
163 IEM_MC_BEGIN(3, 2);
164 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
165 IEM_MC_ARG(uint16_t, u16Src, 1);
166 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
167 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
168
169 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
170 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
171 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
172 IEM_MC_FETCH_EFLAGS(EFlags);
173 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
174 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
175 else
176 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
177
178 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
179 IEM_MC_COMMIT_EFLAGS(EFlags);
180 IEM_MC_ADVANCE_RIP();
181 IEM_MC_END();
182 break;
183
184 case IEMMODE_32BIT:
185 IEM_MC_BEGIN(3, 2);
186 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
187 IEM_MC_ARG(uint32_t, u32Src, 1);
188 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
189 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
190
191 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
192 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
193 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
194 IEM_MC_FETCH_EFLAGS(EFlags);
195 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
196 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
197 else
198 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
199
200 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
201 IEM_MC_COMMIT_EFLAGS(EFlags);
202 IEM_MC_ADVANCE_RIP();
203 IEM_MC_END();
204 break;
205
206 case IEMMODE_64BIT:
207 IEM_MC_BEGIN(3, 2);
208 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
209 IEM_MC_ARG(uint64_t, u64Src, 1);
210 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
211 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
212
213 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
214 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
215 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
216 IEM_MC_FETCH_EFLAGS(EFlags);
217 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
218 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
219 else
220 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
221
222 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
223 IEM_MC_COMMIT_EFLAGS(EFlags);
224 IEM_MC_ADVANCE_RIP();
225 IEM_MC_END();
226 break;
227 }
228 }
229 return VINF_SUCCESS;
230}
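/*
 * The ModR/M bit arithmetic repeated throughout these workers, spelled
 * out once. Sketch under the assumption that uRexReg/uRexB carry the
 * pre-shifted REX.R/REX.B values (0 or 8), which is how the
 * pIemCpu->uRexReg/uRexB fields are OR'ed in above; mySketch* names are
 * illustrative.
 */
#include <stdint.h>

static void mySketchDecodeModRm(uint8_t bRm, unsigned uRexReg, unsigned uRexB,
                                unsigned *piMod, unsigned *piReg, unsigned *piRm)
{
    *piMod = bRm >> 6;                   /* mod == 3: register operand   */
    *piReg = ((bRm >> 3) & 7) | uRexReg; /* REX.R widens reg to 4 bits   */
    *piRm  = (bRm & 7)        | uRexB;   /* REX.B widens rm to 4 bits    */
}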
231
232
233/**
234 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
235 * the destination.
236 *
237 * @param pImpl Pointer to the instruction implementation (assembly).
238 */
239FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
240{
241 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
242 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
243
244 /*
245 * If rm denotes a register, there are no more instruction bytes.
246 */
247 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
248 {
249 IEM_MC_BEGIN(3, 0);
250 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
251 IEM_MC_ARG(uint8_t, u8Src, 1);
252 IEM_MC_ARG(uint32_t *, pEFlags, 2);
253
254 IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
255 IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
256 IEM_MC_REF_EFLAGS(pEFlags);
257 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
258
259 IEM_MC_ADVANCE_RIP();
260 IEM_MC_END();
261 }
262 else
263 {
264 /*
265 * We're accessing memory.
266 */
267 IEM_MC_BEGIN(3, 1);
268 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
269 IEM_MC_ARG(uint8_t, u8Src, 1);
270 IEM_MC_ARG(uint32_t *, pEFlags, 2);
271 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
272
273 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
274 IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
275 IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
276 IEM_MC_REF_EFLAGS(pEFlags);
277 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
278
279 IEM_MC_ADVANCE_RIP();
280 IEM_MC_END();
281 }
282 return VINF_SUCCESS;
283}
284
285
286/**
287 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
288 * register as the destination.
289 *
290 * @param pImpl Pointer to the instruction implementation (assembly).
291 */
292FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
293{
294 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
295 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
296
297 /*
298 * If rm denotes a register, there are no more instruction bytes.
299 */
300 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
301 {
302 switch (pIemCpu->enmEffOpSize)
303 {
304 case IEMMODE_16BIT:
305 IEM_MC_BEGIN(3, 0);
306 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
307 IEM_MC_ARG(uint16_t, u16Src, 1);
308 IEM_MC_ARG(uint32_t *, pEFlags, 2);
309
310 IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
311 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
312 IEM_MC_REF_EFLAGS(pEFlags);
313 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
314
315 IEM_MC_ADVANCE_RIP();
316 IEM_MC_END();
317 break;
318
319 case IEMMODE_32BIT:
320 IEM_MC_BEGIN(3, 0);
321 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
322 IEM_MC_ARG(uint32_t, u32Src, 1);
323 IEM_MC_ARG(uint32_t *, pEFlags, 2);
324
325 IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
326 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
327 IEM_MC_REF_EFLAGS(pEFlags);
328 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
329
330 IEM_MC_ADVANCE_RIP();
331 IEM_MC_END();
332 break;
333
334 case IEMMODE_64BIT:
335 IEM_MC_BEGIN(3, 0);
336 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
337 IEM_MC_ARG(uint64_t, u64Src, 1);
338 IEM_MC_ARG(uint32_t *, pEFlags, 2);
339
340 IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
341 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
342 IEM_MC_REF_EFLAGS(pEFlags);
343 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
344
345 IEM_MC_ADVANCE_RIP();
346 IEM_MC_END();
347 break;
348 }
349 }
350 else
351 {
352 /*
353 * We're accessing memory.
354 */
355 switch (pIemCpu->enmEffOpSize)
356 {
357 case IEMMODE_16BIT:
358 IEM_MC_BEGIN(3, 1);
359 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
360 IEM_MC_ARG(uint16_t, u16Src, 1);
361 IEM_MC_ARG(uint32_t *, pEFlags, 2);
362 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
363
364 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
365 IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
366 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
367 IEM_MC_REF_EFLAGS(pEFlags);
368 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
369
370 IEM_MC_ADVANCE_RIP();
371 IEM_MC_END();
372 break;
373
374 case IEMMODE_32BIT:
375 IEM_MC_BEGIN(3, 1);
376 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
377 IEM_MC_ARG(uint32_t, u32Src, 1);
378 IEM_MC_ARG(uint32_t *, pEFlags, 2);
379 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
380
381 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
382 IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
383 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
384 IEM_MC_REF_EFLAGS(pEFlags);
385 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
386
387 IEM_MC_ADVANCE_RIP();
388 IEM_MC_END();
389 break;
390
391 case IEMMODE_64BIT:
392 IEM_MC_BEGIN(3, 1);
393 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
394 IEM_MC_ARG(uint64_t, u64Src, 1);
395 IEM_MC_ARG(uint32_t *, pEFlags, 2);
396 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
397
398 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
399 IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
400 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
401 IEM_MC_REF_EFLAGS(pEFlags);
402 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
403
404 IEM_MC_ADVANCE_RIP();
405 IEM_MC_END();
406 break;
407 }
408 }
409 return VINF_SUCCESS;
410}
411
412
413/**
414 * Common worker for instructions like ADD, AND, OR, ++ that work on AL with
415 * a byte immediate.
416 *
417 * @param pImpl Pointer to the instruction implementation (assembly).
418 */
419FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
420{
421 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
422 IEMOP_HLP_NO_LOCK_PREFIX();
423
424 IEM_MC_BEGIN(3, 0);
425 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
426 IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/ u8Imm, 1);
427 IEM_MC_ARG(uint32_t *, pEFlags, 2);
428
429 IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
430 IEM_MC_REF_EFLAGS(pEFlags);
431 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
432
433 IEM_MC_ADVANCE_RIP();
434 IEM_MC_END();
435 return VINF_SUCCESS;
436}
437
438
439/**
440 * Common worker for instructions like ADD, AND, OR, ++ that work on
441 * AX/EAX/RAX with a word/dword immediate.
442 *
443 * @param pImpl Pointer to the instruction implementation (assembly).
444 */
445FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
446{
447 switch (pIemCpu->enmEffOpSize)
448 {
449 case IEMMODE_16BIT:
450 {
451 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
452 IEMOP_HLP_NO_LOCK_PREFIX();
453
454 IEM_MC_BEGIN(3, 0);
455 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
456 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm, 1);
457 IEM_MC_ARG(uint32_t *, pEFlags, 2);
458
459 IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
460 IEM_MC_REF_EFLAGS(pEFlags);
461 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
462
463 IEM_MC_ADVANCE_RIP();
464 IEM_MC_END();
465 return VINF_SUCCESS;
466 }
467
468 case IEMMODE_32BIT:
469 {
470 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
471 IEMOP_HLP_NO_LOCK_PREFIX();
472
473 IEM_MC_BEGIN(3, 0);
474 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
475 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm, 1);
476 IEM_MC_ARG(uint32_t *, pEFlags, 2);
477
478 IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
479 IEM_MC_REF_EFLAGS(pEFlags);
480 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
481
482 IEM_MC_ADVANCE_RIP();
483 IEM_MC_END();
484 return VINF_SUCCESS;
485 }
486
487 case IEMMODE_64BIT:
488 {
489 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
490 IEMOP_HLP_NO_LOCK_PREFIX();
491
492 IEM_MC_BEGIN(3, 0);
493 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
494 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm, 1);
495 IEM_MC_ARG(uint32_t *, pEFlags, 2);
496
497 IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
498 IEM_MC_REF_EFLAGS(pEFlags);
499 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
500
501 IEM_MC_ADVANCE_RIP();
502 IEM_MC_END();
503 return VINF_SUCCESS;
504 }
505
506 IEM_NOT_REACHED_DEFAULT_CASE_RET();
507 }
508}
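/*
 * The 64-bit case above fetches an Iz immediate: 32 bits from the
 * instruction stream, sign-extended to 64 bits per the usual x86-64
 * rule - which is why there is no 64-bit immediate fetch here. A sketch
 * of that fetch, assuming a little-endian byte pointer:
 */
#include <stdint.h>

static uint64_t mySketchGetS32SxU64(const uint8_t *pb)
{
    uint32_t const u32 = (uint32_t)pb[0]
                       | (uint32_t)pb[1] << 8
                       | (uint32_t)pb[2] << 16
                       | (uint32_t)pb[3] << 24;
    return (uint64_t)(int64_t)(int32_t)u32; /* sign-extend to 64 bits */
}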
509
510
511/** Opcodes 0xf1, 0xd6. */
512FNIEMOP_DEF(iemOp_Invalid)
513{
514 IEMOP_MNEMONIC("Invalid");
515 return IEMOP_RAISE_INVALID_OPCODE();
516}
517
518
519
520/** @name ..... opcodes.
521 *
522 * @{
523 */
524
525/** @} */
526
527
528/** @name Two byte opcodes (first byte 0x0f).
529 *
530 * @{
531 */
532
533/** Opcode 0x0f 0x00 /0. */
534FNIEMOP_STUB_1(iemOp_Grp6_sldt, uint8_t, bRm);
535
536
537/** Opcode 0x0f 0x00 /1. */
538FNIEMOP_STUB_1(iemOp_Grp6_str, uint8_t, bRm);
539
540
541/** Opcode 0x0f 0x00 /2. */
542FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
543{
544 IEMOP_HLP_NO_LOCK_PREFIX();
545 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
546 {
547 IEM_MC_BEGIN(1, 0);
548 IEM_MC_ARG(uint16_t, u16Sel, 0);
549 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
550 IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
551 IEM_MC_END();
552 }
553 else
554 {
555 IEM_MC_BEGIN(1, 1);
556 IEM_MC_ARG(uint16_t, u16Sel, 0);
557 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
558 IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
559 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
560 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
561 IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
562 IEM_MC_END();
563 }
564 return VINF_SUCCESS;
565}
566
567
568/** Opcode 0x0f 0x00 /3. */
569FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
570{
571 IEMOP_HLP_NO_LOCK_PREFIX();
572 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
573 {
574 IEM_MC_BEGIN(1, 0);
575 IEM_MC_ARG(uint16_t, u16Sel, 0);
576 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
577 IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
578 IEM_MC_END();
579 }
580 else
581 {
582 IEM_MC_BEGIN(1, 1);
583 IEM_MC_ARG(uint16_t, u16Sel, 0);
584 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
585 IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
586 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
587 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
588 IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
589 IEM_MC_END();
590 }
591 return VINF_SUCCESS;
592}
593
594
595/** Opcode 0x0f 0x00 /4. */
596FNIEMOP_STUB_1(iemOp_Grp6_verr, uint8_t, bRm);
597
598
599/** Opcode 0x0f 0x00 /5. */
600FNIEMOP_STUB_1(iemOp_Grp6_verw, uint8_t, bRm);
601
602
603/** Opcode 0x0f 0x00. */
604FNIEMOP_DEF(iemOp_Grp6)
605{
606 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
607 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
608 {
609 case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
610 case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str, bRm);
611 case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
612 case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr, bRm);
613 case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
614 case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
615 case 6: return IEMOP_RAISE_INVALID_OPCODE();
616 case 7: return IEMOP_RAISE_INVALID_OPCODE();
617 IEM_NOT_REACHED_DEFAULT_CASE_RET();
618 }
619
620}
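/*
 * Group opcodes like Grp6 above encode the actual instruction in the
 * ModR/M reg field rather than in the opcode byte. A table-driven
 * sketch of the same dispatch; the mySketch* handlers are dummies
 * standing in for sldt/str/lldt/ltr/verr/verw.
 */
#include <stdint.h>

typedef int (*MYPFNGRP)(uint8_t bRm);

static int mySketchUd(uint8_t bRm)   { (void)bRm; return -1; /* #UD     */ }
static int mySketchStub(uint8_t bRm) { (void)bRm; return 0;  /* handled */ }

static MYPFNGRP const g_amySketchGrp6[8] =
{
    mySketchStub, mySketchStub, mySketchStub, mySketchStub,  /* /0 .. /3 */
    mySketchStub, mySketchStub, mySketchUd,   mySketchUd     /* /4 .. /7 */
};

static int mySketchDispatchGrp6(uint8_t bRm)
{
    return g_amySketchGrp6[(bRm >> 3) & 7](bRm); /* reg field selects */
}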
621
622
623/** Opcode 0x0f 0x01 /0. */
624FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
625{
626 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
627}
628
629
630/** Opcode 0x0f 0x01 /0. */
631FNIEMOP_DEF(iemOp_Grp7_vmcall)
632{
633 AssertFailed();
634 return IEMOP_RAISE_INVALID_OPCODE();
635}
636
637
638/** Opcode 0x0f 0x01 /0. */
639FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
640{
641 AssertFailed();
642 return IEMOP_RAISE_INVALID_OPCODE();
643}
644
645
646/** Opcode 0x0f 0x01 /0. */
647FNIEMOP_DEF(iemOp_Grp7_vmresume)
648{
649 AssertFailed();
650 return IEMOP_RAISE_INVALID_OPCODE();
651}
652
653
654/** Opcode 0x0f 0x01 /0. */
655FNIEMOP_DEF(iemOp_Grp7_vmxoff)
656{
657 AssertFailed();
658 return IEMOP_RAISE_INVALID_OPCODE();
659}
660
661
662/** Opcode 0x0f 0x01 /1. */
663FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
664{
665 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
666}
667
668
669/** Opcode 0x0f 0x01 /1. */
670FNIEMOP_DEF(iemOp_Grp7_monitor)
671{
672 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
673}
674
675
676/** Opcode 0x0f 0x01 /1. */
677FNIEMOP_DEF(iemOp_Grp7_mwait)
678{
679 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
680}
681
682
683/** Opcode 0x0f 0x01 /2. */
684FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
685{
686 IEMOP_HLP_NO_LOCK_PREFIX();
687
688 IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
689 ? IEMMODE_64BIT
690 : pIemCpu->enmEffOpSize;
691 IEM_MC_BEGIN(3, 1);
692 IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
693 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
694 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
695 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
696 IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
697 IEM_MC_END();
698 return VINF_SUCCESS;
699}
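/*
 * What iemCImpl_lgdt has to read: an m16&32 (m16&64 in long mode)
 * operand - a 16-bit limit followed by the base. Layout sketch,
 * assuming little-endian guest memory; MYSKETCHDTR is illustrative.
 */
#include <stdint.h>
#include <string.h>

typedef struct MYSKETCHDTR { uint16_t uLimit; uint64_t uBase; } MYSKETCHDTR;

static void mySketchReadDtr(const uint8_t *pbMem, int fLongMode, MYSKETCHDTR *pDtr)
{
    memcpy(&pDtr->uLimit, pbMem, 2);                    /* limit first   */
    pDtr->uBase = 0;
    memcpy(&pDtr->uBase, pbMem + 2, fLongMode ? 8 : 4); /* then the base */
    /* With a 16-bit operand size outside long mode only the low 24 bits
       of the base are used - that is what enmEffOpSizeArg conveys. */
}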
700
701
702/** Opcode 0x0f 0x01 /2. */
703FNIEMOP_DEF(iemOp_Grp7_xgetbv)
704{
705 AssertFailed();
706 return IEMOP_RAISE_INVALID_OPCODE();
707}
708
709
710/** Opcode 0x0f 0x01 /2. */
711FNIEMOP_DEF(iemOp_Grp7_xsetbv)
712{
713 AssertFailed();
714 return IEMOP_RAISE_INVALID_OPCODE();
715}
716
717
718/** Opcode 0x0f 0x01 /3. */
719FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
720{
721 IEMOP_HLP_NO_LOCK_PREFIX();
722
723 IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
724 ? IEMMODE_64BIT
725 : pIemCpu->enmEffOpSize;
726 IEM_MC_BEGIN(3, 1);
727 IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
728 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
729 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
730 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
731 IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
732 IEM_MC_END();
733 return VINF_SUCCESS;
734}
735
736
737/** Opcode 0x0f 0x01 /4. */
738FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
739{
740 IEMOP_HLP_NO_LOCK_PREFIX();
741 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
742 {
743 switch (pIemCpu->enmEffOpSize)
744 {
745 case IEMMODE_16BIT:
746 IEM_MC_BEGIN(0, 1);
747 IEM_MC_LOCAL(uint16_t, u16Tmp);
748 IEM_MC_FETCH_CR0_U16(u16Tmp);
749 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
750 IEM_MC_ADVANCE_RIP();
751 IEM_MC_END();
752 return VINF_SUCCESS;
753
754 case IEMMODE_32BIT:
755 IEM_MC_BEGIN(0, 1);
756 IEM_MC_LOCAL(uint32_t, u32Tmp);
757 IEM_MC_FETCH_CR0_U32(u32Tmp);
758 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
759 IEM_MC_ADVANCE_RIP();
760 IEM_MC_END();
761 return VINF_SUCCESS;
762
763 case IEMMODE_64BIT:
764 IEM_MC_BEGIN(0, 1);
765 IEM_MC_LOCAL(uint64_t, u64Tmp);
766 IEM_MC_FETCH_CR0_U64(u64Tmp);
767 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
768 IEM_MC_ADVANCE_RIP();
769 IEM_MC_END();
770 return VINF_SUCCESS;
771
772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
773 }
774 }
775 else
776 {
777 /* Ignore the operand size here; memory refs are always 16-bit. */
778 IEM_MC_BEGIN(0, 2);
779 IEM_MC_LOCAL(uint16_t, u16Tmp);
780 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
781 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
782 IEM_MC_FETCH_CR0_U16(u16Tmp);
783 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
784 IEM_MC_ADVANCE_RIP();
785 IEM_MC_END();
786 return VINF_SUCCESS;
787 }
788}
789
790
791/** Opcode 0x0f 0x01 /6. */
792FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
793{
794 /* The operand size is effectively ignored; everything is 16-bit and only
795 bits 0 thru 3 (PE, MP, EM and TS) are used. */
796 IEMOP_HLP_NO_LOCK_PREFIX();
797 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
798 {
799 IEM_MC_BEGIN(1, 0);
800 IEM_MC_ARG(uint16_t, u16Tmp, 0);
801 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
802 IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
803 IEM_MC_END();
804 }
805 else
806 {
807 IEM_MC_BEGIN(1, 1);
808 IEM_MC_ARG(uint16_t, u16Tmp, 0);
809 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
810 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
811 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
812 IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
813 IEM_MC_END();
814 }
815 return VINF_SUCCESS;
816}
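/*
 * The semantics iemCImpl_lmsw must provide, per the SDM: only CR0 bits
 * 0-3 (PE, MP, EM, TS) are written, and LMSW can set PE but never clear
 * it. Behavioural sketch; the mySketchLmsw name is illustrative:
 */
#include <stdint.h>

static uint64_t mySketchLmsw(uint64_t uCr0, uint16_t u16Msw)
{
    uint64_t uNew = (uCr0 & ~(uint64_t)0xe) | (u16Msw & 0xe); /* MP/EM/TS */
    uNew |= (uCr0 | u16Msw) & 1;                        /* PE is set-only */
    return uNew;
}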
817
818
819/** Opcode 0x0f 0x01 /7. */
820FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
821{
822 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
823}
824
825
826/** Opcode 0x0f 0x01 /7. */
827FNIEMOP_DEF(iemOp_Grp7_swapgs)
828{
829 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
830}
831
832
833/** Opcode 0x0f 0x01 /7. */
834FNIEMOP_DEF(iemOp_Grp7_rdtscp)
835{
836 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
837}
838
839
840/** Opcode 0x0f 0x01. */
841FNIEMOP_DEF(iemOp_Grp7)
842{
843 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
844 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
845 {
846 case 0:
847 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
848 return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
849 switch (bRm & X86_MODRM_RM_MASK)
850 {
851 case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
852 case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
853 case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
854 case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
855 }
856 return IEMOP_RAISE_INVALID_OPCODE();
857
858 case 1:
859 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
860 return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
861 switch (bRm & X86_MODRM_RM_MASK)
862 {
863 case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
864 case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
865 }
866 return IEMOP_RAISE_INVALID_OPCODE();
867
868 case 2:
869 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
870 return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
871 switch (bRm & X86_MODRM_RM_MASK)
872 {
873 case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
874 case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
875 }
876 return IEMOP_RAISE_INVALID_OPCODE();
877
878 case 3:
879 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
880 return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
881 return IEMOP_RAISE_INVALID_OPCODE();
882
883 case 4:
884 return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);
885
886 case 5:
887 return IEMOP_RAISE_INVALID_OPCODE();
888
889 case 6:
890 return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);
891
892 case 7:
893 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
894 return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
895 switch (bRm & X86_MODRM_RM_MASK)
896 {
897 case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
898 case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
899 }
900 return IEMOP_RAISE_INVALID_OPCODE();
901
902 IEM_NOT_REACHED_DEFAULT_CASE_RET();
903 }
904}
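/*
 * Grp7 decodes in two steps, as the switch above shows: the reg field
 * picks the row, and when mod == 3 the rm field selects a register-form
 * sub-instruction (vmcall, monitor, swapgs, ...) instead of naming a
 * register. Sketch of that rule:
 */
#include <stdint.h>

static int mySketchGrp7IsMemForm(uint8_t bRm)
{
    return (bRm >> 6) != 3;  /* mod != 3: memory form (sgdt/sidt/lgdt/...) */
}

static unsigned mySketchGrp7SubOp(uint8_t bRm)
{
    return bRm & 7;       /* mod == 3: rm is a sub-opcode, not a register */
}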
905
906
907/** Opcode 0x0f 0x02. */
908FNIEMOP_STUB(iemOp_lar_Gv_Ew);
909/** Opcode 0x0f 0x03. */
910FNIEMOP_STUB(iemOp_lsl_Gv_Ew);
911/** Opcode 0x0f 0x05. */
912FNIEMOP_STUB(iemOp_syscall);
913
914
915/** Opcode 0x0f 0x06. */
916FNIEMOP_DEF(iemOp_clts)
917{
918 IEMOP_MNEMONIC("clts");
919 IEMOP_HLP_NO_LOCK_PREFIX();
920 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
921}
922
923
924/** Opcode 0x0f 0x07. */
925FNIEMOP_STUB(iemOp_sysret);
926/** Opcode 0x0f 0x08. */
927FNIEMOP_STUB(iemOp_invd);
928/** Opcode 0x0f 0x09. */
929FNIEMOP_STUB(iemOp_wbinvd);
930/** Opcode 0x0f 0x0b. */
931FNIEMOP_STUB(iemOp_ud2);
932/** Opcode 0x0f 0x0d. */
933FNIEMOP_STUB(iemOp_nop_Ev_prefetch);
934/** Opcode 0x0f 0x0e. */
935FNIEMOP_STUB(iemOp_femms);
936/** Opcode 0x0f 0x0f. */
937FNIEMOP_STUB(iemOp_3Dnow);
938/** Opcode 0x0f 0x10. */
939FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
940/** Opcode 0x0f 0x11. */
941FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
942/** Opcode 0x0f 0x12. */
943FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq);
944/** Opcode 0x0f 0x13. */
945FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq);
946/** Opcode 0x0f 0x14. */
947FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
948/** Opcode 0x0f 0x15. */
949FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
950/** Opcode 0x0f 0x16. */
951FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq);
952/** Opcode 0x0f 0x17. */
953FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq);
954/** Opcode 0x0f 0x18. */
955FNIEMOP_STUB(iemOp_prefetch_Grp16);
956
957
958/** Opcode 0x0f 0x20. */
959FNIEMOP_DEF(iemOp_mov_Rd_Cd)
960{
961 /* mod is ignored, as are operand-size overrides. */
962 IEMOP_MNEMONIC("mov Rd,Cd");
963 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
964 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
965 else
966 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;
967
968 /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
969 * before the privilege level violation (\#GP). */
970 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
971 uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
972 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
973 {
974 /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
975 if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
976 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
977 iCrReg |= 8;
978 }
979 switch (iCrReg)
980 {
981 case 0: case 2: case 3: case 4: case 8:
982 break;
983 default:
984 return IEMOP_RAISE_INVALID_OPCODE();
985 }
986
987 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
988}
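/*
 * The CR8-via-LOCK quirk used above, in isolation: on AMD CPUs with the
 * CR8L feature, a lock prefix acts like REX.R and lets code outside
 * 64-bit mode address CR8. Sketch of the effective CR index
 * computation; fRexSet/fLockSet are 0/1 flags standing in for the
 * decoded prefix state:
 */
#include <stdint.h>

static unsigned mySketchEffCrReg(uint8_t bRm, unsigned fRexSet, unsigned fLockSet)
{
    unsigned iCrReg = ((bRm >> 3) & 7) | (fRexSet << 3); /* REX.R: CR8..15 */
    if (fLockSet)
        iCrReg |= 8;                       /* lock prefix == REX.R here */
    return iCrReg;
}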
989
990
991/** Opcode 0x0f 0x21. */
992FNIEMOP_DEF(iemOp_mov_Rd_Dd)
993{
994 IEMOP_MNEMONIC("mov Rd,Dd");
995 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
996 IEMOP_HLP_NO_LOCK_PREFIX();
997 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
998 return IEMOP_RAISE_INVALID_OPCODE();
999 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
1000 (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
1001 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
1002}
1003
1004
1005/** Opcode 0x0f 0x22. */
1006FNIEMOP_DEF(iemOp_mov_Cd_Rd)
1007{
1008 /* mod is ignored, as are operand-size overrides. */
1009 IEMOP_MNEMONIC("mov Cd,Rd");
1010 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1011 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
1012 else
1013 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;
1014
1015 /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
1016 * before the privilege level violation (\#GP). */
1017 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1018 uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
1019 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
1020 {
1021 /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
1022 if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
1023 return IEMOP_RAISE_INVALID_LOCK_PREFIX();
1024 iCrReg |= 8;
1025 }
1026 switch (iCrReg)
1027 {
1028 case 0: case 2: case 3: case 4: case 8:
1029 break;
1030 default:
1031 return IEMOP_RAISE_INVALID_OPCODE();
1032 }
1033
1034 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
1035}
1036
1037
1038/** Opcode 0x0f 0x23. */
1039FNIEMOP_DEF(iemOp_mov_Dd_Rd)
1040{
1041 IEMOP_MNEMONIC("mov Dd,Rd");
1042 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1043 IEMOP_HLP_NO_LOCK_PREFIX();
1044 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
1045 return IEMOP_RAISE_INVALID_OPCODE();
1046 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
1047 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
1048 (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
1049}
1050
1051
1052/** Opcode 0x0f 0x24. */
1053FNIEMOP_DEF(iemOp_mov_Rd_Td)
1054{
1055 IEMOP_MNEMONIC("mov Rd,Td");
1056/** @todo Is the invalid opcode raised before parsing any R/M byte? */
1057 return IEMOP_RAISE_INVALID_OPCODE();
1058}
1059
1060
1061
1062/** Opcode 0x0f 0x26. */
1063FNIEMOP_DEF(iemOp_mov_Td_Rd)
1064{
1065 IEMOP_MNEMONIC("mov Td,Rd");
1066 return IEMOP_RAISE_INVALID_OPCODE();
1067}
1068
1069
1070/** Opcode 0x0f 0x28. */
1071FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
1072/** Opcode 0x0f 0x29. */
1073FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
1074/** Opcode 0x0f 0x2a. */
1075FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey);
1076/** Opcode 0x0f 0x2b. */
1077FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd);
1078/** Opcode 0x0f 0x2c. */
1079FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd);
1080/** Opcode 0x0f 0x2d. */
1081FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
1082/** Opcode 0x0f 0x2e. */
1083FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd);
1084/** Opcode 0x0f 0x2f. */
1085FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
1086/** Opcode 0x0f 0x30. */
1087FNIEMOP_STUB(iemOp_wrmsr);
1088
1089
1090/** Opcode 0x0f 0x31. */
1091FNIEMOP_DEF(iemOp_rdtsc)
1092{
1093 IEMOP_MNEMONIC("rdtsc");
1094 IEMOP_HLP_NO_LOCK_PREFIX();
1095 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
1096}
1097
1098
1099/** Opcode 0x0f 0x32. */
1100FNIEMOP_STUB(iemOp_rdmsr);
1101/** Opcode 0x0f 0x33. */
1102FNIEMOP_STUB(iemOp_rdpmc);
1103/** Opcode 0x0f 0x34. */
1104FNIEMOP_STUB(iemOp_sysenter);
1105/** Opcode 0x0f 0x35. */
1106FNIEMOP_STUB(iemOp_sysexit);
1107/** Opcode 0x0f 0x37. */
1108FNIEMOP_STUB(iemOp_getsec);
1109/** Opcode 0x0f 0x38. */
1110FNIEMOP_STUB(iemOp_3byte_Esc_A4);
1111/** Opcode 0x0f 0x39. */
1112FNIEMOP_STUB(iemOp_3byte_Esc_A5);
1113/** Opcode 0x0f 0x3c (?). */
1114FNIEMOP_STUB(iemOp_movnti_Gv_Ev);
1115
1116/**
1117 * Implements a conditional move.
1118 *
1119 * Wish there was an obvious way to do this where we could share code and
1120 * reduce the bloat.
1121 *
1122 * @param a_Cnd The conditional "microcode" operation.
1123 */
1124#define CMOV_X(a_Cnd) \
1125 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
1126 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
1127 { \
1128 switch (pIemCpu->enmEffOpSize) \
1129 { \
1130 case IEMMODE_16BIT: \
1131 IEM_MC_BEGIN(0, 1); \
1132 IEM_MC_LOCAL(uint16_t, u16Tmp); \
1133 a_Cnd { \
1134 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
1135 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
1136 } IEM_MC_ENDIF(); \
1137 IEM_MC_ADVANCE_RIP(); \
1138 IEM_MC_END(); \
1139 return VINF_SUCCESS; \
1140 \
1141 case IEMMODE_32BIT: \
1142 IEM_MC_BEGIN(0, 1); \
1143 IEM_MC_LOCAL(uint32_t, u32Tmp); \
1144 a_Cnd { \
1145 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
1146 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
1147 } IEM_MC_ELSE() { \
1148 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
1149 } IEM_MC_ENDIF(); \
1150 IEM_MC_ADVANCE_RIP(); \
1151 IEM_MC_END(); \
1152 return VINF_SUCCESS; \
1153 \
1154 case IEMMODE_64BIT: \
1155 IEM_MC_BEGIN(0, 1); \
1156 IEM_MC_LOCAL(uint64_t, u64Tmp); \
1157 a_Cnd { \
1158 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
1159 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
1160 } IEM_MC_ENDIF(); \
1161 IEM_MC_ADVANCE_RIP(); \
1162 IEM_MC_END(); \
1163 return VINF_SUCCESS; \
1164 \
1165 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
1166 } \
1167 } \
1168 else \
1169 { \
1170 switch (pIemCpu->enmEffOpSize) \
1171 { \
1172 case IEMMODE_16BIT: \
1173 IEM_MC_BEGIN(0, 2); \
1174 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
1175 IEM_MC_LOCAL(uint16_t, u16Tmp); \
1176 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
1177 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
1178 a_Cnd { \
1179 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
1180 } IEM_MC_ENDIF(); \
1181 IEM_MC_ADVANCE_RIP(); \
1182 IEM_MC_END(); \
1183 return VINF_SUCCESS; \
1184 \
1185 case IEMMODE_32BIT: \
1186 IEM_MC_BEGIN(0, 2); \
1187 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
1188 IEM_MC_LOCAL(uint32_t, u32Tmp); \
1189 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
1190 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
1191 a_Cnd { \
1192 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
1193 } IEM_MC_ELSE() { \
1194 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
1195 } IEM_MC_ENDIF(); \
1196 IEM_MC_ADVANCE_RIP(); \
1197 IEM_MC_END(); \
1198 return VINF_SUCCESS; \
1199 \
1200 case IEMMODE_64BIT: \
1201 IEM_MC_BEGIN(0, 2); \
1202 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
1203 IEM_MC_LOCAL(uint64_t, u64Tmp); \
1204 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
1205 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
1206 a_Cnd { \
1207 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
1208 } IEM_MC_ENDIF(); \
1209 IEM_MC_ADVANCE_RIP(); \
1210 IEM_MC_END(); \
1211 return VINF_SUCCESS; \
1212 \
1213 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
1214 } \
1215 } do {} while (0)
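/*
 * Why only the 32-bit cases in CMOV_X have an IEM_MC_ELSE clearing the
 * high half: in 64-bit mode a 32-bit cmov writes (and thereby
 * zero-extends) the destination register even when the condition is
 * false. Behavioural sketch:
 */
#include <stdint.h>
#include <stdbool.h>

static void mySketchCmov32(uint64_t *puDst64, uint32_t uSrc32, bool fCond)
{
    uint32_t const uLow = fCond ? uSrc32 : (uint32_t)*puDst64;
    *puDst64 = uLow;    /* the high 32 bits are cleared either way */
}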
1216
1217
1218
1219/** Opcode 0x0f 0x40. */
1220FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
1221{
1222 IEMOP_MNEMONIC("cmovo Gv,Ev");
1223 CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
1224}
1225
1226
1227/** Opcode 0x0f 0x41. */
1228FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
1229{
1230 IEMOP_MNEMONIC("cmovno Gv,Ev");
1231 CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF));
1232}
1233
1234
1235/** Opcode 0x0f 0x42. */
1236FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
1237{
1238 IEMOP_MNEMONIC("cmovc Gv,Ev");
1239 CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF));
1240}
1241
1242
1243/** Opcode 0x0f 0x43. */
1244FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
1245{
1246 IEMOP_MNEMONIC("cmovnc Gv,Ev");
1247 CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF));
1248}
1249
1250
1251/** Opcode 0x0f 0x44. */
1252FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
1253{
1254 IEMOP_MNEMONIC("cmove Gv,Ev");
1255 CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF));
1256}
1257
1258
1259/** Opcode 0x0f 0x45. */
1260FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
1261{
1262 IEMOP_MNEMONIC("cmovne Gv,Ev");
1263 CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF));
1264}
1265
1266
1267/** Opcode 0x0f 0x46. */
1268FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
1269{
1270 IEMOP_MNEMONIC("cmovbe Gv,Ev");
1271 CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
1272}
1273
1274
1275/** Opcode 0x0f 0x47. */
1276FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
1277{
1278 IEMOP_MNEMONIC("cmovnbe Gv,Ev");
1279 CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
1280}
1281
1282
1283/** Opcode 0x0f 0x48. */
1284FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
1285{
1286 IEMOP_MNEMONIC("cmovs Gv,Ev");
1287 CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF));
1288}
1289
1290
1291/** Opcode 0x0f 0x49. */
1292FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
1293{
1294 IEMOP_MNEMONIC("cmovns Gv,Ev");
1295 CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF));
1296}
1297
1298
1299/** Opcode 0x0f 0x4a. */
1300FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
1301{
1302 IEMOP_MNEMONIC("cmovp Gv,Ev");
1303 CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF));
1304}
1305
1306
1307/** Opcode 0x0f 0x4b. */
1308FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
1309{
1310 IEMOP_MNEMONIC("cmovnp Gv,Ev");
1311 CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF));
1312}
1313
1314
1315/** Opcode 0x0f 0x4c. */
1316FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
1317{
1318 IEMOP_MNEMONIC("cmovl Gv,Ev");
1319 CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF));
1320}
1321
1322
1323/** Opcode 0x0f 0x4d. */
1324FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
1325{
1326 IEMOP_MNEMONIC("cmovnl Gv,Ev");
1327 CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF));
1328}
1329
1330
1331/** Opcode 0x0f 0x4e. */
1332FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
1333{
1334 IEMOP_MNEMONIC("cmovle Gv,Ev");
1335 CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
1336}
1337
1338
1339/** Opcode 0x0f 0x4f. */
1340FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
1341{
1342 IEMOP_MNEMONIC("cmovnle Gv,Ev");
1343 CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
1344}
1345
1346#undef CMOV_X
1347
1348/** Opcode 0x0f 0x50. */
1349FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
1350/** Opcode 0x0f 0x51. */
1351FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
1352/** Opcode 0x0f 0x52. */
1353FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
1354/** Opcode 0x0f 0x53. */
1355FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
1356/** Opcode 0x0f 0x54. */
1357FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
1358/** Opcode 0x0f 0x55. */
1359FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
1360/** Opcode 0x0f 0x56. */
1361FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
1362/** Opcode 0x0f 0x57. */
1363FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
1364/** Opcode 0x0f 0x58. */
1365FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd);
1366/** Opcode 0x0f 0x59. */
1367FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);
1368/** Opcode 0x0f 0x5a. */
1369FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
1370/** Opcode 0x0f 0x5b. */
1371FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
1372/** Opcode 0x0f 0x5c. */
1373FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
1374/** Opcode 0x0f 0x5d. */
1375FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
1376/** Opcode 0x0f 0x5e. */
1377FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
1378/** Opcode 0x0f 0x5f. */
1379FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
1380/** Opcode 0x0f 0x60. */
1381FNIEMOP_STUB(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq);
1382/** Opcode 0x0f 0x61. */
1383FNIEMOP_STUB(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq);
1384/** Opcode 0x0f 0x62. */
1385FNIEMOP_STUB(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq);
1386/** Opcode 0x0f 0x63. */
1387FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
1388/** Opcode 0x0f 0x64. */
1389FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
1390/** Opcode 0x0f 0x65. */
1391FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
1392/** Opcode 0x0f 0x66. */
1393FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
1394/** Opcode 0x0f 0x67. */
1395FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
1396/** Opcode 0x0f 0x68. */
1397FNIEMOP_STUB(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq);
1398/** Opcode 0x0f 0x69. */
1399FNIEMOP_STUB(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq);
1400/** Opcode 0x0f 0x6a. */
1401FNIEMOP_STUB(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq);
1402/** Opcode 0x0f 0x6b. */
1403FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
1404/** Opcode 0x0f 0x6c. */
1405FNIEMOP_STUB(iemOp_punpcklqdq_Vdq_Wdq);
1406/** Opcode 0x0f 0x6d. */
1407FNIEMOP_STUB(iemOp_punpckhqdq_Vdq_Wdq);
1408/** Opcode 0x0f 0x6e. */
1409FNIEMOP_STUB(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey);
1410/** Opcode 0x0f 0x6f. */
1411FNIEMOP_STUB(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq);
1412/** Opcode 0x0f 0x70. */
1413FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib);
1414/** Opcode 0x0f 0x71. */
1415FNIEMOP_STUB(iemOp_Grp12);
1416/** Opcode 0x0f 0x72. */
1417FNIEMOP_STUB(iemOp_Grp13);
1418/** Opcode 0x0f 0x73. */
1419FNIEMOP_STUB(iemOp_Grp14);
1420/** Opcode 0x0f 0x74. */
1421FNIEMOP_STUB(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq);
1422/** Opcode 0x0f 0x75. */
1423FNIEMOP_STUB(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq);
1424/** Opcode 0x0f 0x76. */
1425FNIEMOP_STUB(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq);
1426/** Opcode 0x0f 0x77. */
1427FNIEMOP_STUB(iemOp_emms);
1428/** Opcode 0x0f 0x78. */
1429FNIEMOP_STUB(iemOp_vmread);
1430/** Opcode 0x0f 0x79. */
1431FNIEMOP_STUB(iemOp_vmwrite);
1432/** Opcode 0x0f 0x7c. */
1433FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
1434/** Opcode 0x0f 0x7d. */
1435FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
1436/** Opcode 0x0f 0x7e. */
1437FNIEMOP_STUB(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq);
1438/** Opcode 0x0f 0x7f. */
1439FNIEMOP_STUB(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq);
1440
1441
1442/** Opcode 0x0f 0x80. */
1443FNIEMOP_DEF(iemOp_jo_Jv)
1444{
1445 IEMOP_MNEMONIC("jo Jv");
1446 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1447 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1448 {
1449 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1450 IEMOP_HLP_NO_LOCK_PREFIX();
1451
1452 IEM_MC_BEGIN(0, 0);
1453 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
1454 IEM_MC_REL_JMP_S16(i16Imm);
1455 } IEM_MC_ELSE() {
1456 IEM_MC_ADVANCE_RIP();
1457 } IEM_MC_ENDIF();
1458 IEM_MC_END();
1459 }
1460 else
1461 {
1462 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1463 IEMOP_HLP_NO_LOCK_PREFIX();
1464
1465 IEM_MC_BEGIN(0, 0);
1466 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
1467 IEM_MC_REL_JMP_S32(i32Imm);
1468 } IEM_MC_ELSE() {
1469 IEM_MC_ADVANCE_RIP();
1470 } IEM_MC_ENDIF();
1471 IEM_MC_END();
1472 }
1473 return VINF_SUCCESS;
1474}
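/*
 * Every Jcc Jv handler below repeats the pattern above: fetch a signed
 * 16- or 32-bit displacement, then either take the relative jump or
 * just advance RIP. Sketch of the taken branch; cbInstr is the full
 * instruction length. (With a 16-bit operand size the result is also
 * truncated to 16 bits, which the IEM_MC_REL_JMP_S16 microcode op
 * takes care of.)
 */
#include <stdint.h>

static uint64_t mySketchRelJmp(uint64_t uRipStart, uint8_t cbInstr, int32_t offRel)
{
    return uRipStart + cbInstr + (int64_t)offRel; /* sign-extended add */
}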
1475
1476
1477/** Opcode 0x0f 0x81. */
1478FNIEMOP_DEF(iemOp_jno_Jv)
1479{
1480 IEMOP_MNEMONIC("jno Jv");
1481 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1482 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1483 {
1484 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1485 IEMOP_HLP_NO_LOCK_PREFIX();
1486
1487 IEM_MC_BEGIN(0, 0);
1488 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
1489 IEM_MC_ADVANCE_RIP();
1490 } IEM_MC_ELSE() {
1491 IEM_MC_REL_JMP_S16(i16Imm);
1492 } IEM_MC_ENDIF();
1493 IEM_MC_END();
1494 }
1495 else
1496 {
1497 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1498 IEMOP_HLP_NO_LOCK_PREFIX();
1499
1500 IEM_MC_BEGIN(0, 0);
1501 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
1502 IEM_MC_ADVANCE_RIP();
1503 } IEM_MC_ELSE() {
1504 IEM_MC_REL_JMP_S32(i32Imm);
1505 } IEM_MC_ENDIF();
1506 IEM_MC_END();
1507 }
1508 return VINF_SUCCESS;
1509}
1510
1511
1512/** Opcode 0x0f 0x82. */
1513FNIEMOP_DEF(iemOp_jc_Jv)
1514{
1515 IEMOP_MNEMONIC("jc/jb/jnae Jv");
1516 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1517 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1518 {
1519 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1520 IEMOP_HLP_NO_LOCK_PREFIX();
1521
1522 IEM_MC_BEGIN(0, 0);
1523 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
1524 IEM_MC_REL_JMP_S16(i16Imm);
1525 } IEM_MC_ELSE() {
1526 IEM_MC_ADVANCE_RIP();
1527 } IEM_MC_ENDIF();
1528 IEM_MC_END();
1529 }
1530 else
1531 {
1532 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1533 IEMOP_HLP_NO_LOCK_PREFIX();
1534
1535 IEM_MC_BEGIN(0, 0);
1536 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
1537 IEM_MC_REL_JMP_S32(i32Imm);
1538 } IEM_MC_ELSE() {
1539 IEM_MC_ADVANCE_RIP();
1540 } IEM_MC_ENDIF();
1541 IEM_MC_END();
1542 }
1543 return VINF_SUCCESS;
1544}
1545
1546
1547/** Opcode 0x0f 0x83. */
1548FNIEMOP_DEF(iemOp_jnc_Jv)
1549{
1550 IEMOP_MNEMONIC("jnc/jnb/jae Jv");
1551 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1552 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1553 {
1554 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1555 IEMOP_HLP_NO_LOCK_PREFIX();
1556
1557 IEM_MC_BEGIN(0, 0);
1558 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
1559 IEM_MC_ADVANCE_RIP();
1560 } IEM_MC_ELSE() {
1561 IEM_MC_REL_JMP_S16(i16Imm);
1562 } IEM_MC_ENDIF();
1563 IEM_MC_END();
1564 }
1565 else
1566 {
1567 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1568 IEMOP_HLP_NO_LOCK_PREFIX();
1569
1570 IEM_MC_BEGIN(0, 0);
1571 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
1572 IEM_MC_ADVANCE_RIP();
1573 } IEM_MC_ELSE() {
1574 IEM_MC_REL_JMP_S32(i32Imm);
1575 } IEM_MC_ENDIF();
1576 IEM_MC_END();
1577 }
1578 return VINF_SUCCESS;
1579}
1580
1581
1582/** Opcode 0x0f 0x84. */
1583FNIEMOP_DEF(iemOp_je_Jv)
1584{
1585 IEMOP_MNEMONIC("je/jz Jv");
1586 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1587 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1588 {
1589 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1590 IEMOP_HLP_NO_LOCK_PREFIX();
1591
1592 IEM_MC_BEGIN(0, 0);
1593 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
1594 IEM_MC_REL_JMP_S16(i16Imm);
1595 } IEM_MC_ELSE() {
1596 IEM_MC_ADVANCE_RIP();
1597 } IEM_MC_ENDIF();
1598 IEM_MC_END();
1599 }
1600 else
1601 {
1602 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1603 IEMOP_HLP_NO_LOCK_PREFIX();
1604
1605 IEM_MC_BEGIN(0, 0);
1606 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
1607 IEM_MC_REL_JMP_S32(i32Imm);
1608 } IEM_MC_ELSE() {
1609 IEM_MC_ADVANCE_RIP();
1610 } IEM_MC_ENDIF();
1611 IEM_MC_END();
1612 }
1613 return VINF_SUCCESS;
1614}
1615
1616
1617/** Opcode 0x0f 0x85. */
1618FNIEMOP_DEF(iemOp_jne_Jv)
1619{
1620 IEMOP_MNEMONIC("jne/jnz Jv");
1621 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1622 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1623 {
1624 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1625 IEMOP_HLP_NO_LOCK_PREFIX();
1626
1627 IEM_MC_BEGIN(0, 0);
1628 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
1629 IEM_MC_ADVANCE_RIP();
1630 } IEM_MC_ELSE() {
1631 IEM_MC_REL_JMP_S16(i16Imm);
1632 } IEM_MC_ENDIF();
1633 IEM_MC_END();
1634 }
1635 else
1636 {
1637 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1638 IEMOP_HLP_NO_LOCK_PREFIX();
1639
1640 IEM_MC_BEGIN(0, 0);
1641 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
1642 IEM_MC_ADVANCE_RIP();
1643 } IEM_MC_ELSE() {
1644 IEM_MC_REL_JMP_S32(i32Imm);
1645 } IEM_MC_ENDIF();
1646 IEM_MC_END();
1647 }
1648 return VINF_SUCCESS;
1649}
1650
1651
1652/** Opcode 0x0f 0x86. */
1653FNIEMOP_DEF(iemOp_jbe_Jv)
1654{
1655 IEMOP_MNEMONIC("jbe/jna Jv");
1656 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1657 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1658 {
1659 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1660 IEMOP_HLP_NO_LOCK_PREFIX();
1661
1662 IEM_MC_BEGIN(0, 0);
1663 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
1664 IEM_MC_REL_JMP_S16(i16Imm);
1665 } IEM_MC_ELSE() {
1666 IEM_MC_ADVANCE_RIP();
1667 } IEM_MC_ENDIF();
1668 IEM_MC_END();
1669 }
1670 else
1671 {
1672 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1673 IEMOP_HLP_NO_LOCK_PREFIX();
1674
1675 IEM_MC_BEGIN(0, 0);
1676 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
1677 IEM_MC_REL_JMP_S32(i32Imm);
1678 } IEM_MC_ELSE() {
1679 IEM_MC_ADVANCE_RIP();
1680 } IEM_MC_ENDIF();
1681 IEM_MC_END();
1682 }
1683 return VINF_SUCCESS;
1684}
1685
1686
1687/** Opcode 0x0f 0x87. */
1688FNIEMOP_DEF(iemOp_jnbe_Jv)
1689{
1690 IEMOP_MNEMONIC("jnbe/ja Jv");
1691 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1692 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1693 {
1694 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1695 IEMOP_HLP_NO_LOCK_PREFIX();
1696
1697 IEM_MC_BEGIN(0, 0);
1698 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
1699 IEM_MC_ADVANCE_RIP();
1700 } IEM_MC_ELSE() {
1701 IEM_MC_REL_JMP_S16(i16Imm);
1702 } IEM_MC_ENDIF();
1703 IEM_MC_END();
1704 }
1705 else
1706 {
1707 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1708 IEMOP_HLP_NO_LOCK_PREFIX();
1709
1710 IEM_MC_BEGIN(0, 0);
1711 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
1712 IEM_MC_ADVANCE_RIP();
1713 } IEM_MC_ELSE() {
1714 IEM_MC_REL_JMP_S32(i32Imm);
1715 } IEM_MC_ENDIF();
1716 IEM_MC_END();
1717 }
1718 return VINF_SUCCESS;
1719}
1720
1721
1722/** Opcode 0x0f 0x88. */
1723FNIEMOP_DEF(iemOp_js_Jv)
1724{
1725 IEMOP_MNEMONIC("js Jv");
1726 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1727 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1728 {
1729 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1730 IEMOP_HLP_NO_LOCK_PREFIX();
1731
1732 IEM_MC_BEGIN(0, 0);
1733 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
1734 IEM_MC_REL_JMP_S16(i16Imm);
1735 } IEM_MC_ELSE() {
1736 IEM_MC_ADVANCE_RIP();
1737 } IEM_MC_ENDIF();
1738 IEM_MC_END();
1739 }
1740 else
1741 {
1742 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1743 IEMOP_HLP_NO_LOCK_PREFIX();
1744
1745 IEM_MC_BEGIN(0, 0);
1746 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
1747 IEM_MC_REL_JMP_S32(i32Imm);
1748 } IEM_MC_ELSE() {
1749 IEM_MC_ADVANCE_RIP();
1750 } IEM_MC_ENDIF();
1751 IEM_MC_END();
1752 }
1753 return VINF_SUCCESS;
1754}
1755
1756
1757/** Opcode 0x0f 0x89. */
1758FNIEMOP_DEF(iemOp_jns_Jv)
1759{
1760 IEMOP_MNEMONIC("jns Jv");
1761 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1762 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1763 {
1764 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1765 IEMOP_HLP_NO_LOCK_PREFIX();
1766
1767 IEM_MC_BEGIN(0, 0);
1768 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
1769 IEM_MC_ADVANCE_RIP();
1770 } IEM_MC_ELSE() {
1771 IEM_MC_REL_JMP_S16(i16Imm);
1772 } IEM_MC_ENDIF();
1773 IEM_MC_END();
1774 }
1775 else
1776 {
1777 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1778 IEMOP_HLP_NO_LOCK_PREFIX();
1779
1780 IEM_MC_BEGIN(0, 0);
1781 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
1782 IEM_MC_ADVANCE_RIP();
1783 } IEM_MC_ELSE() {
1784 IEM_MC_REL_JMP_S32(i32Imm);
1785 } IEM_MC_ENDIF();
1786 IEM_MC_END();
1787 }
1788 return VINF_SUCCESS;
1789}
1790
1791
1792/** Opcode 0x0f 0x8a. */
1793FNIEMOP_DEF(iemOp_jp_Jv)
1794{
1795 IEMOP_MNEMONIC("jp Jv");
1796 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1797 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1798 {
1799 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1800 IEMOP_HLP_NO_LOCK_PREFIX();
1801
1802 IEM_MC_BEGIN(0, 0);
1803 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1804 IEM_MC_REL_JMP_S16(i16Imm);
1805 } IEM_MC_ELSE() {
1806 IEM_MC_ADVANCE_RIP();
1807 } IEM_MC_ENDIF();
1808 IEM_MC_END();
1809 }
1810 else
1811 {
1812 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1813 IEMOP_HLP_NO_LOCK_PREFIX();
1814
1815 IEM_MC_BEGIN(0, 0);
1816 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1817 IEM_MC_REL_JMP_S32(i32Imm);
1818 } IEM_MC_ELSE() {
1819 IEM_MC_ADVANCE_RIP();
1820 } IEM_MC_ENDIF();
1821 IEM_MC_END();
1822 }
1823 return VINF_SUCCESS;
1824}
1825
1826
1827/** Opcode 0x0f 0x8b. */
1828FNIEMOP_DEF(iemOp_jnp_Jv)
1829{
1830 IEMOP_MNEMONIC("jnp Jv");
1831 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1832 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1833 {
1834 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1835 IEMOP_HLP_NO_LOCK_PREFIX();
1836
1837 IEM_MC_BEGIN(0, 0);
1838 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1839 IEM_MC_ADVANCE_RIP();
1840 } IEM_MC_ELSE() {
1841 IEM_MC_REL_JMP_S16(i16Imm);
1842 } IEM_MC_ENDIF();
1843 IEM_MC_END();
1844 }
1845 else
1846 {
1847 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1848 IEMOP_HLP_NO_LOCK_PREFIX();
1849
1850 IEM_MC_BEGIN(0, 0);
1851 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1852 IEM_MC_ADVANCE_RIP();
1853 } IEM_MC_ELSE() {
1854 IEM_MC_REL_JMP_S32(i32Imm);
1855 } IEM_MC_ENDIF();
1856 IEM_MC_END();
1857 }
1858 return VINF_SUCCESS;
1859}
1860
1861
1862/** Opcode 0x0f 0x8c. */
1863FNIEMOP_DEF(iemOp_jl_Jv)
1864{
1865 IEMOP_MNEMONIC("jl/jnge Jv");
1866 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1867 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1868 {
1869 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1870 IEMOP_HLP_NO_LOCK_PREFIX();
1871
1872 IEM_MC_BEGIN(0, 0);
1873 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
1874 IEM_MC_REL_JMP_S16(i16Imm);
1875 } IEM_MC_ELSE() {
1876 IEM_MC_ADVANCE_RIP();
1877 } IEM_MC_ENDIF();
1878 IEM_MC_END();
1879 }
1880 else
1881 {
1882 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1883 IEMOP_HLP_NO_LOCK_PREFIX();
1884
1885 IEM_MC_BEGIN(0, 0);
1886 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
1887 IEM_MC_REL_JMP_S32(i32Imm);
1888 } IEM_MC_ELSE() {
1889 IEM_MC_ADVANCE_RIP();
1890 } IEM_MC_ENDIF();
1891 IEM_MC_END();
1892 }
1893 return VINF_SUCCESS;
1894}
1895
1896
1897/** Opcode 0x0f 0x8d. */
1898FNIEMOP_DEF(iemOp_jnl_Jv)
1899{
1900 IEMOP_MNEMONIC("jnl/jge Jv");
1901 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1902 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1903 {
1904 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1905 IEMOP_HLP_NO_LOCK_PREFIX();
1906
1907 IEM_MC_BEGIN(0, 0);
1908 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
1909 IEM_MC_ADVANCE_RIP();
1910 } IEM_MC_ELSE() {
1911 IEM_MC_REL_JMP_S16(i16Imm);
1912 } IEM_MC_ENDIF();
1913 IEM_MC_END();
1914 }
1915 else
1916 {
1917 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1918 IEMOP_HLP_NO_LOCK_PREFIX();
1919
1920 IEM_MC_BEGIN(0, 0);
1921 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
1922 IEM_MC_ADVANCE_RIP();
1923 } IEM_MC_ELSE() {
1924 IEM_MC_REL_JMP_S32(i32Imm);
1925 } IEM_MC_ENDIF();
1926 IEM_MC_END();
1927 }
1928 return VINF_SUCCESS;
1929}
1930
1931
1932/** Opcode 0x0f 0x8e. */
1933FNIEMOP_DEF(iemOp_jle_Jv)
1934{
1935 IEMOP_MNEMONIC("jle/jng Jv");
1936 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1937 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1938 {
1939 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1940 IEMOP_HLP_NO_LOCK_PREFIX();
1941
1942 IEM_MC_BEGIN(0, 0);
1943 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
1944 IEM_MC_REL_JMP_S16(i16Imm);
1945 } IEM_MC_ELSE() {
1946 IEM_MC_ADVANCE_RIP();
1947 } IEM_MC_ENDIF();
1948 IEM_MC_END();
1949 }
1950 else
1951 {
1952 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1953 IEMOP_HLP_NO_LOCK_PREFIX();
1954
1955 IEM_MC_BEGIN(0, 0);
1956 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
1957 IEM_MC_REL_JMP_S32(i32Imm);
1958 } IEM_MC_ELSE() {
1959 IEM_MC_ADVANCE_RIP();
1960 } IEM_MC_ENDIF();
1961 IEM_MC_END();
1962 }
1963 return VINF_SUCCESS;
1964}
1965
1966
1967/** Opcode 0x0f 0x8f. */
1968FNIEMOP_DEF(iemOp_jnle_Jv)
1969{
1970 IEMOP_MNEMONIC("jnle/jg Jv");
1971 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1972 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1973 {
1974 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1975 IEMOP_HLP_NO_LOCK_PREFIX();
1976
1977 IEM_MC_BEGIN(0, 0);
1978 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
1979 IEM_MC_ADVANCE_RIP();
1980 } IEM_MC_ELSE() {
1981 IEM_MC_REL_JMP_S16(i16Imm);
1982 } IEM_MC_ENDIF();
1983 IEM_MC_END();
1984 }
1985 else
1986 {
1987 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1988 IEMOP_HLP_NO_LOCK_PREFIX();
1989
1990 IEM_MC_BEGIN(0, 0);
1991 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
1992 IEM_MC_ADVANCE_RIP();
1993 } IEM_MC_ELSE() {
1994 IEM_MC_REL_JMP_S32(i32Imm);
1995 } IEM_MC_ENDIF();
1996 IEM_MC_END();
1997 }
1998 return VINF_SUCCESS;
1999}
2000
2001
2002/** Opcode 0x0f 0x90. */
2003FNIEMOP_DEF(iemOp_seto_Eb)
2004{
2005 IEMOP_MNEMONIC("seto Eb");
2006 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2007 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2008
2009 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2010 * any way. AMD says it's "unused", whatever that means. We're
2011 * ignoring for now. */
2012 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2013 {
2014 /* register target */
2015 IEM_MC_BEGIN(0, 0);
2016 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2017 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2018 } IEM_MC_ELSE() {
2019 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2020 } IEM_MC_ENDIF();
2021 IEM_MC_ADVANCE_RIP();
2022 IEM_MC_END();
2023 }
2024 else
2025 {
2026 /* memory target */
2027 IEM_MC_BEGIN(0, 1);
2028 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2029 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2030 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2031 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2032 } IEM_MC_ELSE() {
2033 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2034 } IEM_MC_ENDIF();
2035 IEM_MC_ADVANCE_RIP();
2036 IEM_MC_END();
2037 }
2038 return VINF_SUCCESS;
2039}
2040
2041
2042/** Opcode 0x0f 0x91. */
2043FNIEMOP_DEF(iemOp_setno_Eb)
2044{
2045 IEMOP_MNEMONIC("setno Eb");
2046 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2047 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2048
2049 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2050 * any way. AMD says it's "unused", whatever that means. We're
2051 * ignoring for now. */
2052 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2053 {
2054 /* register target */
2055 IEM_MC_BEGIN(0, 0);
2056 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2057 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2058 } IEM_MC_ELSE() {
2059 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2060 } IEM_MC_ENDIF();
2061 IEM_MC_ADVANCE_RIP();
2062 IEM_MC_END();
2063 }
2064 else
2065 {
2066 /* memory target */
2067 IEM_MC_BEGIN(0, 1);
2068 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2069 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2070 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2071 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2072 } IEM_MC_ELSE() {
2073 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2074 } IEM_MC_ENDIF();
2075 IEM_MC_ADVANCE_RIP();
2076 IEM_MC_END();
2077 }
2078 return VINF_SUCCESS;
2079}
2080
2081
2082/** Opcode 0x0f 0x92. */
2083FNIEMOP_DEF(iemOp_setc_Eb)
2084{
2085 IEMOP_MNEMONIC("setc Eb");
2086 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2087 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2088
2089 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2090 * any way. AMD says it's "unused", whatever that means. We're
2091 * ignoring for now. */
2092 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2093 {
2094 /* register target */
2095 IEM_MC_BEGIN(0, 0);
2096 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2097 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2098 } IEM_MC_ELSE() {
2099 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2100 } IEM_MC_ENDIF();
2101 IEM_MC_ADVANCE_RIP();
2102 IEM_MC_END();
2103 }
2104 else
2105 {
2106 /* memory target */
2107 IEM_MC_BEGIN(0, 1);
2108 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2109 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2110 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2111 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2112 } IEM_MC_ELSE() {
2113 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2114 } IEM_MC_ENDIF();
2115 IEM_MC_ADVANCE_RIP();
2116 IEM_MC_END();
2117 }
2118 return VINF_SUCCESS;
2119}
2120
2121
2122/** Opcode 0x0f 0x93. */
2123FNIEMOP_DEF(iemOp_setnc_Eb)
2124{
2125 IEMOP_MNEMONIC("setnc Eb");
2126 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2127 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2128
2129 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2130 * any way. AMD says it's "unused", whatever that means. We're
2131 * ignoring for now. */
2132 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2133 {
2134 /* register target */
2135 IEM_MC_BEGIN(0, 0);
2136 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2137 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2138 } IEM_MC_ELSE() {
2139 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2140 } IEM_MC_ENDIF();
2141 IEM_MC_ADVANCE_RIP();
2142 IEM_MC_END();
2143 }
2144 else
2145 {
2146 /* memory target */
2147 IEM_MC_BEGIN(0, 1);
2148 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2149 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2150 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2151 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2152 } IEM_MC_ELSE() {
2153 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2154 } IEM_MC_ENDIF();
2155 IEM_MC_ADVANCE_RIP();
2156 IEM_MC_END();
2157 }
2158 return VINF_SUCCESS;
2159}
2160
2161
2162/** Opcode 0x0f 0x94. */
2163FNIEMOP_DEF(iemOp_sete_Eb)
2164{
2165 IEMOP_MNEMONIC("sete Eb");
2166 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2167 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2168
2169 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2170 * any way. AMD says it's "unused", whatever that means. We're
2171 * ignoring for now. */
2172 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2173 {
2174 /* register target */
2175 IEM_MC_BEGIN(0, 0);
2176 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2177 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2178 } IEM_MC_ELSE() {
2179 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2180 } IEM_MC_ENDIF();
2181 IEM_MC_ADVANCE_RIP();
2182 IEM_MC_END();
2183 }
2184 else
2185 {
2186 /* memory target */
2187 IEM_MC_BEGIN(0, 1);
2188 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2189 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2190 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2191 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2192 } IEM_MC_ELSE() {
2193 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2194 } IEM_MC_ENDIF();
2195 IEM_MC_ADVANCE_RIP();
2196 IEM_MC_END();
2197 }
2198 return VINF_SUCCESS;
2199}
2200
2201
2202/** Opcode 0x0f 0x95. */
2203FNIEMOP_DEF(iemOp_setne_Eb)
2204{
2205 IEMOP_MNEMONIC("setne Eb");
2206 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2207 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2208
2209 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2210 * any way. AMD says it's "unused", whatever that means. We're
2211 * ignoring for now. */
2212 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2213 {
2214 /* register target */
2215 IEM_MC_BEGIN(0, 0);
2216 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2217 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2218 } IEM_MC_ELSE() {
2219 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2220 } IEM_MC_ENDIF();
2221 IEM_MC_ADVANCE_RIP();
2222 IEM_MC_END();
2223 }
2224 else
2225 {
2226 /* memory target */
2227 IEM_MC_BEGIN(0, 1);
2228 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2229 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2230 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2231 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2232 } IEM_MC_ELSE() {
2233 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2234 } IEM_MC_ENDIF();
2235 IEM_MC_ADVANCE_RIP();
2236 IEM_MC_END();
2237 }
2238 return VINF_SUCCESS;
2239}
2240
2241
2242/** Opcode 0x0f 0x96. */
2243FNIEMOP_DEF(iemOp_setbe_Eb)
2244{
2245 IEMOP_MNEMONIC("setbe Eb");
2246 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2247 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2248
2249 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2250 * any way. AMD says it's "unused", whatever that means. We're
2251 * ignoring for now. */
2252 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2253 {
2254 /* register target */
2255 IEM_MC_BEGIN(0, 0);
2256 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2257 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2258 } IEM_MC_ELSE() {
2259 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2260 } IEM_MC_ENDIF();
2261 IEM_MC_ADVANCE_RIP();
2262 IEM_MC_END();
2263 }
2264 else
2265 {
2266 /* memory target */
2267 IEM_MC_BEGIN(0, 1);
2268 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2269 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2270 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2271 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2272 } IEM_MC_ELSE() {
2273 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2274 } IEM_MC_ENDIF();
2275 IEM_MC_ADVANCE_RIP();
2276 IEM_MC_END();
2277 }
2278 return VINF_SUCCESS;
2279}
2280
2281
2282/** Opcode 0x0f 0x97. */
2283FNIEMOP_DEF(iemOp_setnbe_Eb)
2284{
2285 IEMOP_MNEMONIC("setnbe Eb");
2286 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2287 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2288
2289 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2290 * any way. AMD says it's "unused", whatever that means. We're
2291 * ignoring for now. */
2292 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2293 {
2294 /* register target */
2295 IEM_MC_BEGIN(0, 0);
2296 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2297 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2298 } IEM_MC_ELSE() {
2299 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2300 } IEM_MC_ENDIF();
2301 IEM_MC_ADVANCE_RIP();
2302 IEM_MC_END();
2303 }
2304 else
2305 {
2306 /* memory target */
2307 IEM_MC_BEGIN(0, 1);
2308 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2309 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2310 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2311 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2312 } IEM_MC_ELSE() {
2313 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2314 } IEM_MC_ENDIF();
2315 IEM_MC_ADVANCE_RIP();
2316 IEM_MC_END();
2317 }
2318 return VINF_SUCCESS;
2319}
2320
2321
2322/** Opcode 0x0f 0x98. */
2323FNIEMOP_DEF(iemOp_sets_Eb)
2324{
2325 IEMOP_MNEMONIC("sets Eb");
2326 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2327 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2328
2329 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2330 * any way. AMD says it's "unused", whatever that means. We're
2331 * ignoring for now. */
2332 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2333 {
2334 /* register target */
2335 IEM_MC_BEGIN(0, 0);
2336 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2337 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2338 } IEM_MC_ELSE() {
2339 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2340 } IEM_MC_ENDIF();
2341 IEM_MC_ADVANCE_RIP();
2342 IEM_MC_END();
2343 }
2344 else
2345 {
2346 /* memory target */
2347 IEM_MC_BEGIN(0, 1);
2348 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2349 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2350 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2351 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2352 } IEM_MC_ELSE() {
2353 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2354 } IEM_MC_ENDIF();
2355 IEM_MC_ADVANCE_RIP();
2356 IEM_MC_END();
2357 }
2358 return VINF_SUCCESS;
2359}
2360
2361
2362/** Opcode 0x0f 0x99. */
2363FNIEMOP_DEF(iemOp_setns_Eb)
2364{
2365 IEMOP_MNEMONIC("setns Eb");
2366 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2367 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2368
2369 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2370 * any way. AMD says it's "unused", whatever that means. We're
2371 * ignoring for now. */
2372 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2373 {
2374 /* register target */
2375 IEM_MC_BEGIN(0, 0);
2376 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2377 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2378 } IEM_MC_ELSE() {
2379 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2380 } IEM_MC_ENDIF();
2381 IEM_MC_ADVANCE_RIP();
2382 IEM_MC_END();
2383 }
2384 else
2385 {
2386 /* memory target */
2387 IEM_MC_BEGIN(0, 1);
2388 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2389 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2390 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2391 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2392 } IEM_MC_ELSE() {
2393 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2394 } IEM_MC_ENDIF();
2395 IEM_MC_ADVANCE_RIP();
2396 IEM_MC_END();
2397 }
2398 return VINF_SUCCESS;
2399}
2400
2401
2402/** Opcode 0x0f 0x9a. */
2403FNIEMOP_DEF(iemOp_setp_Eb)
2404{
2405 IEMOP_MNEMONIC("setnp Eb");
2406 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2407 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2408
2409 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2410 * any way. AMD says it's "unused", whatever that means. We're
2411 * ignoring for now. */
2412 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2413 {
2414 /* register target */
2415 IEM_MC_BEGIN(0, 0);
2416 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2417 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2418 } IEM_MC_ELSE() {
2419 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2420 } IEM_MC_ENDIF();
2421 IEM_MC_ADVANCE_RIP();
2422 IEM_MC_END();
2423 }
2424 else
2425 {
2426 /* memory target */
2427 IEM_MC_BEGIN(0, 1);
2428 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2429 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2430 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2431 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2432 } IEM_MC_ELSE() {
2433 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2434 } IEM_MC_ENDIF();
2435 IEM_MC_ADVANCE_RIP();
2436 IEM_MC_END();
2437 }
2438 return VINF_SUCCESS;
2439}
2440
2441
2442/** Opcode 0x0f 0x9b. */
2443FNIEMOP_DEF(iemOp_setnp_Eb)
2444{
2445 IEMOP_MNEMONIC("setnp Eb");
2446 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2447 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2448
2449 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2450 * any way. AMD says it's "unused", whatever that means. We're
2451 * ignoring for now. */
2452 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2453 {
2454 /* register target */
2455 IEM_MC_BEGIN(0, 0);
2456 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2457 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2458 } IEM_MC_ELSE() {
2459 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2460 } IEM_MC_ENDIF();
2461 IEM_MC_ADVANCE_RIP();
2462 IEM_MC_END();
2463 }
2464 else
2465 {
2466 /* memory target */
2467 IEM_MC_BEGIN(0, 1);
2468 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2469 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2470 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2471 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2472 } IEM_MC_ELSE() {
2473 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2474 } IEM_MC_ENDIF();
2475 IEM_MC_ADVANCE_RIP();
2476 IEM_MC_END();
2477 }
2478 return VINF_SUCCESS;
2479}
2480
2481
2482/** Opcode 0x0f 0x9c. */
2483FNIEMOP_DEF(iemOp_setl_Eb)
2484{
2485 IEMOP_MNEMONIC("setl Eb");
2486 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2487 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2488
2489 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2490 * any way. AMD says it's "unused", whatever that means. We're
2491 * ignoring for now. */
2492 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2493 {
2494 /* register target */
2495 IEM_MC_BEGIN(0, 0);
2496 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2497 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2498 } IEM_MC_ELSE() {
2499 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2500 } IEM_MC_ENDIF();
2501 IEM_MC_ADVANCE_RIP();
2502 IEM_MC_END();
2503 }
2504 else
2505 {
2506 /* memory target */
2507 IEM_MC_BEGIN(0, 1);
2508 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2509 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2510 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2511 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2512 } IEM_MC_ELSE() {
2513 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2514 } IEM_MC_ENDIF();
2515 IEM_MC_ADVANCE_RIP();
2516 IEM_MC_END();
2517 }
2518 return VINF_SUCCESS;
2519}
2520
2521
2522/** Opcode 0x0f 0x9d. */
2523FNIEMOP_DEF(iemOp_setnl_Eb)
2524{
2525 IEMOP_MNEMONIC("setnl Eb");
2526 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2527 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2528
2529 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2530 * any way. AMD says it's "unused", whatever that means. We're
2531 * ignoring for now. */
2532 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2533 {
2534 /* register target */
2535 IEM_MC_BEGIN(0, 0);
2536 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2537 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2538 } IEM_MC_ELSE() {
2539 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2540 } IEM_MC_ENDIF();
2541 IEM_MC_ADVANCE_RIP();
2542 IEM_MC_END();
2543 }
2544 else
2545 {
2546 /* memory target */
2547 IEM_MC_BEGIN(0, 1);
2548 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2549 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2550 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2551 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2552 } IEM_MC_ELSE() {
2553 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2554 } IEM_MC_ENDIF();
2555 IEM_MC_ADVANCE_RIP();
2556 IEM_MC_END();
2557 }
2558 return VINF_SUCCESS;
2559}
2560
2561
2562/** Opcode 0x0f 0x9e. */
2563FNIEMOP_DEF(iemOp_setle_Eb)
2564{
2565 IEMOP_MNEMONIC("setle Eb");
2566 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2567 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2568
2569 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2570 * any way. AMD says it's "unused", whatever that means. We're
2571 * ignoring for now. */
2572 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2573 {
2574 /* register target */
2575 IEM_MC_BEGIN(0, 0);
2576 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2577 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2578 } IEM_MC_ELSE() {
2579 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2580 } IEM_MC_ENDIF();
2581 IEM_MC_ADVANCE_RIP();
2582 IEM_MC_END();
2583 }
2584 else
2585 {
2586 /* memory target */
2587 IEM_MC_BEGIN(0, 1);
2588 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2589 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2590 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2591 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2592 } IEM_MC_ELSE() {
2593 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2594 } IEM_MC_ENDIF();
2595 IEM_MC_ADVANCE_RIP();
2596 IEM_MC_END();
2597 }
2598 return VINF_SUCCESS;
2599}
2600
2601
2602/** Opcode 0x0f 0x9f. */
2603FNIEMOP_DEF(iemOp_setnle_Eb)
2604{
2605 IEMOP_MNEMONIC("setnle Eb");
2606 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2607 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2608
2609 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2610 * any way. AMD says it's "unused", whatever that means. We're
2611 * ignoring for now. */
2612 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2613 {
2614 /* register target */
2615 IEM_MC_BEGIN(0, 0);
2616 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2617 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2618 } IEM_MC_ELSE() {
2619 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2620 } IEM_MC_ENDIF();
2621 IEM_MC_ADVANCE_RIP();
2622 IEM_MC_END();
2623 }
2624 else
2625 {
2626 /* memory target */
2627 IEM_MC_BEGIN(0, 1);
2628 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2629 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2630 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2631 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2632 } IEM_MC_ELSE() {
2633 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2634 } IEM_MC_ENDIF();
2635 IEM_MC_ADVANCE_RIP();
2636 IEM_MC_END();
2637 }
2638 return VINF_SUCCESS;
2639}
2640
2641
2642/**
2643 * Common 'push segment-register' helper.
2644 */
2645FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
2646{
2647 IEMOP_HLP_NO_LOCK_PREFIX();
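     /* In 64-bit mode only the FS/GS pushes (0f a0 / 0f a8) are valid; the
        one byte ES/CS/SS/DS pushes (06/0e/16/1e) raise #UD there, hence the
        range check below. */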
2648 if (iReg < X86_SREG_FS)
2649 IEMOP_HLP_NO_64BIT();
2650 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
2651
2652 switch (pIemCpu->enmEffOpSize)
2653 {
2654 case IEMMODE_16BIT:
2655 IEM_MC_BEGIN(0, 1);
2656 IEM_MC_LOCAL(uint16_t, u16Value);
2657 IEM_MC_FETCH_SREG_U16(u16Value, iReg);
2658 IEM_MC_PUSH_U16(u16Value);
2659 IEM_MC_ADVANCE_RIP();
2660 IEM_MC_END();
2661 break;
2662
2663 case IEMMODE_32BIT:
2664 IEM_MC_BEGIN(0, 1);
2665 IEM_MC_LOCAL(uint32_t, u32Value);
2666 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
2667 IEM_MC_PUSH_U32(u32Value);
2668 IEM_MC_ADVANCE_RIP();
2669 IEM_MC_END();
2670 break;
2671
2672 case IEMMODE_64BIT:
2673 IEM_MC_BEGIN(0, 1);
2674 IEM_MC_LOCAL(uint64_t, u64Value);
2675 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
2676 IEM_MC_PUSH_U64(u64Value);
2677 IEM_MC_ADVANCE_RIP();
2678 IEM_MC_END();
2679 break;
2680 }
2681
2682 return VINF_SUCCESS;
2683}
2684
2685
2686/** Opcode 0x0f 0xa0. */
2687FNIEMOP_DEF(iemOp_push_fs)
2688{
2689 IEMOP_MNEMONIC("push fs");
2690 IEMOP_HLP_NO_LOCK_PREFIX();
2691 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
2692}
2693
2694
2695/** Opcode 0x0f 0xa1. */
2696FNIEMOP_DEF(iemOp_pop_fs)
2697{
2698 IEMOP_MNEMONIC("pop fs");
2699 IEMOP_HLP_NO_LOCK_PREFIX();
2700 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
2701}
2702
2703
2704/** Opcode 0x0f 0xa2. */
2705FNIEMOP_DEF(iemOp_cpuid)
2706{
2707 IEMOP_MNEMONIC("cpuid");
2708 IEMOP_HLP_NO_LOCK_PREFIX();
2709 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
2710}
2711
2712
2713/**
2714 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
2715 * iemOp_bts_Ev_Gv.
2716 */
2717FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
2718{
2719 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2720 IEMOP_HLP_NO_LOCK_PREFIX();
2721 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
2722
2723 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2724 {
2725 /* register destination. */
2726 IEMOP_HLP_NO_LOCK_PREFIX();
2727 switch (pIemCpu->enmEffOpSize)
2728 {
2729 case IEMMODE_16BIT:
2730 IEM_MC_BEGIN(3, 0);
2731 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2732 IEM_MC_ARG(uint16_t, u16Src, 1);
2733 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2734
2735 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2736 IEM_MC_AND_LOCAL_U16(u16Src, 0x0f);
2737 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2738 IEM_MC_REF_EFLAGS(pEFlags);
2739 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2740
2741 IEM_MC_ADVANCE_RIP();
2742 IEM_MC_END();
2743 return VINF_SUCCESS;
2744
2745 case IEMMODE_32BIT:
2746 IEM_MC_BEGIN(3, 0);
2747 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2748 IEM_MC_ARG(uint32_t, u32Src, 1);
2749 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2750
2751 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2752 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
2753 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2754 IEM_MC_REF_EFLAGS(pEFlags);
2755 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2756
2757 IEM_MC_ADVANCE_RIP();
2758 IEM_MC_END();
2759 return VINF_SUCCESS;
2760
2761 case IEMMODE_64BIT:
2762 IEM_MC_BEGIN(3, 0);
2763 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2764 IEM_MC_ARG(uint64_t, u64Src, 1);
2765 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2766
2767 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2768 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
2769 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2770 IEM_MC_REF_EFLAGS(pEFlags);
2771 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2772
2773 IEM_MC_ADVANCE_RIP();
2774 IEM_MC_END();
2775 return VINF_SUCCESS;
2776
2777 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2778 }
2779 }
2780 else
2781 {
2782 /* memory destination. */
2783
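     /* BT has no locked worker since it only reads the destination; it gets
        read-only access and the LOCK prefix is rejected. The other three
        (BTS/BTR/BTC) are read-modify-write and can be locked. */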
2784 uint32_t fAccess;
2785 if (pImpl->pfnLockedU16)
2786 fAccess = IEM_ACCESS_DATA_RW;
2787 else /* BT */
2788 {
2789 IEMOP_HLP_NO_LOCK_PREFIX();
2790 fAccess = IEM_ACCESS_DATA_R;
2791 }
2792
2793 /** @todo test negative bit offsets! */
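     /* The memory variants treat the source register as a signed bit offset
        relative to the effective address: arithmetic shifting it right by
        log2(operand bits) yields a sign preserving element index, and
        shifting that left by log2(operand bytes) yields the byte
        displacement added to the address below. */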
2794 switch (pIemCpu->enmEffOpSize)
2795 {
2796 case IEMMODE_16BIT:
2797 IEM_MC_BEGIN(3, 2);
2798 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2799 IEM_MC_ARG(uint16_t, u16Src, 1);
2800 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2801 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2802 IEM_MC_LOCAL(int16_t, i16AddrAdj);
2803
2804 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2805 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2806 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
2807 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
2808 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
2809 IEM_MC_SHL_LOCAL_S16(i16AddrAdj, 1);
2810 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
2811 IEM_MC_FETCH_EFLAGS(EFlags);
2812
2813 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2814 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2815 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2816 else
2817 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
2818 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
2819
2820 IEM_MC_COMMIT_EFLAGS(EFlags);
2821 IEM_MC_ADVANCE_RIP();
2822 IEM_MC_END();
2823 return VINF_SUCCESS;
2824
2825 case IEMMODE_32BIT:
2826 IEM_MC_BEGIN(3, 2);
2827 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2828 IEM_MC_ARG(uint32_t, u32Src, 1);
2829 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2830 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2831 IEM_MC_LOCAL(int32_t, i32AddrAdj);
2832
2833 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2834 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2835 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
2836 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
2837 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
2838 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
2839 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
2840 IEM_MC_FETCH_EFLAGS(EFlags);
2841
2842 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2843 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2844 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2845 else
2846 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
2847 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
2848
2849 IEM_MC_COMMIT_EFLAGS(EFlags);
2850 IEM_MC_ADVANCE_RIP();
2851 IEM_MC_END();
2852 return VINF_SUCCESS;
2853
2854 case IEMMODE_64BIT:
2855 IEM_MC_BEGIN(3, 2);
2856 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2857 IEM_MC_ARG(uint64_t, u64Src, 1);
2858 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2859 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2860 IEM_MC_LOCAL(int64_t, i64AddrAdj);
2861
2862 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2863 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2864 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
2865 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
2866 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
2867 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
2868 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
2869 IEM_MC_FETCH_EFLAGS(EFlags);
2870
2871 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2872 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2873 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2874 else
2875 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
2876 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
2877
2878 IEM_MC_COMMIT_EFLAGS(EFlags);
2879 IEM_MC_ADVANCE_RIP();
2880 IEM_MC_END();
2881 return VINF_SUCCESS;
2882
2883 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2884 }
2885 }
2886}
2887
2888
2889/** Opcode 0x0f 0xa3. */
2890FNIEMOP_DEF(iemOp_bt_Ev_Gv)
2891{
2892 IEMOP_MNEMONIC("bt Gv,Mp");
2893 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
2894}
2895
2896
2897/**
2898 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
2899 */
2900FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
2901{
2902 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2903 IEMOP_HLP_NO_LOCK_PREFIX();
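     /* AF is always undefined after SHLD/SHRD and OF is only defined for
        1-bit shifts, so verification masks both out below. */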
2904 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
2905
2906 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2907 {
2908 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
2909 IEMOP_HLP_NO_LOCK_PREFIX();
2910
2911 switch (pIemCpu->enmEffOpSize)
2912 {
2913 case IEMMODE_16BIT:
2914 IEM_MC_BEGIN(4, 0);
2915 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2916 IEM_MC_ARG(uint16_t, u16Src, 1);
2917 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
2918 IEM_MC_ARG(uint32_t *, pEFlags, 3);
2919
2920 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2921 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2922 IEM_MC_REF_EFLAGS(pEFlags);
2923 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
2924
2925 IEM_MC_ADVANCE_RIP();
2926 IEM_MC_END();
2927 return VINF_SUCCESS;
2928
2929 case IEMMODE_32BIT:
2930 IEM_MC_BEGIN(4, 0);
2931 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2932 IEM_MC_ARG(uint32_t, u32Src, 1);
2933 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
2934 IEM_MC_ARG(uint32_t *, pEFlags, 3);
2935
2936 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2937 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2938 IEM_MC_REF_EFLAGS(pEFlags);
2939 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
2940
2941 IEM_MC_ADVANCE_RIP();
2942 IEM_MC_END();
2943 return VINF_SUCCESS;
2944
2945 case IEMMODE_64BIT:
2946 IEM_MC_BEGIN(4, 0);
2947 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2948 IEM_MC_ARG(uint64_t, u64Src, 1);
2949 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
2950 IEM_MC_ARG(uint32_t *, pEFlags, 3);
2951
2952 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2953 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2954 IEM_MC_REF_EFLAGS(pEFlags);
2955 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
2956
2957 IEM_MC_ADVANCE_RIP();
2958 IEM_MC_END();
2959 return VINF_SUCCESS;
2960
2961 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2962 }
2963 }
2964 else
2965 {
2966 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2967
2968 switch (pIemCpu->enmEffOpSize)
2969 {
2970 case IEMMODE_16BIT:
2971 IEM_MC_BEGIN(4, 2);
2972 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2973 IEM_MC_ARG(uint16_t, u16Src, 1);
2974 IEM_MC_ARG(uint8_t, cShiftArg, 2);
2975 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
2976 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2977
2978 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2979 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
2980 IEM_MC_ASSIGN(cShiftArg, cShift);
2981 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2982 IEM_MC_FETCH_EFLAGS(EFlags);
2983 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2984 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
2985
2986 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
2987 IEM_MC_COMMIT_EFLAGS(EFlags);
2988 IEM_MC_ADVANCE_RIP();
2989 IEM_MC_END();
2990 return VINF_SUCCESS;
2991
2992 case IEMMODE_32BIT:
2993 IEM_MC_BEGIN(4, 2);
2994 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2995 IEM_MC_ARG(uint32_t, u32Src, 1);
2996 IEM_MC_ARG(uint8_t, cShiftArg, 2);
2997 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
2998 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2999
3000 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3001 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3002 IEM_MC_ASSIGN(cShiftArg, cShift);
3003 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3004 IEM_MC_FETCH_EFLAGS(EFlags);
3005 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3006 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3007
3008 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3009 IEM_MC_COMMIT_EFLAGS(EFlags);
3010 IEM_MC_ADVANCE_RIP();
3011 IEM_MC_END();
3012 return VINF_SUCCESS;
3013
3014 case IEMMODE_64BIT:
3015 IEM_MC_BEGIN(4, 2);
3016 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3017 IEM_MC_ARG(uint64_t, u64Src, 1);
3018 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3019 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3020 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3021
3022 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3023 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3024 IEM_MC_ASSIGN(cShiftArg, cShift);
3025 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3026 IEM_MC_FETCH_EFLAGS(EFlags);
3027 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3028 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3029
3030 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3031 IEM_MC_COMMIT_EFLAGS(EFlags);
3032 IEM_MC_ADVANCE_RIP();
3033 IEM_MC_END();
3034 return VINF_SUCCESS;
3035
3036 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3037 }
3038 }
3039}
3040
3041
3042/**
3043 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
3044 */
3045FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
3046{
3047 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3048 IEMOP_HLP_NO_LOCK_PREFIX();
3049 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
3050
3051 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3052 {
3053 IEMOP_HLP_NO_LOCK_PREFIX();
3054
3055 switch (pIemCpu->enmEffOpSize)
3056 {
3057 case IEMMODE_16BIT:
3058 IEM_MC_BEGIN(4, 0);
3059 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3060 IEM_MC_ARG(uint16_t, u16Src, 1);
3061 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3062 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3063
3064 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3065 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3066 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3067 IEM_MC_REF_EFLAGS(pEFlags);
3068 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3069
3070 IEM_MC_ADVANCE_RIP();
3071 IEM_MC_END();
3072 return VINF_SUCCESS;
3073
3074 case IEMMODE_32BIT:
3075 IEM_MC_BEGIN(4, 0);
3076 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3077 IEM_MC_ARG(uint32_t, u32Src, 1);
3078 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3079 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3080
3081 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3082 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3083 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3084 IEM_MC_REF_EFLAGS(pEFlags);
3085 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3086
3087 IEM_MC_ADVANCE_RIP();
3088 IEM_MC_END();
3089 return VINF_SUCCESS;
3090
3091 case IEMMODE_64BIT:
3092 IEM_MC_BEGIN(4, 0);
3093 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3094 IEM_MC_ARG(uint64_t, u64Src, 1);
3095 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3096 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3097
3098 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3099 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3100 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3101 IEM_MC_REF_EFLAGS(pEFlags);
3102 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3103
3104 IEM_MC_ADVANCE_RIP();
3105 IEM_MC_END();
3106 return VINF_SUCCESS;
3107
3108 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3109 }
3110 }
3111 else
3112 {
3113 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
3114
3115 switch (pIemCpu->enmEffOpSize)
3116 {
3117 case IEMMODE_16BIT:
3118 IEM_MC_BEGIN(4, 2);
3119 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3120 IEM_MC_ARG(uint16_t, u16Src, 1);
3121 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3122 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3123 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3124
3125 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3126 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3127 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3128 IEM_MC_FETCH_EFLAGS(EFlags);
3129 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3130 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3131
3132 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3133 IEM_MC_COMMIT_EFLAGS(EFlags);
3134 IEM_MC_ADVANCE_RIP();
3135 IEM_MC_END();
3136 return VINF_SUCCESS;
3137
3138 case IEMMODE_32BIT:
3139 IEM_MC_BEGIN(4, 2);
3140 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3141 IEM_MC_ARG(uint32_t, u32Src, 1);
3142 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3143 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3144 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3145
3146 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3147 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3148 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3149 IEM_MC_FETCH_EFLAGS(EFlags);
3150 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3151 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3152
3153 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3154 IEM_MC_COMMIT_EFLAGS(EFlags);
3155 IEM_MC_ADVANCE_RIP();
3156 IEM_MC_END();
3157 return VINF_SUCCESS;
3158
3159 case IEMMODE_64BIT:
3160 IEM_MC_BEGIN(4, 2);
3161 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3162 IEM_MC_ARG(uint64_t, u64Src, 1);
3163 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3164 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3165 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3166
3167 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3168 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3169 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3170 IEM_MC_FETCH_EFLAGS(EFlags);
3171 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3172 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3173
3174 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3175 IEM_MC_COMMIT_EFLAGS(EFlags);
3176 IEM_MC_ADVANCE_RIP();
3177 IEM_MC_END();
3178 return VINF_SUCCESS;
3179
3180 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3181 }
3182 }
3183}
3184
3185
3186
3187/** Opcode 0x0f 0xa4. */
3188FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
3189{
3190 IEMOP_MNEMONIC("shld Ev,Gv,Ib");
3191 return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
3192}
3193
3194
3195/** Opcode 0x0f 0xa5. */
3196FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
3197{
3198 IEMOP_MNEMONIC("shld Ev,Gv,CL");
3199 return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
3200}
3201
3202
3203/** Opcode 0x0f 0xa8. */
3204FNIEMOP_DEF(iemOp_push_gs)
3205{
3206 IEMOP_MNEMONIC("push gs");
3207 IEMOP_HLP_NO_LOCK_PREFIX();
3208 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
3209}
3210
3211
3212/** Opcode 0x0f 0xa9. */
3213FNIEMOP_DEF(iemOp_pop_gs)
3214{
3215 IEMOP_MNEMONIC("pop gs");
3216 IEMOP_HLP_NO_LOCK_PREFIX();
3217 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
3218}
3219
3220
3221/** Opcode 0x0f 0xaa. */
3222FNIEMOP_STUB(iemOp_rsm);
3223
3224
3225/** Opcode 0x0f 0xab. */
3226FNIEMOP_DEF(iemOp_bts_Ev_Gv)
3227{
3228 IEMOP_MNEMONIC("bts Gv,Mp");
3229 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
3230}
3231
3232
3233/** Opcode 0x0f 0xac. */
3234FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
3235{
3236 IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
3237 return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
3238}
3239
3240
3241/** Opcode 0x0f 0xad. */
3242FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
3243{
3244 IEMOP_MNEMONIC("shrd Ev,Gv,CL");
3245 return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
3246}
3247
3248
3249/** Opcode 0x0f 0xae. */
3250FNIEMOP_STUB(iemOp_Grp15);
3251
3252
3253/** Opcode 0x0f 0xaf. */
3254FNIEMOP_DEF(iemOp_imul_Gv_Ev)
3255{
3256 IEMOP_MNEMONIC("imul Gv,Ev");
3257 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3258 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
3259}
3260
3261
3262/** Opcode 0x0f 0xb0. */
3263FNIEMOP_STUB(iemOp_cmpxchg_Eb_Gb);
3264/** Opcode 0x0f 0xb1. */
3265FNIEMOP_STUB(iemOp_cmpxchg_Ev_Gv);
3266
3267
3268FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
3269{
3270 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3271 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3272
3273 /* The source cannot be a register. */
3274 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3275 return IEMOP_RAISE_INVALID_OPCODE();
3276 uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
3277
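     /* A far pointer in memory stores the offset first and the selector
        word right after it, i.e. at +2/+4/+8 depending on the effective
        operand size. */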
3278 switch (pIemCpu->enmEffOpSize)
3279 {
3280 case IEMMODE_16BIT:
3281 IEM_MC_BEGIN(5, 1);
3282 IEM_MC_ARG(uint16_t, uSel, 0);
3283 IEM_MC_ARG(uint16_t, offSeg, 1);
3284 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3285 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3286 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3287 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3288 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3289 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3290 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2);
3291 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3292 IEM_MC_END();
3293 return VINF_SUCCESS;
3294
3295 case IEMMODE_32BIT:
3296 IEM_MC_BEGIN(5, 1);
3297 IEM_MC_ARG(uint16_t, uSel, 0);
3298 IEM_MC_ARG(uint32_t, offSeg, 1);
3299 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3300 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3301 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3302 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3303 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3304 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3305 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4);
3306 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3307 IEM_MC_END();
3308 return VINF_SUCCESS;
3309
3310 case IEMMODE_64BIT:
3311 IEM_MC_BEGIN(5, 1);
3312 IEM_MC_ARG(uint16_t, uSel, 0);
3313 IEM_MC_ARG(uint64_t, offSeg, 1);
3314 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3315 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3316 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3317 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3318 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3319 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3320 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8);
3321 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3322 IEM_MC_END();
3323 return VINF_SUCCESS;
3324
3325 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3326 }
3327}
3328
3329
3330/** Opcode 0x0f 0xb2. */
3331FNIEMOP_DEF(iemOp_lss_Gv_Mp)
3332{
3333 IEMOP_MNEMONIC("lss Gv,Mp");
3334 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_SS);
3335}
3336
3337
3338/** Opcode 0x0f 0xb3. */
3339FNIEMOP_DEF(iemOp_btr_Ev_Gv)
3340{
3341 IEMOP_MNEMONIC("btr Gv,Mp");
3342 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
3343}
3344
3345
3346/** Opcode 0x0f 0xb4. */
3347FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
3348{
3349 IEMOP_MNEMONIC("lfs Gv,Mp");
3350 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_FS);
3351}
3352
3353
3354/** Opcode 0x0f 0xb5. */
3355FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
3356{
3357 IEMOP_MNEMONIC("lgs Gv,Mp");
3358 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_GS);
3359}
3360
3361
3362/** Opcode 0x0f 0xb6. */
3363FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
3364{
3365 IEMOP_MNEMONIC("movzx Gv,Eb");
3366
3367 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3368 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3369
3370 /*
3371 * If rm is denoting a register, no more instruction bytes.
3372 */
3373 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3374 {
3375 switch (pIemCpu->enmEffOpSize)
3376 {
3377 case IEMMODE_16BIT:
3378 IEM_MC_BEGIN(0, 1);
3379 IEM_MC_LOCAL(uint16_t, u16Value);
3380 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3381 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3382 IEM_MC_ADVANCE_RIP();
3383 IEM_MC_END();
3384 return VINF_SUCCESS;
3385
3386 case IEMMODE_32BIT:
3387 IEM_MC_BEGIN(0, 1);
3388 IEM_MC_LOCAL(uint32_t, u32Value);
3389 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3390 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3391 IEM_MC_ADVANCE_RIP();
3392 IEM_MC_END();
3393 return VINF_SUCCESS;
3394
3395 case IEMMODE_64BIT:
3396 IEM_MC_BEGIN(0, 1);
3397 IEM_MC_LOCAL(uint64_t, u64Value);
3398 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3399 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3400 IEM_MC_ADVANCE_RIP();
3401 IEM_MC_END();
3402 return VINF_SUCCESS;
3403
3404 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3405 }
3406 }
3407 else
3408 {
3409 /*
3410 * We're loading a register from memory.
3411 */
3412 switch (pIemCpu->enmEffOpSize)
3413 {
3414 case IEMMODE_16BIT:
3415 IEM_MC_BEGIN(0, 2);
3416 IEM_MC_LOCAL(uint16_t, u16Value);
3417 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3418 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3419 IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
3420 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3421 IEM_MC_ADVANCE_RIP();
3422 IEM_MC_END();
3423 return VINF_SUCCESS;
3424
3425 case IEMMODE_32BIT:
3426 IEM_MC_BEGIN(0, 2);
3427 IEM_MC_LOCAL(uint32_t, u32Value);
3428 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3429 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3430 IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3431 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3432 IEM_MC_ADVANCE_RIP();
3433 IEM_MC_END();
3434 return VINF_SUCCESS;
3435
3436 case IEMMODE_64BIT:
3437 IEM_MC_BEGIN(0, 2);
3438 IEM_MC_LOCAL(uint64_t, u64Value);
3439 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3440 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3441 IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3442 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3443 IEM_MC_ADVANCE_RIP();
3444 IEM_MC_END();
3445 return VINF_SUCCESS;
3446
3447 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3448 }
3449 }
3450}
3451
3452
3453/** Opcode 0x0f 0xb7. */
3454FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
3455{
3456 IEMOP_MNEMONIC("movzx Gv,Ew");
3457
3458 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3459 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3460
3461 /** @todo Not entirely sure how the operand size prefix is handled here,
3462 * assuming that it will be ignored. Would be nice to have a few
3463 * tests for this. */
3464 /*
3465 * If rm is denoting a register, no more instruction bytes.
3466 */
3467 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3468 {
3469 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3470 {
3471 IEM_MC_BEGIN(0, 1);
3472 IEM_MC_LOCAL(uint32_t, u32Value);
3473 IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3474 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3475 IEM_MC_ADVANCE_RIP();
3476 IEM_MC_END();
3477 }
3478 else
3479 {
3480 IEM_MC_BEGIN(0, 1);
3481 IEM_MC_LOCAL(uint64_t, u64Value);
3482 IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3483 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3484 IEM_MC_ADVANCE_RIP();
3485 IEM_MC_END();
3486 }
3487 }
3488 else
3489 {
3490 /*
3491 * We're loading a register from memory.
3492 */
3493 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3494 {
3495 IEM_MC_BEGIN(0, 2);
3496 IEM_MC_LOCAL(uint32_t, u32Value);
3497 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3498 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3499 IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3500 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3501 IEM_MC_ADVANCE_RIP();
3502 IEM_MC_END();
3503 }
3504 else
3505 {
3506 IEM_MC_BEGIN(0, 2);
3507 IEM_MC_LOCAL(uint64_t, u64Value);
3508 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3509 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3510 IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3511 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3512 IEM_MC_ADVANCE_RIP();
3513 IEM_MC_END();
3514 }
3515 }
3516 return VINF_SUCCESS;
3517}
3518
3519
3520/** Opcode 0x0f 0xb8. */
3521FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe);
3522/** Opcode 0x0f 0xb9. */
3523FNIEMOP_STUB(iemOp_Grp10);
3524
3525
3526/** Opcode 0x0f 0xba. */
3527FNIEMOP_DEF(iemOp_Grp8)
3528{
3529 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3530 PCIEMOPBINSIZES pImpl;
3531 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3532 {
3533 case 0: case 1: case 2: case 3:
3534 return IEMOP_RAISE_INVALID_OPCODE();
3535 case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
3536 case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
3537 case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
3538 case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
3539 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3540 }
3541 IEMOP_HLP_NO_LOCK_PREFIX();
3542 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3543
3544 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3545 {
3546 /* register destination. */
3547 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3548 IEMOP_HLP_NO_LOCK_PREFIX();
3549
3550 switch (pIemCpu->enmEffOpSize)
3551 {
3552 case IEMMODE_16BIT:
3553 IEM_MC_BEGIN(3, 0);
3554 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3555 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
3556 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3557
3558 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3559 IEM_MC_REF_EFLAGS(pEFlags);
3560 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3561
3562 IEM_MC_ADVANCE_RIP();
3563 IEM_MC_END();
3564 return VINF_SUCCESS;
3565
3566 case IEMMODE_32BIT:
3567 IEM_MC_BEGIN(3, 0);
3568 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3569 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
3570 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3571
3572 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3573 IEM_MC_REF_EFLAGS(pEFlags);
3574 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3575
3576 IEM_MC_ADVANCE_RIP();
3577 IEM_MC_END();
3578 return VINF_SUCCESS;
3579
3580 case IEMMODE_64BIT:
3581 IEM_MC_BEGIN(3, 0);
3582 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3583 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
3584 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3585
3586 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3587 IEM_MC_REF_EFLAGS(pEFlags);
3588 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3589
3590 IEM_MC_ADVANCE_RIP();
3591 IEM_MC_END();
3592 return VINF_SUCCESS;
3593
3594 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3595 }
3596 }
3597 else
3598 {
3599 /* memory destination. */
3600
3601 uint32_t fAccess;
3602 if (pImpl->pfnLockedU16)
3603 fAccess = IEM_ACCESS_DATA_RW;
3604 else /* BT */
3605 {
3606 IEMOP_HLP_NO_LOCK_PREFIX();
3607 fAccess = IEM_ACCESS_DATA_R;
3608 }
3609
3610 /** @todo test negative bit offsets! */
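     /* Unlike the Gv forms, the immediate bit offset is masked to the
        operand width below, so it always falls within the addressed operand
        and no effective address adjustment is necessary. */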
3611 switch (pIemCpu->enmEffOpSize)
3612 {
3613 case IEMMODE_16BIT:
3614 IEM_MC_BEGIN(3, 1);
3615 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3616 IEM_MC_ARG(uint16_t, u16Src, 1);
3617 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3618 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3619
3620 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3621 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3622 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
3623 IEM_MC_FETCH_EFLAGS(EFlags);
3624 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3625 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3626 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3627 else
3628 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
3629 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3630
3631 IEM_MC_COMMIT_EFLAGS(EFlags);
3632 IEM_MC_ADVANCE_RIP();
3633 IEM_MC_END();
3634 return VINF_SUCCESS;
3635
3636 case IEMMODE_32BIT:
3637 IEM_MC_BEGIN(3, 1);
3638 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3639 IEM_MC_ARG(uint32_t, u32Src, 1);
3640 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3641 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3642
3643 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3644 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3645 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
3646 IEM_MC_FETCH_EFLAGS(EFlags);
3647 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3648 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3649 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3650 else
3651 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
3652 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3653
3654 IEM_MC_COMMIT_EFLAGS(EFlags);
3655 IEM_MC_ADVANCE_RIP();
3656 IEM_MC_END();
3657 return VINF_SUCCESS;
3658
3659 case IEMMODE_64BIT:
3660 IEM_MC_BEGIN(3, 1);
3661 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3662 IEM_MC_ARG(uint64_t, u64Src, 1);
3663 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3664 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3665
3666 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3667 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3668 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
3669 IEM_MC_FETCH_EFLAGS(EFlags);
3670 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
3671 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3672 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3673 else
3674 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
3675 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
3676
3677 IEM_MC_COMMIT_EFLAGS(EFlags);
3678 IEM_MC_ADVANCE_RIP();
3679 IEM_MC_END();
3680 return VINF_SUCCESS;
3681
3682 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3683 }
3684 }
3685
3686}
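
/* Note! A rough sketch of what the bit ops above do (not the actual
 *       assembly helpers): the immediate bit offset is first masked to
 *       the operand width (u8Bit & 0x0f/0x1f/0x3f), then roughly:
 *           EFLAGS.CF = (*puDst >> iBit) & 1;
 *       with BTS, BTR and BTC additionally setting, clearing or toggling
 *       that bit. So e.g. "bt word [mem], 17" tests bit 1 (17 & 0x0f). */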
3687
3688
3689/** Opcode 0x0f 0xbb. */
3690FNIEMOP_DEF(iemOp_btc_Ev_Gv)
3691{
3692 IEMOP_MNEMONIC("btc Gv,Mp");
3693 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
3694}
3695
3696
3697/** Opcode 0x0f 0xbc. */
3698FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
3699{
3700 IEMOP_MNEMONIC("bsf Gv,Ev");
3701 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
3702 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
3703}
3704
3705
3706/** Opcode 0x0f 0xbd. */
3707FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
3708{
3709 IEMOP_MNEMONIC("bsr Gv,Ev");
3710 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
3711 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
3712}
3713
3714
3715/** Opcode 0x0f 0xbe. */
3716FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
3717{
3718 IEMOP_MNEMONIC("movsx Gv,Eb");
3719
3720 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3721 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3722
3723 /*
3724 * If rm is denoting a register, no more instruction bytes.
3725 */
3726 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3727 {
3728 switch (pIemCpu->enmEffOpSize)
3729 {
3730 case IEMMODE_16BIT:
3731 IEM_MC_BEGIN(0, 1);
3732 IEM_MC_LOCAL(uint16_t, u16Value);
3733 IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3734 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3735 IEM_MC_ADVANCE_RIP();
3736 IEM_MC_END();
3737 return VINF_SUCCESS;
3738
3739 case IEMMODE_32BIT:
3740 IEM_MC_BEGIN(0, 1);
3741 IEM_MC_LOCAL(uint32_t, u32Value);
3742 IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3743 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3744 IEM_MC_ADVANCE_RIP();
3745 IEM_MC_END();
3746 return VINF_SUCCESS;
3747
3748 case IEMMODE_64BIT:
3749 IEM_MC_BEGIN(0, 1);
3750 IEM_MC_LOCAL(uint64_t, u64Value);
3751 IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3752 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3753 IEM_MC_ADVANCE_RIP();
3754 IEM_MC_END();
3755 return VINF_SUCCESS;
3756
3757 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3758 }
3759 }
3760 else
3761 {
3762 /*
3763 * We're loading a register from memory.
3764 */
3765 switch (pIemCpu->enmEffOpSize)
3766 {
3767 case IEMMODE_16BIT:
3768 IEM_MC_BEGIN(0, 2);
3769 IEM_MC_LOCAL(uint16_t, u16Value);
3770 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3771 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3772 IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
3773 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3774 IEM_MC_ADVANCE_RIP();
3775 IEM_MC_END();
3776 return VINF_SUCCESS;
3777
3778 case IEMMODE_32BIT:
3779 IEM_MC_BEGIN(0, 2);
3780 IEM_MC_LOCAL(uint32_t, u32Value);
3781 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3782 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3783 IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3784 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3785 IEM_MC_ADVANCE_RIP();
3786 IEM_MC_END();
3787 return VINF_SUCCESS;
3788
3789 case IEMMODE_64BIT:
3790 IEM_MC_BEGIN(0, 2);
3791 IEM_MC_LOCAL(uint64_t, u64Value);
3792 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3793 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3794 IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3795 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3796 IEM_MC_ADVANCE_RIP();
3797 IEM_MC_END();
3798 return VINF_SUCCESS;
3799
3800 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3801 }
3802 }
3803}
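
/* Note! Sign extension sketch: a source byte of 0x80 is (int8_t)-128 and
 *       is stored as 0xff80, 0xffffff80 or 0xffffffffffffff80 for the
 *       16, 32 and 64-bit destinations respectively. */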
3804
3805
3806/** Opcode 0x0f 0xbf. */
3807FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
3808{
3809 IEMOP_MNEMONIC("movsx Gv,Ew");
3810
3811 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3812 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3813
3814 /** @todo Not entirely sure how the operand size prefix is handled here,
3815 * assuming that it will be ignored. Would be nice to have a few
3816 * tests for this. */
3817 /*
3818 * If rm is denoting a register, no more instruction bytes.
3819 */
3820 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3821 {
3822 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3823 {
3824 IEM_MC_BEGIN(0, 1);
3825 IEM_MC_LOCAL(uint32_t, u32Value);
3826 IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3827 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3828 IEM_MC_ADVANCE_RIP();
3829 IEM_MC_END();
3830 }
3831 else
3832 {
3833 IEM_MC_BEGIN(0, 1);
3834 IEM_MC_LOCAL(uint64_t, u64Value);
3835 IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3836 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3837 IEM_MC_ADVANCE_RIP();
3838 IEM_MC_END();
3839 }
3840 }
3841 else
3842 {
3843 /*
3844 * We're loading a register from memory.
3845 */
3846 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3847 {
3848 IEM_MC_BEGIN(0, 2);
3849 IEM_MC_LOCAL(uint32_t, u32Value);
3850 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3851 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3852 IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3853 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3854 IEM_MC_ADVANCE_RIP();
3855 IEM_MC_END();
3856 }
3857 else
3858 {
3859 IEM_MC_BEGIN(0, 2);
3860 IEM_MC_LOCAL(uint64_t, u64Value);
3861 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3862 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3863 IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3864 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3865 IEM_MC_ADVANCE_RIP();
3866 IEM_MC_END();
3867 }
3868 }
3869 return VINF_SUCCESS;
3870}
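
/* Note! Re the @todo above: with a 66h prefix the code still takes the
 *       32-bit path (enmEffOpSize != IEMMODE_64BIT), i.e. it assumes the
 *       operand size prefix is ignored; this assumption is untested. */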
3871
3872
3873/** Opcode 0x0f 0xc0. */
3874FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
3875{
3876 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3877 IEMOP_MNEMONIC("xadd Eb,Gb");
3878
3879 /*
3880 * If rm is denoting a register, no more instruction bytes.
3881 */
3882 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3883 {
3884 IEMOP_HLP_NO_LOCK_PREFIX();
3885
3886 IEM_MC_BEGIN(3, 0);
3887 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
3888 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
3889 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3890
3891 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3892 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3893 IEM_MC_REF_EFLAGS(pEFlags);
3894 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
3895
3896 IEM_MC_ADVANCE_RIP();
3897 IEM_MC_END();
3898 }
3899 else
3900 {
3901 /*
3902 * We're accessing memory.
3903 */
3904 IEM_MC_BEGIN(3, 3);
3905 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
3906 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
3907 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
3908 IEM_MC_LOCAL(uint8_t, u8RegCopy);
3909 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3910
3911 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3912 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
3913 IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3914 IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
3915 IEM_MC_FETCH_EFLAGS(EFlags);
3916 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3917 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
3918 else
3919 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8_locked, pu8Dst, pu8Reg, pEFlags);
3920
3921 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
3922 IEM_MC_COMMIT_EFLAGS(EFlags);
3923 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8RegCopy);
3924 IEM_MC_ADVANCE_RIP();
3925 IEM_MC_END();
3926 return VINF_SUCCESS;
3927 }
3928 return VINF_SUCCESS;
3929}
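
/* Note! For reference, XADD does roughly the following (a sketch, not the
 *       actual aimpl code):
 *           uTmp = *puDst + *puReg; *puReg = *puDst; *puDst = uTmp;
 *       i.e. the register operand receives the old destination value,
 *       which is why the RegCopy local is written back to the Gb/Gv
 *       register after the memory commit in the workers above. */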
3930
3931
3932/** Opcode 0x0f 0xc1. */
3933FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
3934{
3935 IEMOP_MNEMONIC("xadd Ev,Gv");
3936 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3937
3938 /*
3939 * If rm is denoting a register, no more instruction bytes.
3940 */
3941 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3942 {
3943 IEMOP_HLP_NO_LOCK_PREFIX();
3944
3945 switch (pIemCpu->enmEffOpSize)
3946 {
3947 case IEMMODE_16BIT:
3948 IEM_MC_BEGIN(3, 0);
3949 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3950 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
3951 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3952
3953 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3954 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3955 IEM_MC_REF_EFLAGS(pEFlags);
3956 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
3957
3958 IEM_MC_ADVANCE_RIP();
3959 IEM_MC_END();
3960 return VINF_SUCCESS;
3961
3962 case IEMMODE_32BIT:
3963 IEM_MC_BEGIN(3, 0);
3964 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3965 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
3966 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3967
3968 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3969 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3970 IEM_MC_REF_EFLAGS(pEFlags);
3971 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
3972
3973 IEM_MC_ADVANCE_RIP();
3974 IEM_MC_END();
3975 return VINF_SUCCESS;
3976
3977 case IEMMODE_64BIT:
3978 IEM_MC_BEGIN(3, 0);
3979 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3980 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
3981 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3982
3983 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3984 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3985 IEM_MC_REF_EFLAGS(pEFlags);
3986 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
3987
3988 IEM_MC_ADVANCE_RIP();
3989 IEM_MC_END();
3990 return VINF_SUCCESS;
3991
3992 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3993 }
3994 }
3995 else
3996 {
3997 /*
3998 * We're accessing memory.
3999 */
4000 switch (pIemCpu->enmEffOpSize)
4001 {
4002 case IEMMODE_16BIT:
4003 IEM_MC_BEGIN(3, 3);
4004 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4005 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
4006 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4007 IEM_MC_LOCAL(uint16_t, u16RegCopy);
4008 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4009
4010 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4011 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4012 IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4013 IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
4014 IEM_MC_FETCH_EFLAGS(EFlags);
4015 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4016 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
4017 else
4018 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16_locked, pu16Dst, pu16Reg, pEFlags);
4019
4020 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4021 IEM_MC_COMMIT_EFLAGS(EFlags);
4022 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16RegCopy);
4023 IEM_MC_ADVANCE_RIP();
4024 IEM_MC_END();
4025 return VINF_SUCCESS;
4026
4027 case IEMMODE_32BIT:
4028 IEM_MC_BEGIN(3, 3);
4029 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4030 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
4031 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4032 IEM_MC_LOCAL(uint32_t, u32RegCopy);
4033 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4034
4035 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4036 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4037 IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4038 IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
4039 IEM_MC_FETCH_EFLAGS(EFlags);
4040 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4041 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
4042 else
4043 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32_locked, pu32Dst, pu32Reg, pEFlags);
4044
4045 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4046 IEM_MC_COMMIT_EFLAGS(EFlags);
4047 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32RegCopy);
4048 IEM_MC_ADVANCE_RIP();
4049 IEM_MC_END();
4050 return VINF_SUCCESS;
4051
4052 case IEMMODE_64BIT:
4053 IEM_MC_BEGIN(3, 3);
4054 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4055 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
4056 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4057 IEM_MC_LOCAL(uint64_t, u64RegCopy);
4058 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4059
4060 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4061 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4062 IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4063 IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
4064 IEM_MC_FETCH_EFLAGS(EFlags);
4065 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4066 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
4067 else
4068 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64_locked, pu64Dst, pu64Reg, pEFlags);
4069
4070 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4071 IEM_MC_COMMIT_EFLAGS(EFlags);
4072 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64RegCopy);
4073 IEM_MC_ADVANCE_RIP();
4074 IEM_MC_END();
4075 return VINF_SUCCESS;
4076
4077 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4078 }
4079 }
4080}
4081
4082/** Opcode 0x0f 0xc2. */
4083FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);
4084/** Opcode 0x0f 0xc3. */
4085FNIEMOP_STUB(iemOp_movnti_My_Gy);
4086/** Opcode 0x0f 0xc4. */
4087FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);
4088/** Opcode 0x0f 0xc5. */
4089FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);
4090/** Opcode 0x0f 0xc6. */
4091FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
4092/** Opcode 0x0f 0xc7. */
4093FNIEMOP_STUB(iemOp_Grp9);
4094/** Opcode 0x0f 0xc8. */
4095FNIEMOP_STUB(iemOp_bswap_rAX_r8);
4096/** Opcode 0x0f 0xc9. */
4097FNIEMOP_STUB(iemOp_bswap_rCX_r9);
4098/** Opcode 0x0f 0xca. */
4099FNIEMOP_STUB(iemOp_bswap_rDX_r10);
4100/** Opcode 0x0f 0xcb. */
4101FNIEMOP_STUB(iemOp_bswap_rBX_r11);
4102/** Opcode 0x0f 0xcc. */
4103FNIEMOP_STUB(iemOp_bswap_rSP_r12);
4104/** Opcode 0x0f 0xcd. */
4105FNIEMOP_STUB(iemOp_bswap_rBP_r13);
4106/** Opcode 0x0f 0xce. */
4107FNIEMOP_STUB(iemOp_bswap_rSI_r14);
4108/** Opcode 0x0f 0xcf. */
4109FNIEMOP_STUB(iemOp_bswap_rDI_r15);
4110/** Opcode 0x0f 0xd0. */
4111FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
4112/** Opcode 0x0f 0xd1. */
4113FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
4114/** Opcode 0x0f 0xd2. */
4115FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
4116/** Opcode 0x0f 0xd3. */
4117FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
4118/** Opcode 0x0f 0xd4. */
4119FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
4120/** Opcode 0x0f 0xd5. */
4121FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
4122/** Opcode 0x0f 0xd6. */
4123FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
4124/** Opcode 0x0f 0xd7. */
4125FNIEMOP_STUB(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq);
4126/** Opcode 0x0f 0xd8. */
4127FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
4128/** Opcode 0x0f 0xd9. */
4129FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
4130/** Opcode 0x0f 0xda. */
4131FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
4132/** Opcode 0x0f 0xdb. */
4133FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
4134/** Opcode 0x0f 0xdc. */
4135FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
4136/** Opcode 0x0f 0xdd. */
4137FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
4138/** Opcode 0x0f 0xde. */
4139FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
4140/** Opcode 0x0f 0xdf. */
4141FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
4142/** Opcode 0x0f 0xe0. */
4143FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
4144/** Opcode 0x0f 0xe1. */
4145FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
4146/** Opcode 0x0f 0xe2. */
4147FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
4148/** Opcode 0x0f 0xe3. */
4149FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
4150/** Opcode 0x0f 0xe4. */
4151FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
4152/** Opcode 0x0f 0xe5. */
4153FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
4154/** Opcode 0x0f 0xe6. */
4155FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
4156/** Opcode 0x0f 0xe7. */
4157FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
4158/** Opcode 0x0f 0xe8. */
4159FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
4160/** Opcode 0x0f 0xe9. */
4161FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
4162/** Opcode 0x0f 0xea. */
4163FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
4164/** Opcode 0x0f 0xeb. */
4165FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
4166/** Opcode 0x0f 0xec. */
4167FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
4168/** Opcode 0x0f 0xed. */
4169FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
4170/** Opcode 0x0f 0xee. */
4171FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
4172/** Opcode 0x0f 0xef. */
4173FNIEMOP_STUB(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq);
4174/** Opcode 0x0f 0xf0. */
4175FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
4176/** Opcode 0x0f 0xf1. */
4177FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
4178/** Opcode 0x0f 0xf2. */
4179FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
4180/** Opcode 0x0f 0xf3. */
4181FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
4182/** Opcode 0x0f 0xf4. */
4183FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
4184/** Opcode 0x0f 0xf5. */
4185FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
4186/** Opcode 0x0f 0xf6. */
4187FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
4188/** Opcode 0x0f 0xf7. */
4189FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
4190/** Opcode 0x0f 0xf8. */
4191FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq);
4192/** Opcode 0x0f 0xf9. */
4193FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
4194/** Opcode 0x0f 0xfa. */
4195FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
4196/** Opcode 0x0f 0xfb. */
4197FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
4198/** Opcode 0x0f 0xfc. */
4199FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
4200/** Opcode 0x0f 0xfd. */
4201FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
4202/** Opcode 0x0f 0xfe. */
4203FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
4204
4205
4206const PFNIEMOP g_apfnTwoByteMap[256] =
4207{
4208 /* 0x00 */ iemOp_Grp6, iemOp_Grp7, iemOp_lar_Gv_Ew, iemOp_lsl_Gv_Ew,
4209 /* 0x04 */ iemOp_Invalid, iemOp_syscall, iemOp_clts, iemOp_sysret,
4210 /* 0x08 */ iemOp_invd, iemOp_wbinvd, iemOp_Invalid, iemOp_ud2,
4211 /* 0x0c */ iemOp_Invalid, iemOp_nop_Ev_prefetch, iemOp_femms, iemOp_3Dnow,
4212 /* 0x10 */ iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
4213 /* 0x11 */ iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
4214 /* 0x12 */ iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
4215 /* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
4216 /* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
4217 /* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
4218 /* 0x16 */ iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
4219 /* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
4220 /* 0x18 */ iemOp_prefetch_Grp16, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4221 /* 0x1c */ iemOp_Invalid, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4222 /* 0x20 */ iemOp_mov_Rd_Cd, iemOp_mov_Rd_Dd, iemOp_mov_Cd_Rd, iemOp_mov_Dd_Rd,
4223 /* 0x24 */ iemOp_mov_Rd_Td, iemOp_Invalid, iemOp_mov_Td_Rd, iemOp_Invalid,
4224 /* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
4225 /* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
4226 /* 0x2a */ iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
4227 /* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
4228 /* 0x2c */ iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
4229 /* 0x2d */ iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
4230 /* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
4231 /* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
4232 /* 0x30 */ iemOp_wrmsr, iemOp_rdtsc, iemOp_rdmsr, iemOp_rdpmc,
4233 /* 0x34 */ iemOp_sysenter, iemOp_sysexit, iemOp_Invalid, iemOp_getsec,
4234 /* 0x38 */ iemOp_3byte_Esc_A4, iemOp_Invalid, iemOp_3byte_Esc_A5, iemOp_Invalid,
4235 /* 0x3c */ iemOp_movnti_Gv_Ev/*?*/,iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4236 /* 0x40 */ iemOp_cmovo_Gv_Ev, iemOp_cmovno_Gv_Ev, iemOp_cmovc_Gv_Ev, iemOp_cmovnc_Gv_Ev,
4237 /* 0x44 */ iemOp_cmove_Gv_Ev, iemOp_cmovne_Gv_Ev, iemOp_cmovbe_Gv_Ev, iemOp_cmovnbe_Gv_Ev,
4238 /* 0x48 */ iemOp_cmovs_Gv_Ev, iemOp_cmovns_Gv_Ev, iemOp_cmovp_Gv_Ev, iemOp_cmovnp_Gv_Ev,
4239 /* 0x4c */ iemOp_cmovl_Gv_Ev, iemOp_cmovnl_Gv_Ev, iemOp_cmovle_Gv_Ev, iemOp_cmovnle_Gv_Ev,
4240 /* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
4241 /* 0x51 */ iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
4242 /* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
4243 /* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
4244 /* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
4245 /* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
4246 /* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
4247 /* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
4248 /* 0x58 */ iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
4249 /* 0x59 */ iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
4250 /* 0x5a */ iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
4251 /* 0x5b */ iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
4252 /* 0x5c */ iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
4253 /* 0x5d */ iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
4254 /* 0x5e */ iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
4255 /* 0x5f */ iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
4256 /* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
4257 /* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
4258 /* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
4259 /* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
4260 /* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
4261 /* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
4262 /* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
4263 /* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
4264 /* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
4265 /* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
4266 /* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
4267 /* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
4268 /* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
4269 /* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
4270 /* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
4271 /* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
4272 /* 0x70 */ iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
4273 /* 0x71 */ iemOp_Grp12,
4274 /* 0x72 */ iemOp_Grp13,
4275 /* 0x73 */ iemOp_Grp14,
4276 /* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
4277 /* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
4278 /* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
4279 /* 0x77 */ iemOp_emms,
4280 /* 0x78 */ iemOp_vmread, iemOp_vmwrite, iemOp_Invalid, iemOp_Invalid,
4281 /* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
4282 /* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
4283 /* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
4284 /* 0x7f */ iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
4285 /* 0x80 */ iemOp_jo_Jv, iemOp_jno_Jv, iemOp_jc_Jv, iemOp_jnc_Jv,
4286 /* 0x84 */ iemOp_je_Jv, iemOp_jne_Jv, iemOp_jbe_Jv, iemOp_jnbe_Jv,
4287 /* 0x88 */ iemOp_js_Jv, iemOp_jns_Jv, iemOp_jp_Jv, iemOp_jnp_Jv,
4288 /* 0x8c */ iemOp_jl_Jv, iemOp_jnl_Jv, iemOp_jle_Jv, iemOp_jnle_Jv,
4289 /* 0x90 */ iemOp_seto_Eb, iemOp_setno_Eb, iemOp_setc_Eb, iemOp_setnc_Eb,
4290 /* 0x94 */ iemOp_sete_Eb, iemOp_setne_Eb, iemOp_setbe_Eb, iemOp_setnbe_Eb,
4291 /* 0x98 */ iemOp_sets_Eb, iemOp_setns_Eb, iemOp_setp_Eb, iemOp_setnp_Eb,
4292 /* 0x9c */ iemOp_setl_Eb, iemOp_setnl_Eb, iemOp_setle_Eb, iemOp_setnle_Eb,
4293 /* 0xa0 */ iemOp_push_fs, iemOp_pop_fs, iemOp_cpuid, iemOp_bt_Ev_Gv,
4294 /* 0xa4 */ iemOp_shld_Ev_Gv_Ib, iemOp_shld_Ev_Gv_CL, iemOp_Invalid, iemOp_Invalid,
4295 /* 0xa8 */ iemOp_push_gs, iemOp_pop_gs, iemOp_rsm, iemOp_bts_Ev_Gv,
4296 /* 0xac */ iemOp_shrd_Ev_Gv_Ib, iemOp_shrd_Ev_Gv_CL, iemOp_Grp15, iemOp_imul_Gv_Ev,
4297 /* 0xb0 */ iemOp_cmpxchg_Eb_Gb, iemOp_cmpxchg_Ev_Gv, iemOp_lss_Gv_Mp, iemOp_btr_Ev_Gv,
4298 /* 0xb4 */ iemOp_lfs_Gv_Mp, iemOp_lgs_Gv_Mp, iemOp_movzx_Gv_Eb, iemOp_movzx_Gv_Ew,
4299 /* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,iemOp_Grp10, iemOp_Grp8, iemOp_btc_Ev_Gv,
4300 /* 0xbc */ iemOp_bsf_Gv_Ev, iemOp_bsr_Gv_Ev, iemOp_movsx_Gv_Eb, iemOp_movsx_Gv_Ew,
4301 /* 0xc0 */ iemOp_xadd_Eb_Gb,
4302 /* 0xc1 */ iemOp_xadd_Ev_Gv,
4303 /* 0xc2 */ iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
4304 /* 0xc3 */ iemOp_movnti_My_Gy,
4305 /* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
4306 /* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
4307 /* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
4308 /* 0xc7 */ iemOp_Grp9,
4309 /* 0xc8 */ iemOp_bswap_rAX_r8, iemOp_bswap_rCX_r9, iemOp_bswap_rDX_r10, iemOp_bswap_rBX_r11,
4310 /* 0xcc */ iemOp_bswap_rSP_r12, iemOp_bswap_rBP_r13, iemOp_bswap_rSI_r14, iemOp_bswap_rDI_r15,
4311 /* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
4312 /* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
4313 /* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
4314 /* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
4315 /* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
4316 /* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
4317 /* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
4318 /* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
4319 /* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
4320 /* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
4321 /* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
4322 /* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
4323 /* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
4324 /* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
4325 /* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
4326 /* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
4327 /* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
4328 /* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
4329 /* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
4330 /* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
4331 /* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
4332 /* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
4333 /* 0xe6 */ iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
4334 /* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
4335 /* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
4336 /* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
4337 /* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
4338 /* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
4339 /* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
4340 /* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
4341 /* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
4342 /* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
4343 /* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
4344 /* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
4345 /* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
4346 /* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
4347 /* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
4348 /* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
4349 /* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
4350 /* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
4351 /* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
4352 /* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
4353 /* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
4354 /* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
4355 /* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
4356 /* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
4357 /* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
4358 /* 0xff */ iemOp_Invalid
4359};
4360
4361/** @} */
4362
4363
4364/** @name One byte opcodes.
4365 *
4366 * @{
4367 */
4368
4369/** Opcode 0x00. */
4370FNIEMOP_DEF(iemOp_add_Eb_Gb)
4371{
4372 IEMOP_MNEMONIC("add Eb,Gb");
4373 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
4374}
4375
4376
4377/** Opcode 0x01. */
4378FNIEMOP_DEF(iemOp_add_Ev_Gv)
4379{
4380 IEMOP_MNEMONIC("add Ev,Gv");
4381 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
4382}
4383
4384
4385/** Opcode 0x02. */
4386FNIEMOP_DEF(iemOp_add_Gb_Eb)
4387{
4388 IEMOP_MNEMONIC("add Gb,Eb");
4389 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
4390}
4391
4392
4393/** Opcode 0x03. */
4394FNIEMOP_DEF(iemOp_add_Gv_Ev)
4395{
4396 IEMOP_MNEMONIC("add Gv,Ev");
4397 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
4398}
4399
4400
4401/** Opcode 0x04. */
4402FNIEMOP_DEF(iemOp_add_Al_Ib)
4403{
4404 IEMOP_MNEMONIC("add al,Ib");
4405 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
4406}
4407
4408
4409/** Opcode 0x05. */
4410FNIEMOP_DEF(iemOp_add_eAX_Iz)
4411{
4412 IEMOP_MNEMONIC("add rAX,Iz");
4413 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
4414}
4415
4416
4417/** Opcode 0x06. */
4418FNIEMOP_DEF(iemOp_push_ES)
4419{
4420 IEMOP_MNEMONIC("push es");
4421 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
4422}
4423
4424
4425/** Opcode 0x07. */
4426FNIEMOP_DEF(iemOp_pop_ES)
4427{
4428 IEMOP_MNEMONIC("pop es");
4429 IEMOP_HLP_NO_64BIT();
4430 IEMOP_HLP_NO_LOCK_PREFIX();
4431 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
4432}
4433
4434
4435/** Opcode 0x08. */
4436FNIEMOP_DEF(iemOp_or_Eb_Gb)
4437{
4438 IEMOP_MNEMONIC("or Eb,Gb");
4439 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4440 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
4441}
4442
4443
4444/** Opcode 0x09. */
4445FNIEMOP_DEF(iemOp_or_Ev_Gv)
4446{
4447 IEMOP_MNEMONIC("or Ev,Gv ");
4448 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4449 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
4450}
4451
4452
4453/** Opcode 0x0a. */
4454FNIEMOP_DEF(iemOp_or_Gb_Eb)
4455{
4456 IEMOP_MNEMONIC("or Gb,Eb");
4457 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4458 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
4459}
4460
4461
4462/** Opcode 0x0b. */
4463FNIEMOP_DEF(iemOp_or_Gv_Ev)
4464{
4465 IEMOP_MNEMONIC("or Gv,Ev");
4466 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4467 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
4468}
4469
4470
4471/** Opcode 0x0c. */
4472FNIEMOP_DEF(iemOp_or_Al_Ib)
4473{
4474 IEMOP_MNEMONIC("or al,Ib");
4475 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4476 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
4477}
4478
4479
4480/** Opcode 0x0d. */
4481FNIEMOP_DEF(iemOp_or_eAX_Iz)
4482{
4483 IEMOP_MNEMONIC("or rAX,Iz");
4484 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4485 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
4486}
4487
4488
4489/** Opcode 0x0e. */
4490FNIEMOP_DEF(iemOp_push_CS)
4491{
4492 IEMOP_MNEMONIC("push cs");
4493 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
4494}
4495
4496
4497/** Opcode 0x0f. */
4498FNIEMOP_DEF(iemOp_2byteEscape)
4499{
4500 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4501 return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
4502}
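
/* Note! Two-byte dispatch sketch: the 0x0f escape above just fetches the
 *       next opcode byte and indexes g_apfnTwoByteMap with it, so 0x0f 0xa2
 *       lands on iemOp_cpuid and 0x0f 0xc1 on iemOp_xadd_Ev_Gv. Mandatory
 *       prefixes (66h/f2h/f3h) are not decoded here; the individual
 *       handlers inspect pIemCpu->fPrefixes themselves. */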
4503
4504/** Opcode 0x10. */
4505FNIEMOP_DEF(iemOp_adc_Eb_Gb)
4506{
4507 IEMOP_MNEMONIC("adc Eb,Gb");
4508 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
4509}
4510
4511
4512/** Opcode 0x11. */
4513FNIEMOP_DEF(iemOp_adc_Ev_Gv)
4514{
4515 IEMOP_MNEMONIC("adc Ev,Gv");
4516 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
4517}
4518
4519
4520/** Opcode 0x12. */
4521FNIEMOP_DEF(iemOp_adc_Gb_Eb)
4522{
4523 IEMOP_MNEMONIC("adc Gb,Eb");
4524 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
4525}
4526
4527
4528/** Opcode 0x13. */
4529FNIEMOP_DEF(iemOp_adc_Gv_Ev)
4530{
4531 IEMOP_MNEMONIC("adc Gv,Ev");
4532 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
4533}
4534
4535
4536/** Opcode 0x14. */
4537FNIEMOP_DEF(iemOp_adc_Al_Ib)
4538{
4539 IEMOP_MNEMONIC("adc al,Ib");
4540 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
4541}
4542
4543
4544/** Opcode 0x15. */
4545FNIEMOP_DEF(iemOp_adc_eAX_Iz)
4546{
4547 IEMOP_MNEMONIC("adc rAX,Iz");
4548 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
4549}
4550
4551
4552/** Opcode 0x16. */
4553FNIEMOP_DEF(iemOp_push_SS)
4554{
4555 IEMOP_MNEMONIC("push ss");
4556 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
4557}
4558
4559
4560/** Opcode 0x17. */
4561FNIEMOP_DEF(iemOp_pop_SS)
4562{
4563 IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
4564 IEMOP_HLP_NO_LOCK_PREFIX();
4565 IEMOP_HLP_NO_64BIT();
4566 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
4567}
4568
4569
4570/** Opcode 0x18. */
4571FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
4572{
4573 IEMOP_MNEMONIC("sbb Eb,Gb");
4574 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
4575}
4576
4577
4578/** Opcode 0x19. */
4579FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
4580{
4581 IEMOP_MNEMONIC("sbb Ev,Gv");
4582 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
4583}
4584
4585
4586/** Opcode 0x1a. */
4587FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
4588{
4589 IEMOP_MNEMONIC("sbb Gb,Eb");
4590 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
4591}
4592
4593
4594/** Opcode 0x1b. */
4595FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
4596{
4597 IEMOP_MNEMONIC("sbb Gv,Ev");
4598 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
4599}
4600
4601
4602/** Opcode 0x1c. */
4603FNIEMOP_DEF(iemOp_sbb_Al_Ib)
4604{
4605 IEMOP_MNEMONIC("sbb al,Ib");
4606 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
4607}
4608
4609
4610/** Opcode 0x1d. */
4611FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
4612{
4613 IEMOP_MNEMONIC("sbb rAX,Iz");
4614 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
4615}
4616
4617
4618/** Opcode 0x1e. */
4619FNIEMOP_DEF(iemOp_push_DS)
4620{
4621 IEMOP_MNEMONIC("push ds");
4622 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
4623}
4624
4625
4626/** Opcode 0x1f. */
4627FNIEMOP_DEF(iemOp_pop_DS)
4628{
4629 IEMOP_MNEMONIC("pop ds");
4630 IEMOP_HLP_NO_LOCK_PREFIX();
4631 IEMOP_HLP_NO_64BIT();
4632 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
4633}
4634
4635
4636/** Opcode 0x20. */
4637FNIEMOP_DEF(iemOp_and_Eb_Gb)
4638{
4639 IEMOP_MNEMONIC("and Eb,Gb");
4640 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4641 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
4642}
4643
4644
4645/** Opcode 0x21. */
4646FNIEMOP_DEF(iemOp_and_Ev_Gv)
4647{
4648 IEMOP_MNEMONIC("and Ev,Gv");
4649 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4650 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
4651}
4652
4653
4654/** Opcode 0x22. */
4655FNIEMOP_DEF(iemOp_and_Gb_Eb)
4656{
4657 IEMOP_MNEMONIC("and Gb,Eb");
4658 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4659 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
4660}
4661
4662
4663/** Opcode 0x23. */
4664FNIEMOP_DEF(iemOp_and_Gv_Ev)
4665{
4666 IEMOP_MNEMONIC("and Gv,Ev");
4667 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4668 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
4669}
4670
4671
4672/** Opcode 0x24. */
4673FNIEMOP_DEF(iemOp_and_Al_Ib)
4674{
4675 IEMOP_MNEMONIC("and al,Ib");
4676 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4677 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
4678}
4679
4680
4681/** Opcode 0x25. */
4682FNIEMOP_DEF(iemOp_and_eAX_Iz)
4683{
4684 IEMOP_MNEMONIC("and rAX,Iz");
4685 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4686 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
4687}
4688
4689
4690/** Opcode 0x26. */
4691FNIEMOP_DEF(iemOp_seg_ES)
4692{
4693 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
4694 pIemCpu->iEffSeg = X86_SREG_ES;
4695
4696 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4697 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4698}
4699
4700
4701/** Opcode 0x27. */
4702FNIEMOP_STUB(iemOp_daa);
4703
4704
4705/** Opcode 0x28. */
4706FNIEMOP_DEF(iemOp_sub_Eb_Gb)
4707{
4708 IEMOP_MNEMONIC("sub Eb,Gb");
4709 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
4710}
4711
4712
4713/** Opcode 0x29. */
4714FNIEMOP_DEF(iemOp_sub_Ev_Gv)
4715{
4716 IEMOP_MNEMONIC("sub Ev,Gv");
4717 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
4718}
4719
4720
4721/** Opcode 0x2a. */
4722FNIEMOP_DEF(iemOp_sub_Gb_Eb)
4723{
4724 IEMOP_MNEMONIC("sub Gb,Eb");
4725 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
4726}
4727
4728
4729/** Opcode 0x2b. */
4730FNIEMOP_DEF(iemOp_sub_Gv_Ev)
4731{
4732 IEMOP_MNEMONIC("sub Gv,Ev");
4733 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
4734}
4735
4736
4737/** Opcode 0x2c. */
4738FNIEMOP_DEF(iemOp_sub_Al_Ib)
4739{
4740 IEMOP_MNEMONIC("sub al,Ib");
4741 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
4742}
4743
4744
4745/** Opcode 0x2d. */
4746FNIEMOP_DEF(iemOp_sub_eAX_Iz)
4747{
4748 IEMOP_MNEMONIC("sub rAX,Iz");
4749 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
4750}
4751
4752
4753/** Opcode 0x2e. */
4754FNIEMOP_DEF(iemOp_seg_CS)
4755{
4756 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
4757 pIemCpu->iEffSeg = X86_SREG_CS;
4758
4759 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4760 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4761}
4762
4763
4764/** Opcode 0x2f. */
4765FNIEMOP_STUB(iemOp_das);
4766
4767
4768/** Opcode 0x30. */
4769FNIEMOP_DEF(iemOp_xor_Eb_Gb)
4770{
4771 IEMOP_MNEMONIC("xor Eb,Gb");
4772 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4773 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
4774}
4775
4776
4777/** Opcode 0x31. */
4778FNIEMOP_DEF(iemOp_xor_Ev_Gv)
4779{
4780 IEMOP_MNEMONIC("xor Ev,Gv");
4781 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4782 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
4783}
4784
4785
4786/** Opcode 0x32. */
4787FNIEMOP_DEF(iemOp_xor_Gb_Eb)
4788{
4789 IEMOP_MNEMONIC("xor Gb,Eb");
4790 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4791 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
4792}
4793
4794
4795/** Opcode 0x33. */
4796FNIEMOP_DEF(iemOp_xor_Gv_Ev)
4797{
4798 IEMOP_MNEMONIC("xor Gv,Ev");
4799 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4800 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
4801}
4802
4803
4804/** Opcode 0x34. */
4805FNIEMOP_DEF(iemOp_xor_Al_Ib)
4806{
4807 IEMOP_MNEMONIC("xor al,Ib");
4808 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4809 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
4810}
4811
4812
4813/** Opcode 0x35. */
4814FNIEMOP_DEF(iemOp_xor_eAX_Iz)
4815{
4816 IEMOP_MNEMONIC("xor rAX,Iz");
4817 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4818 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
4819}
4820
4821
4822/** Opcode 0x36. */
4823FNIEMOP_DEF(iemOp_seg_SS)
4824{
4825 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
4826 pIemCpu->iEffSeg = X86_SREG_SS;
4827
4828 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4829 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4830}
4831
4832
4833/** Opcode 0x37. */
4834FNIEMOP_STUB(iemOp_aaa);
4835
4836
4837/** Opcode 0x38. */
4838FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
4839{
4840 IEMOP_MNEMONIC("cmp Eb,Gb");
4841 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
4842 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
4843}
4844
4845
4846/** Opcode 0x39. */
4847FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
4848{
4849 IEMOP_MNEMONIC("cmp Ev,Gv");
4850 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
4851 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
4852}
4853
4854
4855/** Opcode 0x3a. */
4856FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
4857{
4858 IEMOP_MNEMONIC("cmp Gb,Eb");
4859 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
4860}
4861
4862
4863/** Opcode 0x3b. */
4864FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
4865{
4866 IEMOP_MNEMONIC("cmp Gv,Ev");
4867 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
4868}
4869
4870
4871/** Opcode 0x3c. */
4872FNIEMOP_DEF(iemOp_cmp_Al_Ib)
4873{
4874 IEMOP_MNEMONIC("cmp al,Ib");
4875 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
4876}
4877
4878
4879/** Opcode 0x3d. */
4880FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
4881{
4882 IEMOP_MNEMONIC("cmp rAX,Iz");
4883 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
4884}
4885
4886
4887/** Opcode 0x3e. */
4888FNIEMOP_DEF(iemOp_seg_DS)
4889{
4890 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
4891 pIemCpu->iEffSeg = X86_SREG_DS;
4892
4893 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4894 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4895}
4896
4897
4898/** Opcode 0x3f. */
4899FNIEMOP_STUB(iemOp_aas);
4900
4901/**
4902 * Common 'inc/dec/not/neg register' helper.
4903 */
4904FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
4905{
4906 IEMOP_HLP_NO_LOCK_PREFIX();
4907 switch (pIemCpu->enmEffOpSize)
4908 {
4909 case IEMMODE_16BIT:
4910 IEM_MC_BEGIN(2, 0);
4911 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4912 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4913 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
4914 IEM_MC_REF_EFLAGS(pEFlags);
4915 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
4916 IEM_MC_ADVANCE_RIP();
4917 IEM_MC_END();
4918 return VINF_SUCCESS;
4919
4920 case IEMMODE_32BIT:
4921 IEM_MC_BEGIN(2, 0);
4922 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4923 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4924 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
4925 IEM_MC_REF_EFLAGS(pEFlags);
4926 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
4927 IEM_MC_ADVANCE_RIP();
4928 IEM_MC_END();
4929 return VINF_SUCCESS;
4930
4931 case IEMMODE_64BIT:
4932 IEM_MC_BEGIN(2, 0);
4933 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4934 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4935 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
4936 IEM_MC_REF_EFLAGS(pEFlags);
4937 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
4938 IEM_MC_ADVANCE_RIP();
4939 IEM_MC_END();
4940 return VINF_SUCCESS;
4941 }
4942 return VINF_SUCCESS;
4943}
4944
4945
4946/** Opcode 0x40. */
4947FNIEMOP_DEF(iemOp_inc_eAX)
4948{
4949 /*
4950 * This is a REX prefix in 64-bit mode.
4951 */
4952 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4953 {
4954 pIemCpu->fPrefixes |= IEM_OP_PRF_REX;
4955
4956 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4957 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4958 }
4959
4960 IEMOP_MNEMONIC("inc eAX");
4961 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
4962}
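
/* Note! In 64-bit mode opcodes 0x40 thru 0x4f are REX prefixes whose low
 *       nibble carries the W, R, X and B bits (bit 3 = REX.W, bit 2 =
 *       REX.R, bit 1 = REX.X, bit 0 = REX.B). That is why these handlers
 *       OR in the matching IEM_OP_PRF_REX_* flags and uRex* values and
 *       then re-dispatch on the next opcode byte. */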
4963
4964
4965/** Opcode 0x41. */
4966FNIEMOP_DEF(iemOp_inc_eCX)
4967{
4968 /*
4969 * This is a REX prefix in 64-bit mode.
4970 */
4971 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4972 {
4973 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
4974 pIemCpu->uRexB = 1 << 3;
4975
4976 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4977 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4978 }
4979
4980 IEMOP_MNEMONIC("inc eCX");
4981 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
4982}
4983
4984
4985/** Opcode 0x42. */
4986FNIEMOP_DEF(iemOp_inc_eDX)
4987{
4988 /*
4989 * This is a REX prefix in 64-bit mode.
4990 */
4991 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4992 {
4993 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
4994 pIemCpu->uRexIndex = 1 << 3;
4995
4996 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4997 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4998 }
4999
5000 IEMOP_MNEMONIC("inc eDX");
5001 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
5002}
5003
5004
5005
5006/** Opcode 0x43. */
5007FNIEMOP_DEF(iemOp_inc_eBX)
5008{
5009 /*
5010 * This is a REX prefix in 64-bit mode.
5011 */
5012 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5013 {
5014 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
5015 pIemCpu->uRexB = 1 << 3;
5016 pIemCpu->uRexIndex = 1 << 3;
5017
5018 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5019 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5020 }
5021
5022 IEMOP_MNEMONIC("inc eBX");
5023 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
5024}
5025
5026
5027/** Opcode 0x44. */
5028FNIEMOP_DEF(iemOp_inc_eSP)
5029{
5030 /*
5031 * This is a REX prefix in 64-bit mode.
5032 */
5033 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5034 {
5035 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
5036 pIemCpu->uRexReg = 1 << 3;
5037
5038 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5039 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5040 }
5041
5042 IEMOP_MNEMONIC("inc eSP");
5043 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
5044}
5045
5046
5047/** Opcode 0x45. */
5048FNIEMOP_DEF(iemOp_inc_eBP)
5049{
5050 /*
5051 * This is a REX prefix in 64-bit mode.
5052 */
5053 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5054 {
5055 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
5056 pIemCpu->uRexReg = 1 << 3;
5057 pIemCpu->uRexB = 1 << 3;
5058
5059 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5060 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5061 }
5062
5063 IEMOP_MNEMONIC("inc eBP");
5064 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
5065}
5066
5067
5068/** Opcode 0x46. */
5069FNIEMOP_DEF(iemOp_inc_eSI)
5070{
5071 /*
5072 * This is a REX prefix in 64-bit mode.
5073 */
5074 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5075 {
5076 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
5077 pIemCpu->uRexReg = 1 << 3;
5078 pIemCpu->uRexIndex = 1 << 3;
5079
5080 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5081 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5082 }
5083
5084 IEMOP_MNEMONIC("inc eSI");
5085 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
5086}
5087
5088
5089/** Opcode 0x47. */
5090FNIEMOP_DEF(iemOp_inc_eDI)
5091{
5092 /*
5093 * This is a REX prefix in 64-bit mode.
5094 */
5095 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5096 {
5097 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
5098 pIemCpu->uRexReg = 1 << 3;
5099 pIemCpu->uRexB = 1 << 3;
5100 pIemCpu->uRexIndex = 1 << 3;
5101
5102 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5103 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5104 }
5105
5106 IEMOP_MNEMONIC("inc eDI");
5107 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
5108}
5109
5110
5111/** Opcode 0x48. */
5112FNIEMOP_DEF(iemOp_dec_eAX)
5113{
5114 /*
5115 * This is a REX prefix in 64-bit mode.
5116 */
5117 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5118 {
5119 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
5120 iemRecalEffOpSize(pIemCpu);
5121
5122 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5123 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5124 }
5125
5126 IEMOP_MNEMONIC("dec eAX");
5127 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
5128}
5129
5130
5131/** Opcode 0x49. */
5132FNIEMOP_DEF(iemOp_dec_eCX)
5133{
5134 /*
5135 * This is a REX prefix in 64-bit mode.
5136 */
5137 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5138 {
5139 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
5140 pIemCpu->uRexB = 1 << 3;
5141 iemRecalEffOpSize(pIemCpu);
5142
5143 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5144 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5145 }
5146
5147 IEMOP_MNEMONIC("dec eCX");
5148 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
5149}
5150
5151
5152/** Opcode 0x4a. */
5153FNIEMOP_DEF(iemOp_dec_eDX)
5154{
5155 /*
5156 * This is a REX prefix in 64-bit mode.
5157 */
5158 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5159 {
5160 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5161 pIemCpu->uRexIndex = 1 << 3;
5162 iemRecalEffOpSize(pIemCpu);
5163
5164 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5165 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5166 }
5167
5168 IEMOP_MNEMONIC("dec eDX");
5169 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
5170}
5171
5172
5173/** Opcode 0x4b. */
5174FNIEMOP_DEF(iemOp_dec_eBX)
5175{
5176 /*
5177 * This is a REX prefix in 64-bit mode.
5178 */
5179 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5180 {
5181 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5182 pIemCpu->uRexB = 1 << 3;
5183 pIemCpu->uRexIndex = 1 << 3;
5184 iemRecalEffOpSize(pIemCpu);
5185
5186 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5187 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5188 }
5189
5190 IEMOP_MNEMONIC("dec eBX");
5191 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
5192}
5193
5194
5195/** Opcode 0x4c. */
5196FNIEMOP_DEF(iemOp_dec_eSP)
5197{
5198 /*
5199 * This is a REX prefix in 64-bit mode.
5200 */
5201 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5202 {
5203 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
5204 pIemCpu->uRexReg = 1 << 3;
5205 iemRecalEffOpSize(pIemCpu);
5206
5207 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5208 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5209 }
5210
5211 IEMOP_MNEMONIC("dec eSP");
5212 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
5213}
5214
5215
5216/** Opcode 0x4d. */
5217FNIEMOP_DEF(iemOp_dec_eBP)
5218{
5219 /*
5220 * This is a REX prefix in 64-bit mode.
5221 */
5222 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5223 {
5224 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
5225 pIemCpu->uRexReg = 1 << 3;
5226 pIemCpu->uRexB = 1 << 3;
5227 iemRecalEffOpSize(pIemCpu);
5228
5229 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5230 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5231 }
5232
5233 IEMOP_MNEMONIC("dec eBP");
5234 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
5235}
5236
5237
5238/** Opcode 0x4e. */
5239FNIEMOP_DEF(iemOp_dec_eSI)
5240{
5241 /*
5242 * This is a REX prefix in 64-bit mode.
5243 */
5244 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5245 {
5246 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5247 pIemCpu->uRexReg = 1 << 3;
5248 pIemCpu->uRexIndex = 1 << 3;
5249 iemRecalEffOpSize(pIemCpu);
5250
5251 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5252 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5253 }
5254
5255 IEMOP_MNEMONIC("dec eSI");
5256 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
5257}
5258
5259
5260/** Opcode 0x4f. */
5261FNIEMOP_DEF(iemOp_dec_eDI)
5262{
5263 /*
5264 * This is a REX prefix in 64-bit mode.
5265 */
5266 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5267 {
5268 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5269 pIemCpu->uRexReg = 1 << 3;
5270 pIemCpu->uRexB = 1 << 3;
5271 pIemCpu->uRexIndex = 1 << 3;
5272 iemRecalEffOpSize(pIemCpu);
5273
5274 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5275 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5276 }
5277
5278 IEMOP_MNEMONIC("dec eDI");
5279 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
5280}
5281
5282
5283/**
5284 * Common 'push register' helper.
5285 */
5286FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
5287{
5288 IEMOP_HLP_NO_LOCK_PREFIX();
5289 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5290 {
5291 iReg |= pIemCpu->uRexB;
5292 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5293 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5294 }
5295
5296 switch (pIemCpu->enmEffOpSize)
5297 {
5298 case IEMMODE_16BIT:
5299 IEM_MC_BEGIN(0, 1);
5300 IEM_MC_LOCAL(uint16_t, u16Value);
5301 IEM_MC_FETCH_GREG_U16(u16Value, iReg);
5302 IEM_MC_PUSH_U16(u16Value);
5303 IEM_MC_ADVANCE_RIP();
5304 IEM_MC_END();
5305 break;
5306
5307 case IEMMODE_32BIT:
5308 IEM_MC_BEGIN(0, 1);
5309 IEM_MC_LOCAL(uint32_t, u32Value);
5310 IEM_MC_FETCH_GREG_U32(u32Value, iReg);
5311 IEM_MC_PUSH_U32(u32Value);
5312 IEM_MC_ADVANCE_RIP();
5313 IEM_MC_END();
5314 break;
5315
5316 case IEMMODE_64BIT:
5317 IEM_MC_BEGIN(0, 1);
5318 IEM_MC_LOCAL(uint64_t, u64Value);
5319 IEM_MC_FETCH_GREG_U64(u64Value, iReg);
5320 IEM_MC_PUSH_U64(u64Value);
5321 IEM_MC_ADVANCE_RIP();
5322 IEM_MC_END();
5323 break;
5324 }
5325
5326 return VINF_SUCCESS;
5327}
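
/* Note! In 64-bit mode the default push operand size is 64-bit and cannot
 *       be overridden down to 32-bit; a 66h prefix gives a 16-bit push
 *       instead. That is what the IEMMODE_64BIT/IEMMODE_16BIT selection
 *       above implements ("push rax" moves rSP by 8, "66 50" by 2). */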
5328
5329
5330/** Opcode 0x50. */
5331FNIEMOP_DEF(iemOp_push_eAX)
5332{
5333 IEMOP_MNEMONIC("push rAX");
5334 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
5335}
5336
5337
5338/** Opcode 0x51. */
5339FNIEMOP_DEF(iemOp_push_eCX)
5340{
5341 IEMOP_MNEMONIC("push rCX");
5342 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
5343}
5344
5345
5346/** Opcode 0x52. */
5347FNIEMOP_DEF(iemOp_push_eDX)
5348{
5349 IEMOP_MNEMONIC("push rDX");
5350 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
5351}
5352
5353
5354/** Opcode 0x53. */
5355FNIEMOP_DEF(iemOp_push_eBX)
5356{
5357 IEMOP_MNEMONIC("push rBX");
5358 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
5359}
5360
5361
5362/** Opcode 0x54. */
5363FNIEMOP_DEF(iemOp_push_eSP)
5364{
5365 IEMOP_MNEMONIC("push rSP");
5366 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
5367}
5368
5369
5370/** Opcode 0x55. */
5371FNIEMOP_DEF(iemOp_push_eBP)
5372{
5373 IEMOP_MNEMONIC("push rBP");
5374 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
5375}
5376
5377
5378/** Opcode 0x56. */
5379FNIEMOP_DEF(iemOp_push_eSI)
5380{
5381 IEMOP_MNEMONIC("push rSI");
5382 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
5383}
5384
5385
5386/** Opcode 0x57. */
5387FNIEMOP_DEF(iemOp_push_eDI)
5388{
5389 IEMOP_MNEMONIC("push rDI");
5390 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
5391}
5392
5393
5394/**
5395 * Common 'pop register' helper.
5396 */
5397FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
5398{
5399 IEMOP_HLP_NO_LOCK_PREFIX();
5400 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5401 {
5402 iReg |= pIemCpu->uRexB;
5403 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5404 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5405 }
5406
5407/** @todo How does this code handle iReg==X86_GREG_xSP? And how does a real
5408 * CPU handle it, for that matter? (The Intel pseudo code hints that the
5409 * stack pointer is incremented by the stack item size before the popped
5410 * value is written.) Test it, both encodings and all three register sizes. */
5411 switch (pIemCpu->enmEffOpSize)
5412 {
5413 case IEMMODE_16BIT:
5414 IEM_MC_BEGIN(0, 1);
5415 IEM_MC_LOCAL(uint16_t, *pu16Dst);
5416 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
5417 IEM_MC_POP_U16(pu16Dst);
5418 IEM_MC_ADVANCE_RIP();
5419 IEM_MC_END();
5420 break;
5421
5422 case IEMMODE_32BIT:
5423 IEM_MC_BEGIN(0, 1);
5424 IEM_MC_LOCAL(uint32_t, *pu32Dst);
5425 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
5426 IEM_MC_POP_U32(pu32Dst);
5427 IEM_MC_ADVANCE_RIP();
5428 IEM_MC_END();
5429 break;
5430
5431 case IEMMODE_64BIT:
5432 IEM_MC_BEGIN(0, 1);
5433 IEM_MC_LOCAL(uint64_t, *pu64Dst);
5434 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
5435 IEM_MC_POP_U64(pu64Dst);
5436 IEM_MC_ADVANCE_RIP();
5437 IEM_MC_END();
5438 break;
5439 }
5440
5441 return VINF_SUCCESS;
5442}
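
/* Note! If the Intel pseudo code is taken at face value, "pop rSP" ends
 *       with rSP equal to the value read from the old top of stack, e.g.
 *       with SP=0x0ffe and 0x1234 stored at ss:0x0ffe, "pop sp" should
 *       yield SP=0x1234 rather than 0x1236. Untested, hence the @todo. */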
5443
5444
5445/** Opcode 0x58. */
5446FNIEMOP_DEF(iemOp_pop_eAX)
5447{
5448 IEMOP_MNEMONIC("pop rAX");
5449 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
5450}
5451
5452
5453/** Opcode 0x59. */
5454FNIEMOP_DEF(iemOp_pop_eCX)
5455{
5456 IEMOP_MNEMONIC("pop rCX");
5457 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
5458}
5459
5460
5461/** Opcode 0x5a. */
5462FNIEMOP_DEF(iemOp_pop_eDX)
5463{
5464 IEMOP_MNEMONIC("pop rDX");
5465 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
5466}
5467
5468
5469/** Opcode 0x5b. */
5470FNIEMOP_DEF(iemOp_pop_eBX)
5471{
5472 IEMOP_MNEMONIC("pop rBX");
5473 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
5474}
5475
5476
5477/** Opcode 0x5c. */
5478FNIEMOP_DEF(iemOp_pop_eSP)
5479{
5480 IEMOP_MNEMONIC("pop rSP");
5481 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
5482}
5483
5484
5485/** Opcode 0x5d. */
5486FNIEMOP_DEF(iemOp_pop_eBP)
5487{
5488 IEMOP_MNEMONIC("pop rBP");
5489 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
5490}
5491
5492
5493/** Opcode 0x5e. */
5494FNIEMOP_DEF(iemOp_pop_eSI)
5495{
5496 IEMOP_MNEMONIC("pop rSI");
5497 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
5498}
5499
5500
5501/** Opcode 0x5f. */
5502FNIEMOP_DEF(iemOp_pop_eDI)
5503{
5504 IEMOP_MNEMONIC("pop rDI");
5505 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
5506}
5507
5508
5509/** Opcode 0x60. */
5510FNIEMOP_DEF(iemOp_pusha)
5511{
5512 IEMOP_MNEMONIC("pusha");
5513 IEMOP_HLP_NO_64BIT();
5514 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5515 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
5516 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5517 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
5518}
5519
5520
5521/** Opcode 0x61. */
5522FNIEMOP_DEF(iemOp_popa)
5523{
5524 IEMOP_MNEMONIC("popa");
5525 IEMOP_HLP_NO_64BIT();
5526 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5527 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
5528 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5529 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
5530}
5531
5532
5533/** Opcode 0x62. */
5534FNIEMOP_STUB(iemOp_bound_Gv_Ma);
5535/** Opcode 0x63. */
5536FNIEMOP_STUB(iemOp_arpl_Ew_Gw);
5537
5538
5539/** Opcode 0x64. */
5540FNIEMOP_DEF(iemOp_seg_FS)
5541{
5542 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
5543 pIemCpu->iEffSeg = X86_SREG_FS;
5544
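 /* Like the other prefix bytes, this one just records its effect and then
  * restarts decoding with the next byte via the one-byte opcode map. */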
5545 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5546 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5547}
5548
5549
5550/** Opcode 0x65. */
5551FNIEMOP_DEF(iemOp_seg_GS)
5552{
5553 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
5554 pIemCpu->iEffSeg = X86_SREG_GS;
5555
5556 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5557 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5558}
5559
5560
5561/** Opcode 0x66. */
5562FNIEMOP_DEF(iemOp_op_size)
5563{
5564 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
5565 iemRecalEffOpSize(pIemCpu);
5566
5567 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5568 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5569}
5570
5571
5572/** Opcode 0x67. */
5573FNIEMOP_DEF(iemOp_addr_size)
5574{
5575 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
5576 switch (pIemCpu->enmDefAddrMode)
5577 {
5578 case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5579 case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
5580 case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5581 default: AssertFailed();
5582 }
5583
5584 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5585 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5586}
5587
5588
5589/** Opcode 0x68. */
5590FNIEMOP_DEF(iemOp_push_Iz)
5591{
5592 IEMOP_MNEMONIC("push Iz");
5593 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5594 switch (pIemCpu->enmEffOpSize)
5595 {
5596 case IEMMODE_16BIT:
5597 {
5598 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
5599 IEMOP_HLP_NO_LOCK_PREFIX();
5600 IEM_MC_BEGIN(0, 0);
5601 IEM_MC_PUSH_U16(u16Imm);
5602 IEM_MC_ADVANCE_RIP();
5603 IEM_MC_END();
5604 return VINF_SUCCESS;
5605 }
5606
5607 case IEMMODE_32BIT:
5608 {
5609 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
5610 IEMOP_HLP_NO_LOCK_PREFIX();
5611 IEM_MC_BEGIN(0, 0);
5612 IEM_MC_PUSH_U32(u32Imm);
5613 IEM_MC_ADVANCE_RIP();
5614 IEM_MC_END();
5615 return VINF_SUCCESS;
5616 }
5617
5618 case IEMMODE_64BIT:
5619 {
5620 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
5621 IEMOP_HLP_NO_LOCK_PREFIX();
5622 IEM_MC_BEGIN(0, 0);
5623 IEM_MC_PUSH_U64(u64Imm);
5624 IEM_MC_ADVANCE_RIP();
5625 IEM_MC_END();
5626 return VINF_SUCCESS;
5627 }
5628
5629 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5630 }
5631}
5632
5633
5634/** Opcode 0x69. */
5635FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
5636{
5637 IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
5638 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5639 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5640
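 /* Note: the two-operand assembly helper computes 'dst *= src', so the Ev
  * value is loaded into a temporary that acts as the destination and the
  * product is only copied into Gv after the helper returns. */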
5641 switch (pIemCpu->enmEffOpSize)
5642 {
5643 case IEMMODE_16BIT:
5644 {
5645 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
5646 IEMOP_HLP_NO_LOCK_PREFIX();
5647 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5648 {
5649 /* register operand */
5650 IEM_MC_BEGIN(3, 1);
5651 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5652 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
5653 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5654 IEM_MC_LOCAL(uint16_t, u16Tmp);
5655
5656 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5657 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5658 IEM_MC_REF_EFLAGS(pEFlags);
5659 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5660 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5661
5662 IEM_MC_ADVANCE_RIP();
5663 IEM_MC_END();
5664 }
5665 else
5666 {
5667 /* memory operand */
5668 IEM_MC_BEGIN(3, 2);
5669 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5670 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
5671 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5672 IEM_MC_LOCAL(uint16_t, u16Tmp);
5673 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5674
5675 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5676 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5677 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5678 IEM_MC_REF_EFLAGS(pEFlags);
5679 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5680 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5681
5682 IEM_MC_ADVANCE_RIP();
5683 IEM_MC_END();
5684 }
5685 return VINF_SUCCESS;
5686 }
5687
5688 case IEMMODE_32BIT:
5689 {
5690 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
5691 IEMOP_HLP_NO_LOCK_PREFIX();
5692 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5693 {
5694 /* register operand */
5695 IEM_MC_BEGIN(3, 1);
5696 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5697 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
5698 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5699 IEM_MC_LOCAL(uint32_t, u32Tmp);
5700
5701 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5702 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5703 IEM_MC_REF_EFLAGS(pEFlags);
5704 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5705 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5706
5707 IEM_MC_ADVANCE_RIP();
5708 IEM_MC_END();
5709 }
5710 else
5711 {
5712 /* memory operand */
5713 IEM_MC_BEGIN(3, 2);
5714 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5715 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
5716 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5717 IEM_MC_LOCAL(uint32_t, u32Tmp);
5718 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5719
5720 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5721 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5722 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5723 IEM_MC_REF_EFLAGS(pEFlags);
5724 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5725 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5726
5727 IEM_MC_ADVANCE_RIP();
5728 IEM_MC_END();
5729 }
5730 return VINF_SUCCESS;
5731 }
5732
5733 case IEMMODE_64BIT:
5734 {
5735 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
5736 IEMOP_HLP_NO_LOCK_PREFIX();
5737 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5738 {
5739 /* register operand */
5740 IEM_MC_BEGIN(3, 1);
5741 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5742 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
5743 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5744 IEM_MC_LOCAL(uint64_t, u64Tmp);
5745
5746 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5747 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5748 IEM_MC_REF_EFLAGS(pEFlags);
5749 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5750 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5751
5752 IEM_MC_ADVANCE_RIP();
5753 IEM_MC_END();
5754 }
5755 else
5756 {
5757 /* memory operand */
5758 IEM_MC_BEGIN(3, 2);
5759 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5760 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
5761 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5762 IEM_MC_LOCAL(uint64_t, u64Tmp);
5763 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5764
5765 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5766 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5767 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5768 IEM_MC_REF_EFLAGS(pEFlags);
5769 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5770 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5771
5772 IEM_MC_ADVANCE_RIP();
5773 IEM_MC_END();
5774 }
5775 return VINF_SUCCESS;
5776 }
5777 }
5778 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5779}
5780
5781
5782/** Opcode 0x6a. */
5783FNIEMOP_DEF(iemOp_push_Ib)
5784{
5785 IEMOP_MNEMONIC("push Ib");
5786 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
5787 IEMOP_HLP_NO_LOCK_PREFIX();
5788 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5789
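 /* The signed byte immediate is sign-extended to the effective operand size
  * by the implicit integer conversions in the pushes below. */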
5790 IEM_MC_BEGIN(0, 0);
5791 switch (pIemCpu->enmEffOpSize)
5792 {
5793 case IEMMODE_16BIT:
5794 IEM_MC_PUSH_U16(i8Imm);
5795 break;
5796 case IEMMODE_32BIT:
5797 IEM_MC_PUSH_U32(i8Imm);
5798 break;
5799 case IEMMODE_64BIT:
5800 IEM_MC_PUSH_U64(i8Imm);
5801 break;
5802 }
5803 IEM_MC_ADVANCE_RIP();
5804 IEM_MC_END();
5805 return VINF_SUCCESS;
5806}
5807
5808
5809/** Opcode 0x6b. */
5810FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
5811{
5812 IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Iz; */
5813 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5814 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
5815 IEMOP_HLP_NO_LOCK_PREFIX();
5816 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5817
5818 switch (pIemCpu->enmEffOpSize)
5819 {
5820 case IEMMODE_16BIT:
5821 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5822 {
5823 /* register operand */
5824 IEM_MC_BEGIN(3, 1);
5825 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5826 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm, 1);
5827 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5828 IEM_MC_LOCAL(uint16_t, u16Tmp);
5829
5830 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5831 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5832 IEM_MC_REF_EFLAGS(pEFlags);
5833 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5834 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5835
5836 IEM_MC_ADVANCE_RIP();
5837 IEM_MC_END();
5838 }
5839 else
5840 {
5841 /* memory operand */
5842 IEM_MC_BEGIN(3, 2);
5843 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5844 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm, 1);
5845 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5846 IEM_MC_LOCAL(uint16_t, u16Tmp);
5847 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5848
5849 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5850 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5851 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5852 IEM_MC_REF_EFLAGS(pEFlags);
5853 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5854 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5855
5856 IEM_MC_ADVANCE_RIP();
5857 IEM_MC_END();
5858 }
5859 return VINF_SUCCESS;
5860
5861 case IEMMODE_32BIT:
5862 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5863 {
5864 /* register operand */
5865 IEM_MC_BEGIN(3, 1);
5866 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5867 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm, 1);
5868 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5869 IEM_MC_LOCAL(uint32_t, u32Tmp);
5870
5871 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5872 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5873 IEM_MC_REF_EFLAGS(pEFlags);
5874 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5875 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5876
5877 IEM_MC_ADVANCE_RIP();
5878 IEM_MC_END();
5879 }
5880 else
5881 {
5882 /* memory operand */
5883 IEM_MC_BEGIN(3, 2);
5884 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5885 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm, 1);
5886 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5887 IEM_MC_LOCAL(uint32_t, u32Tmp);
5888 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5889
5890 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5891 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5892 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5893 IEM_MC_REF_EFLAGS(pEFlags);
5894 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5895 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5896
5897 IEM_MC_ADVANCE_RIP();
5898 IEM_MC_END();
5899 }
5900 return VINF_SUCCESS;
5901
5902 case IEMMODE_64BIT:
5903 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5904 {
5905 /* register operand */
5906 IEM_MC_BEGIN(3, 1);
5907 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5908 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm, 1);
5909 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5910 IEM_MC_LOCAL(uint64_t, u64Tmp);
5911
5912 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5913 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5914 IEM_MC_REF_EFLAGS(pEFlags);
5915 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5916 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5917
5918 IEM_MC_ADVANCE_RIP();
5919 IEM_MC_END();
5920 }
5921 else
5922 {
5923 /* memory operand */
5924 IEM_MC_BEGIN(3, 2);
5925 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5926 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm, 1);
5927 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5928 IEM_MC_LOCAL(uint64_t, u64Tmp);
5929 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5930
5931 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5932 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5933 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5934 IEM_MC_REF_EFLAGS(pEFlags);
5935 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5936 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5937
5938 IEM_MC_ADVANCE_RIP();
5939 IEM_MC_END();
5940 }
5941 return VINF_SUCCESS;
5942 }
5943 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5944}
5945
5946
5947/** Opcode 0x6c. */
5948FNIEMOP_DEF(iemOp_insb_Yb_DX)
5949{
5950 IEMOP_HLP_NO_LOCK_PREFIX();
5951 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
5952 {
5953 IEMOP_MNEMONIC("rep ins Yb,DX");
5954 switch (pIemCpu->enmEffAddrMode)
5955 {
5956 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16);
5957 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr32);
5958 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr64);
5959 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5960 }
5961 }
5962 else
5963 {
5964 IEMOP_MNEMONIC("ins Yb,DX");
5965 switch (pIemCpu->enmEffAddrMode)
5966 {
5967 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr16);
5968 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr32);
5969 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr64);
5970 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5971 }
5972 }
5973}
5974
5975
5976/** Opcode 0x6d. */
5977FNIEMOP_DEF(iemOp_inswd_Yv_DX)
5978{
5979 IEMOP_HLP_NO_LOCK_PREFIX();
5980 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
5981 {
5982 IEMOP_MNEMONIC("rep ins Yv,DX");
5983 switch (pIemCpu->enmEffOpSize)
5984 {
5985 case IEMMODE_16BIT:
5986 switch (pIemCpu->enmEffAddrMode)
5987 {
5988 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr16);
5989 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr32);
5990 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr64);
5991 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5992 }
5993 break;
5994 case IEMMODE_64BIT:
5995 case IEMMODE_32BIT:
5996 switch (pIemCpu->enmEffAddrMode)
5997 {
5998 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr16);
5999 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr32);
6000 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr64);
6001 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6002 }
6003 break;
6004 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6005 }
6006 }
6007 else
6008 {
6009 IEMOP_MNEMONIC("ins Yv,DX");
6010 switch (pIemCpu->enmEffOpSize)
6011 {
6012 case IEMMODE_16BIT:
6013 switch (pIemCpu->enmEffAddrMode)
6014 {
6015 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr16);
6016 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr32);
6017 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr64);
6018 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6019 }
6020 break;
6021 case IEMMODE_64BIT:
6022 case IEMMODE_32BIT:
6023 switch (pIemCpu->enmEffAddrMode)
6024 {
6025 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr16);
6026 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr32);
6027 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr64);
6028 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6029 }
6030 break;
6031 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6032 }
6033 }
6034}
6035
6036
6037/** Opcode 0x6e. */
6038FNIEMOP_DEF(iemOp_outsb_Yb_DX)
6039{
6040 IEMOP_HLP_NO_LOCK_PREFIX();
6041 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
6042 {
6043 IEMOP_MNEMONIC("rep out DX,Yb");
6044 switch (pIemCpu->enmEffAddrMode)
6045 {
6046 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg);
6047 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg);
6048 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg);
6049 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6050 }
6051 }
6052 else
6053 {
6054 IEMOP_MNEMONIC("out DX,Yb");
6055 switch (pIemCpu->enmEffAddrMode)
6056 {
6057 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg);
6058 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg);
6059 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg);
6060 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6061 }
6062 }
6063}
6064
6065
6066/** Opcode 0x6f. */
6067FNIEMOP_DEF(iemOp_outswd_Yv_DX)
6068{
6069 IEMOP_HLP_NO_LOCK_PREFIX();
6070 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
6071 {
6072 IEMOP_MNEMONIC("rep outs DX,Yv");
6073 switch (pIemCpu->enmEffOpSize)
6074 {
6075 case IEMMODE_16BIT:
6076 switch (pIemCpu->enmEffAddrMode)
6077 {
6078 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
6079 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
6080 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
6081 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6082 }
6083 break;
6084 case IEMMODE_64BIT:
6085 case IEMMODE_32BIT:
6086 switch (pIemCpu->enmEffAddrMode)
6087 {
6088 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
6089 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
6090 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
6091 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6092 }
6093 break;
6094 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6095 }
6096 }
6097 else
6098 {
6099 IEMOP_MNEMONIC("outs DX,Yv");
6100 switch (pIemCpu->enmEffOpSize)
6101 {
6102 case IEMMODE_16BIT:
6103 switch (pIemCpu->enmEffAddrMode)
6104 {
6105 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg);
6106 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg);
6107 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg);
6108 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6109 }
6110 break;
6111 case IEMMODE_64BIT:
6112 case IEMMODE_32BIT:
6113 switch (pIemCpu->enmEffAddrMode)
6114 {
6115 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg);
6116 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg);
6117 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg);
6118 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6119 }
6120 break;
6121 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6122 }
6123 }
6124}
6125
6126
6127/** Opcode 0x70. */
6128FNIEMOP_DEF(iemOp_jo_Jb)
6129{
6130 IEMOP_MNEMONIC("jo Jb");
6131 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6132 IEMOP_HLP_NO_LOCK_PREFIX();
6133 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6134
6135 IEM_MC_BEGIN(0, 0);
6136 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
6137 IEM_MC_REL_JMP_S8(i8Imm);
6138 } IEM_MC_ELSE() {
6139 IEM_MC_ADVANCE_RIP();
6140 } IEM_MC_ENDIF();
6141 IEM_MC_END();
6142 return VINF_SUCCESS;
6143}
6144
6145
6146/** Opcode 0x71. */
6147FNIEMOP_DEF(iemOp_jno_Jb)
6148{
6149 IEMOP_MNEMONIC("jno Jb");
6150 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6151 IEMOP_HLP_NO_LOCK_PREFIX();
6152 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6153
6154 IEM_MC_BEGIN(0, 0);
6155 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
6156 IEM_MC_ADVANCE_RIP();
6157 } IEM_MC_ELSE() {
6158 IEM_MC_REL_JMP_S8(i8Imm);
6159 } IEM_MC_ENDIF();
6160 IEM_MC_END();
6161 return VINF_SUCCESS;
6162}
6163

6164/** Opcode 0x72. */
6165FNIEMOP_DEF(iemOp_jc_Jb)
6166{
6167 IEMOP_MNEMONIC("jc/jnae Jb");
6168 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6169 IEMOP_HLP_NO_LOCK_PREFIX();
6170 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6171
6172 IEM_MC_BEGIN(0, 0);
6173 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
6174 IEM_MC_REL_JMP_S8(i8Imm);
6175 } IEM_MC_ELSE() {
6176 IEM_MC_ADVANCE_RIP();
6177 } IEM_MC_ENDIF();
6178 IEM_MC_END();
6179 return VINF_SUCCESS;
6180}
6181
6182
6183/** Opcode 0x73. */
6184FNIEMOP_DEF(iemOp_jnc_Jb)
6185{
6186 IEMOP_MNEMONIC("jnc/jnb Jb");
6187 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6188 IEMOP_HLP_NO_LOCK_PREFIX();
6189 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6190
6191 IEM_MC_BEGIN(0, 0);
6192 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
6193 IEM_MC_ADVANCE_RIP();
6194 } IEM_MC_ELSE() {
6195 IEM_MC_REL_JMP_S8(i8Imm);
6196 } IEM_MC_ENDIF();
6197 IEM_MC_END();
6198 return VINF_SUCCESS;
6199}
6200
6201
6202/** Opcode 0x74. */
6203FNIEMOP_DEF(iemOp_je_Jb)
6204{
6205 IEMOP_MNEMONIC("je/jz Jb");
6206 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6207 IEMOP_HLP_NO_LOCK_PREFIX();
6208 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6209
6210 IEM_MC_BEGIN(0, 0);
6211 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
6212 IEM_MC_REL_JMP_S8(i8Imm);
6213 } IEM_MC_ELSE() {
6214 IEM_MC_ADVANCE_RIP();
6215 } IEM_MC_ENDIF();
6216 IEM_MC_END();
6217 return VINF_SUCCESS;
6218}
6219
6220
6221/** Opcode 0x75. */
6222FNIEMOP_DEF(iemOp_jne_Jb)
6223{
6224 IEMOP_MNEMONIC("jne/jnz Jb");
6225 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6226 IEMOP_HLP_NO_LOCK_PREFIX();
6227 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6228
6229 IEM_MC_BEGIN(0, 0);
6230 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
6231 IEM_MC_ADVANCE_RIP();
6232 } IEM_MC_ELSE() {
6233 IEM_MC_REL_JMP_S8(i8Imm);
6234 } IEM_MC_ENDIF();
6235 IEM_MC_END();
6236 return VINF_SUCCESS;
6237}
6238
6239
6240/** Opcode 0x76. */
6241FNIEMOP_DEF(iemOp_jbe_Jb)
6242{
6243 IEMOP_MNEMONIC("jbe/jna Jb");
6244 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6245 IEMOP_HLP_NO_LOCK_PREFIX();
6246 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6247
6248 IEM_MC_BEGIN(0, 0);
6249 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
6250 IEM_MC_REL_JMP_S8(i8Imm);
6251 } IEM_MC_ELSE() {
6252 IEM_MC_ADVANCE_RIP();
6253 } IEM_MC_ENDIF();
6254 IEM_MC_END();
6255 return VINF_SUCCESS;
6256}
6257
6258
6259/** Opcode 0x77. */
6260FNIEMOP_DEF(iemOp_jnbe_Jb)
6261{
6262 IEMOP_MNEMONIC("jnbe/ja Jb");
6263 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6264 IEMOP_HLP_NO_LOCK_PREFIX();
6265 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6266
6267 IEM_MC_BEGIN(0, 0);
6268 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
6269 IEM_MC_ADVANCE_RIP();
6270 } IEM_MC_ELSE() {
6271 IEM_MC_REL_JMP_S8(i8Imm);
6272 } IEM_MC_ENDIF();
6273 IEM_MC_END();
6274 return VINF_SUCCESS;
6275}
6276
6277
6278/** Opcode 0x78. */
6279FNIEMOP_DEF(iemOp_js_Jb)
6280{
6281 IEMOP_MNEMONIC("js Jb");
6282 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6283 IEMOP_HLP_NO_LOCK_PREFIX();
6284 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6285
6286 IEM_MC_BEGIN(0, 0);
6287 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6288 IEM_MC_REL_JMP_S8(i8Imm);
6289 } IEM_MC_ELSE() {
6290 IEM_MC_ADVANCE_RIP();
6291 } IEM_MC_ENDIF();
6292 IEM_MC_END();
6293 return VINF_SUCCESS;
6294}
6295
6296
6297/** Opcode 0x79. */
6298FNIEMOP_DEF(iemOp_jns_Jb)
6299{
6300 IEMOP_MNEMONIC("jns Jb");
6301 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6302 IEMOP_HLP_NO_LOCK_PREFIX();
6303 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6304
6305 IEM_MC_BEGIN(0, 0);
6306 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6307 IEM_MC_ADVANCE_RIP();
6308 } IEM_MC_ELSE() {
6309 IEM_MC_REL_JMP_S8(i8Imm);
6310 } IEM_MC_ENDIF();
6311 IEM_MC_END();
6312 return VINF_SUCCESS;
6313}
6314
6315
6316/** Opcode 0x7a. */
6317FNIEMOP_DEF(iemOp_jp_Jb)
6318{
6319 IEMOP_MNEMONIC("jp Jb");
6320 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6321 IEMOP_HLP_NO_LOCK_PREFIX();
6322 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6323
6324 IEM_MC_BEGIN(0, 0);
6325 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6326 IEM_MC_REL_JMP_S8(i8Imm);
6327 } IEM_MC_ELSE() {
6328 IEM_MC_ADVANCE_RIP();
6329 } IEM_MC_ENDIF();
6330 IEM_MC_END();
6331 return VINF_SUCCESS;
6332}
6333
6334
6335/** Opcode 0x7b. */
6336FNIEMOP_DEF(iemOp_jnp_Jb)
6337{
6338 IEMOP_MNEMONIC("jnp Jb");
6339 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6340 IEMOP_HLP_NO_LOCK_PREFIX();
6341 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6342
6343 IEM_MC_BEGIN(0, 0);
6344 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6345 IEM_MC_ADVANCE_RIP();
6346 } IEM_MC_ELSE() {
6347 IEM_MC_REL_JMP_S8(i8Imm);
6348 } IEM_MC_ENDIF();
6349 IEM_MC_END();
6350 return VINF_SUCCESS;
6351}
6352
6353
6354/** Opcode 0x7c. */
6355FNIEMOP_DEF(iemOp_jl_Jb)
6356{
6357 IEMOP_MNEMONIC("jl/jnge Jb");
6358 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6359 IEMOP_HLP_NO_LOCK_PREFIX();
6360 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6361
6362 IEM_MC_BEGIN(0, 0);
6363 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6364 IEM_MC_REL_JMP_S8(i8Imm);
6365 } IEM_MC_ELSE() {
6366 IEM_MC_ADVANCE_RIP();
6367 } IEM_MC_ENDIF();
6368 IEM_MC_END();
6369 return VINF_SUCCESS;
6370}
6371
6372
6373/** Opcode 0x7d. */
6374FNIEMOP_DEF(iemOp_jnl_Jb)
6375{
6376 IEMOP_MNEMONIC("jnl/jge Jb");
6377 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6378 IEMOP_HLP_NO_LOCK_PREFIX();
6379 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6380
6381 IEM_MC_BEGIN(0, 0);
6382 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6383 IEM_MC_ADVANCE_RIP();
6384 } IEM_MC_ELSE() {
6385 IEM_MC_REL_JMP_S8(i8Imm);
6386 } IEM_MC_ENDIF();
6387 IEM_MC_END();
6388 return VINF_SUCCESS;
6389}
6390
6391
6392/** Opcode 0x7e. */
6393FNIEMOP_DEF(iemOp_jle_Jb)
6394{
6395 IEMOP_MNEMONIC("jle/jng Jb");
6396 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6397 IEMOP_HLP_NO_LOCK_PREFIX();
6398 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6399
6400 IEM_MC_BEGIN(0, 0);
6401 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
6402 IEM_MC_REL_JMP_S8(i8Imm);
6403 } IEM_MC_ELSE() {
6404 IEM_MC_ADVANCE_RIP();
6405 } IEM_MC_ENDIF();
6406 IEM_MC_END();
6407 return VINF_SUCCESS;
6408}
6409
6410
6411/** Opcode 0x7f. */
6412FNIEMOP_DEF(iemOp_jnle_Jb)
6413{
6414 IEMOP_MNEMONIC("jnle/jg Jb");
6415 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6416 IEMOP_HLP_NO_LOCK_PREFIX();
6417 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6418
6419 IEM_MC_BEGIN(0, 0);
6420 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
6421 IEM_MC_ADVANCE_RIP();
6422 } IEM_MC_ELSE() {
6423 IEM_MC_REL_JMP_S8(i8Imm);
6424 } IEM_MC_ENDIF();
6425 IEM_MC_END();
6426 return VINF_SUCCESS;
6427}
6428
6429
6430/** Opcode 0x80. */
6431FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
6432{
6433 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6434 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
6435 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
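 /* Note: the string literal above packs eight 4-byte mnemonic slots
  * ("add\0", "or\0\0", "adc\0", ...); adding reg*4 selects the name for the
  * ModRM reg field, which also indexes g_apIemImplGrp1. */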
6436
6437 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6438 {
6439 /* register target */
6440 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6441 IEMOP_HLP_NO_LOCK_PREFIX();
6442 IEM_MC_BEGIN(3, 0);
6443 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
6444 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
6445 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6446
6447 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6448 IEM_MC_REF_EFLAGS(pEFlags);
6449 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
6450
6451 IEM_MC_ADVANCE_RIP();
6452 IEM_MC_END();
6453 }
6454 else
6455 {
6456 /* memory target */
6457 uint32_t fAccess;
6458 if (pImpl->pfnLockedU8)
6459 fAccess = IEM_ACCESS_DATA_RW;
6460 else
6461 { /* CMP */
6462 IEMOP_HLP_NO_LOCK_PREFIX();
6463 fAccess = IEM_ACCESS_DATA_R;
6464 }
6465 IEM_MC_BEGIN(3, 2);
6466 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
6467 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6468 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6469
6470 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
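 /* The immediate is fetched only now so that IEM_MC_CALC_RM_EFF_ADDR above
  * has already consumed any displacement bytes, matching the order of the
  * instruction encoding. */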
6471 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6472 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
6473
6474 IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6475 IEM_MC_FETCH_EFLAGS(EFlags);
6476 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6477 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
6478 else
6479 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);
6480
6481 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
6482 IEM_MC_COMMIT_EFLAGS(EFlags);
6483 IEM_MC_ADVANCE_RIP();
6484 IEM_MC_END();
6485 }
6486 return VINF_SUCCESS;
6487}
6488
6489
6490/** Opcode 0x81. */
6491FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
6492{
6493 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6494 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
6495 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6496
6497 switch (pIemCpu->enmEffOpSize)
6498 {
6499 case IEMMODE_16BIT:
6500 {
6501 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6502 {
6503 /* register target */
6504 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6505 IEMOP_HLP_NO_LOCK_PREFIX();
6506 IEM_MC_BEGIN(3, 0);
6507 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6508 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
6509 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6510
6511 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6512 IEM_MC_REF_EFLAGS(pEFlags);
6513 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6514
6515 IEM_MC_ADVANCE_RIP();
6516 IEM_MC_END();
6517 }
6518 else
6519 {
6520 /* memory target */
6521 uint32_t fAccess;
6522 if (pImpl->pfnLockedU16)
6523 fAccess = IEM_ACCESS_DATA_RW;
6524 else
6525 { /* CMP, TEST */
6526 IEMOP_HLP_NO_LOCK_PREFIX();
6527 fAccess = IEM_ACCESS_DATA_R;
6528 }
6529 IEM_MC_BEGIN(3, 2);
6530 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6531 IEM_MC_ARG(uint16_t, u16Src, 1);
6532 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6533 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6534
6535 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6536 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6537 IEM_MC_ASSIGN(u16Src, u16Imm);
6538 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6539 IEM_MC_FETCH_EFLAGS(EFlags);
6540 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6541 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6542 else
6543 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6544
6545 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6546 IEM_MC_COMMIT_EFLAGS(EFlags);
6547 IEM_MC_ADVANCE_RIP();
6548 IEM_MC_END();
6549 }
6550 break;
6551 }
6552
6553 case IEMMODE_32BIT:
6554 {
6555 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6556 {
6557 /* register target */
6558 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6559 IEMOP_HLP_NO_LOCK_PREFIX();
6560 IEM_MC_BEGIN(3, 0);
6561 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6562 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
6563 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6564
6565 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6566 IEM_MC_REF_EFLAGS(pEFlags);
6567 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6568
6569 IEM_MC_ADVANCE_RIP();
6570 IEM_MC_END();
6571 }
6572 else
6573 {
6574 /* memory target */
6575 uint32_t fAccess;
6576 if (pImpl->pfnLockedU32)
6577 fAccess = IEM_ACCESS_DATA_RW;
6578 else
6579 { /* CMP, TEST */
6580 IEMOP_HLP_NO_LOCK_PREFIX();
6581 fAccess = IEM_ACCESS_DATA_R;
6582 }
6583 IEM_MC_BEGIN(3, 2);
6584 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6585 IEM_MC_ARG(uint32_t, u32Src, 1);
6586 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6587 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6588
6589 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6590 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6591 IEM_MC_ASSIGN(u32Src, u32Imm);
6592 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6593 IEM_MC_FETCH_EFLAGS(EFlags);
6594 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6595 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6596 else
6597 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6598
6599 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6600 IEM_MC_COMMIT_EFLAGS(EFlags);
6601 IEM_MC_ADVANCE_RIP();
6602 IEM_MC_END();
6603 }
6604 break;
6605 }
6606
6607 case IEMMODE_64BIT:
6608 {
6609 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6610 {
6611 /* register target */
6612 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6613 IEMOP_HLP_NO_LOCK_PREFIX();
6614 IEM_MC_BEGIN(3, 0);
6615 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6616 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
6617 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6618
6619 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6620 IEM_MC_REF_EFLAGS(pEFlags);
6621 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6622
6623 IEM_MC_ADVANCE_RIP();
6624 IEM_MC_END();
6625 }
6626 else
6627 {
6628 /* memory target */
6629 uint32_t fAccess;
6630 if (pImpl->pfnLockedU64)
6631 fAccess = IEM_ACCESS_DATA_RW;
6632 else
6633 { /* CMP */
6634 IEMOP_HLP_NO_LOCK_PREFIX();
6635 fAccess = IEM_ACCESS_DATA_R;
6636 }
6637 IEM_MC_BEGIN(3, 2);
6638 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6639 IEM_MC_ARG(uint64_t, u64Src, 1);
6640 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6641 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6642
6643 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6644 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6645 IEM_MC_ASSIGN(u64Src, u64Imm);
6646 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6647 IEM_MC_FETCH_EFLAGS(EFlags);
6648 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6649 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6650 else
6651 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6652
6653 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6654 IEM_MC_COMMIT_EFLAGS(EFlags);
6655 IEM_MC_ADVANCE_RIP();
6656 IEM_MC_END();
6657 }
6658 break;
6659 }
6660 }
6661 return VINF_SUCCESS;
6662}
6663
6664
6665/** Opcode 0x82. */
6666FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
6667{
6668 IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
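 /* Opcode 0x82 is the undocumented alias of 0x80 (Eb,Ib): it decodes the
  * same group-1 operations, but raises #UD in 64-bit mode. */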
6669 return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
6670}
6671
6672
6673/** Opcode 0x83. */
6674FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
6675{
6676 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6677 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
6678 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6679
6680 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6681 {
6682 /*
6683 * Register target
6684 */
6685 IEMOP_HLP_NO_LOCK_PREFIX();
6686 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6687 switch (pIemCpu->enmEffOpSize)
6688 {
6689 case IEMMODE_16BIT:
6690 {
6691 IEM_MC_BEGIN(3, 0);
6692 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6693 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm, 1);
6694 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6695
6696 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6697 IEM_MC_REF_EFLAGS(pEFlags);
6698 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6699
6700 IEM_MC_ADVANCE_RIP();
6701 IEM_MC_END();
6702 break;
6703 }
6704
6705 case IEMMODE_32BIT:
6706 {
6707 IEM_MC_BEGIN(3, 0);
6708 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6709 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm, 1);
6710 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6711
6712 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6713 IEM_MC_REF_EFLAGS(pEFlags);
6714 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6715
6716 IEM_MC_ADVANCE_RIP();
6717 IEM_MC_END();
6718 break;
6719 }
6720
6721 case IEMMODE_64BIT:
6722 {
6723 IEM_MC_BEGIN(3, 0);
6724 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6725 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm, 1);
6726 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6727
6728 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6729 IEM_MC_REF_EFLAGS(pEFlags);
6730 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6731
6732 IEM_MC_ADVANCE_RIP();
6733 IEM_MC_END();
6734 break;
6735 }
6736 }
6737 }
6738 else
6739 {
6740 /*
6741 * Memory target.
6742 */
6743 uint32_t fAccess;
6744 if (pImpl->pfnLockedU16)
6745 fAccess = IEM_ACCESS_DATA_RW;
6746 else
6747 { /* CMP */
6748 IEMOP_HLP_NO_LOCK_PREFIX();
6749 fAccess = IEM_ACCESS_DATA_R;
6750 }
6751
6752 switch (pIemCpu->enmEffOpSize)
6753 {
6754 case IEMMODE_16BIT:
6755 {
6756 IEM_MC_BEGIN(3, 2);
6757 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6758 IEM_MC_ARG(uint16_t, u16Src, 1);
6759 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6760 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6761
6762 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6763 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6764 IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);
6765 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6766 IEM_MC_FETCH_EFLAGS(EFlags);
6767 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6768 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6769 else
6770 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6771
6772 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6773 IEM_MC_COMMIT_EFLAGS(EFlags);
6774 IEM_MC_ADVANCE_RIP();
6775 IEM_MC_END();
6776 break;
6777 }
6778
6779 case IEMMODE_32BIT:
6780 {
6781 IEM_MC_BEGIN(3, 2);
6782 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6783 IEM_MC_ARG(uint32_t, u32Src, 1);
6784 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6785 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6786
6787 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6788 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6789 IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);
6790 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6791 IEM_MC_FETCH_EFLAGS(EFlags);
6792 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6793 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6794 else
6795 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6796
6797 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6798 IEM_MC_COMMIT_EFLAGS(EFlags);
6799 IEM_MC_ADVANCE_RIP();
6800 IEM_MC_END();
6801 break;
6802 }
6803
6804 case IEMMODE_64BIT:
6805 {
6806 IEM_MC_BEGIN(3, 2);
6807 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6808 IEM_MC_ARG(uint64_t, u64Src, 1);
6809 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6810 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6811
6812 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6813 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6814 IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);
6815 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6816 IEM_MC_FETCH_EFLAGS(EFlags);
6817 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6818 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6819 else
6820 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6821
6822 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6823 IEM_MC_COMMIT_EFLAGS(EFlags);
6824 IEM_MC_ADVANCE_RIP();
6825 IEM_MC_END();
6826 break;
6827 }
6828 }
6829 }
6830 return VINF_SUCCESS;
6831}
6832
6833
6834/** Opcode 0x84. */
6835FNIEMOP_DEF(iemOp_test_Eb_Gb)
6836{
6837 IEMOP_MNEMONIC("test Eb,Gb");
6838 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
6839 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
6840 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
6841}
6842
6843
6844/** Opcode 0x85. */
6845FNIEMOP_DEF(iemOp_test_Ev_Gv)
6846{
6847 IEMOP_MNEMONIC("test Ev,Gv");
6848 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
6849 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
6850 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
6851}
6852
6853
6854/** Opcode 0x86. */
6855FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
6856{
6857 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6858 IEMOP_MNEMONIC("xchg Eb,Gb");
6859
6860 /*
6861 * If rm is denoting a register, no more instruction bytes.
6862 */
6863 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6864 {
6865 IEMOP_HLP_NO_LOCK_PREFIX();
6866
6867 IEM_MC_BEGIN(0, 2);
6868 IEM_MC_LOCAL(uint8_t, uTmp1);
6869 IEM_MC_LOCAL(uint8_t, uTmp2);
6870
6871 IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6872 IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6873 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6874 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6875
6876 IEM_MC_ADVANCE_RIP();
6877 IEM_MC_END();
6878 }
6879 else
6880 {
6881 /*
6882 * We're accessing memory.
6883 */
6884/** @todo the register must be committed separately! */
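/* (Presumably: the aimpl below swaps the register operand through a direct
 * reference, so a failure when committing the memory operand would leave the
 * register already modified.) */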
6885 IEM_MC_BEGIN(2, 2);
6886 IEM_MC_ARG(uint8_t *, pu8Mem, 0);
6887 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
6888 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6889
6890 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6891 IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6892 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6893 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
6894 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);
6895
6896 IEM_MC_ADVANCE_RIP();
6897 IEM_MC_END();
6898 }
6899 return VINF_SUCCESS;
6900}
6901
6902
6903/** Opcode 0x87. */
6904FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
6905{
6906 IEMOP_MNEMONIC("xchg Ev,Gv");
6907 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6908
6909 /*
6910 * If rm is denoting a register, no more instruction bytes.
6911 */
6912 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6913 {
6914 IEMOP_HLP_NO_LOCK_PREFIX();
6915
6916 switch (pIemCpu->enmEffOpSize)
6917 {
6918 case IEMMODE_16BIT:
6919 IEM_MC_BEGIN(0, 2);
6920 IEM_MC_LOCAL(uint16_t, uTmp1);
6921 IEM_MC_LOCAL(uint16_t, uTmp2);
6922
6923 IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6924 IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6925 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6926 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6927
6928 IEM_MC_ADVANCE_RIP();
6929 IEM_MC_END();
6930 return VINF_SUCCESS;
6931
6932 case IEMMODE_32BIT:
6933 IEM_MC_BEGIN(0, 2);
6934 IEM_MC_LOCAL(uint32_t, uTmp1);
6935 IEM_MC_LOCAL(uint32_t, uTmp2);
6936
6937 IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6938 IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6939 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6940 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6941
6942 IEM_MC_ADVANCE_RIP();
6943 IEM_MC_END();
6944 return VINF_SUCCESS;
6945
6946 case IEMMODE_64BIT:
6947 IEM_MC_BEGIN(0, 2);
6948 IEM_MC_LOCAL(uint64_t, uTmp1);
6949 IEM_MC_LOCAL(uint64_t, uTmp2);
6950
6951 IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6952 IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6953 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6954 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6955
6956 IEM_MC_ADVANCE_RIP();
6957 IEM_MC_END();
6958 return VINF_SUCCESS;
6959
6960 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6961 }
6962 }
6963 else
6964 {
6965 /*
6966 * We're accessing memory.
6967 */
6968 switch (pIemCpu->enmEffOpSize)
6969 {
6970/** @todo the register must be committed separately! */
6971 case IEMMODE_16BIT:
6972 IEM_MC_BEGIN(2, 2);
6973 IEM_MC_ARG(uint16_t *, pu16Mem, 0);
6974 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
6975 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6976
6977 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6978 IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6979 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6980 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
6981 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);
6982
6983 IEM_MC_ADVANCE_RIP();
6984 IEM_MC_END();
6985 return VINF_SUCCESS;
6986
6987 case IEMMODE_32BIT:
6988 IEM_MC_BEGIN(2, 2);
6989 IEM_MC_ARG(uint32_t *, pu32Mem, 0);
6990 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
6991 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6992
6993 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6994 IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6995 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6996 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
6997 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);
6998
6999 IEM_MC_ADVANCE_RIP();
7000 IEM_MC_END();
7001 return VINF_SUCCESS;
7002
7003 case IEMMODE_64BIT:
7004 IEM_MC_BEGIN(2, 2);
7005 IEM_MC_ARG(uint64_t *, pu64Mem, 0);
7006 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
7007 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7008
7009 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7010 IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7011 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7012 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
7013 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);
7014
7015 IEM_MC_ADVANCE_RIP();
7016 IEM_MC_END();
7017 return VINF_SUCCESS;
7018
7019 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7020 }
7021 }
7022}
7023
7024
7025/** Opcode 0x88. */
7026FNIEMOP_DEF(iemOp_mov_Eb_Gb)
7027{
7028 IEMOP_MNEMONIC("mov Eb,Gb");
7029
7030 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7032 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7033
7034 /*
7035 * If rm is denoting a register, no more instruction bytes.
7036 */
7037 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7038 {
7039 IEM_MC_BEGIN(0, 1);
7040 IEM_MC_LOCAL(uint8_t, u8Value);
7041 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7042 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
7043 IEM_MC_ADVANCE_RIP();
7044 IEM_MC_END();
7045 }
7046 else
7047 {
7048 /*
7049 * We're writing a register to memory.
7050 */
7051 IEM_MC_BEGIN(0, 2);
7052 IEM_MC_LOCAL(uint8_t, u8Value);
7053 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7054 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7055 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7056 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
7057 IEM_MC_ADVANCE_RIP();
7058 IEM_MC_END();
7059 }
7060 return VINF_SUCCESS;
7062}
7063
7064
7065/** Opcode 0x89. */
7066FNIEMOP_DEF(iemOp_mov_Ev_Gv)
7067{
7068 IEMOP_MNEMONIC("mov Ev,Gv");
7069
7070 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7071 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7072
7073 /*
7074 * If rm is denoting a register, no more instruction bytes.
7075 */
7076 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7077 {
7078 switch (pIemCpu->enmEffOpSize)
7079 {
7080 case IEMMODE_16BIT:
7081 IEM_MC_BEGIN(0, 1);
7082 IEM_MC_LOCAL(uint16_t, u16Value);
7083 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7084 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
7085 IEM_MC_ADVANCE_RIP();
7086 IEM_MC_END();
7087 break;
7088
7089 case IEMMODE_32BIT:
7090 IEM_MC_BEGIN(0, 1);
7091 IEM_MC_LOCAL(uint32_t, u32Value);
7092 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7093 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
7094 IEM_MC_ADVANCE_RIP();
7095 IEM_MC_END();
7096 break;
7097
7098 case IEMMODE_64BIT:
7099 IEM_MC_BEGIN(0, 1);
7100 IEM_MC_LOCAL(uint64_t, u64Value);
7101 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7102 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
7103 IEM_MC_ADVANCE_RIP();
7104 IEM_MC_END();
7105 break;
7106 }
7107 }
7108 else
7109 {
7110 /*
7111 * We're writing a register to memory.
7112 */
7113 switch (pIemCpu->enmEffOpSize)
7114 {
7115 case IEMMODE_16BIT:
7116 IEM_MC_BEGIN(0, 2);
7117 IEM_MC_LOCAL(uint16_t, u16Value);
7118 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7119 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7120 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7121 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
7122 IEM_MC_ADVANCE_RIP();
7123 IEM_MC_END();
7124 break;
7125
7126 case IEMMODE_32BIT:
7127 IEM_MC_BEGIN(0, 2);
7128 IEM_MC_LOCAL(uint32_t, u32Value);
7129 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7130 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7131 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7132 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
7133 IEM_MC_ADVANCE_RIP();
7134 IEM_MC_END();
7135 break;
7136
7137 case IEMMODE_64BIT:
7138 IEM_MC_BEGIN(0, 2);
7139 IEM_MC_LOCAL(uint64_t, u64Value);
7140 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7141 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7142 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7143 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
7144 IEM_MC_ADVANCE_RIP();
7145 IEM_MC_END();
7146 break;
7147 }
7148 }
7149 return VINF_SUCCESS;
7150}
7151
7152
7153/** Opcode 0x8a. */
7154FNIEMOP_DEF(iemOp_mov_Gb_Eb)
7155{
7156 IEMOP_MNEMONIC("mov Gb,Eb");
7157
7158 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7159 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7160
7161 /*
7162 * If rm is denoting a register, no more instruction bytes.
7163 */
7164 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7165 {
7166 IEM_MC_BEGIN(0, 1);
7167 IEM_MC_LOCAL(uint8_t, u8Value);
7168 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7169 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
7170 IEM_MC_ADVANCE_RIP();
7171 IEM_MC_END();
7172 }
7173 else
7174 {
7175 /*
7176 * We're loading a register from memory.
7177 */
7178 IEM_MC_BEGIN(0, 2);
7179 IEM_MC_LOCAL(uint8_t, u8Value);
7180 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7181 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7182 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
7183 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
7184 IEM_MC_ADVANCE_RIP();
7185 IEM_MC_END();
7186 }
7187 return VINF_SUCCESS;
7188}
7189
7190
7191/** Opcode 0x8b. */
7192FNIEMOP_DEF(iemOp_mov_Gv_Ev)
7193{
7194 IEMOP_MNEMONIC("mov Gv,Ev");
7195
7196 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7197 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7198
7199 /*
7200 * If rm is denoting a register, no more instruction bytes.
7201 */
7202 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7203 {
7204 switch (pIemCpu->enmEffOpSize)
7205 {
7206 case IEMMODE_16BIT:
7207 IEM_MC_BEGIN(0, 1);
7208 IEM_MC_LOCAL(uint16_t, u16Value);
7209 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7210 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
7211 IEM_MC_ADVANCE_RIP();
7212 IEM_MC_END();
7213 break;
7214
7215 case IEMMODE_32BIT:
7216 IEM_MC_BEGIN(0, 1);
7217 IEM_MC_LOCAL(uint32_t, u32Value);
7218 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7219 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
7220 IEM_MC_ADVANCE_RIP();
7221 IEM_MC_END();
7222 break;
7223
7224 case IEMMODE_64BIT:
7225 IEM_MC_BEGIN(0, 1);
7226 IEM_MC_LOCAL(uint64_t, u64Value);
7227 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7228 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
7229 IEM_MC_ADVANCE_RIP();
7230 IEM_MC_END();
7231 break;
7232 }
7233 }
7234 else
7235 {
7236 /*
7237 * We're loading a register from memory.
7238 */
7239 switch (pIemCpu->enmEffOpSize)
7240 {
7241 case IEMMODE_16BIT:
7242 IEM_MC_BEGIN(0, 2);
7243 IEM_MC_LOCAL(uint16_t, u16Value);
7244 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7245 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7246 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
7247 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
7248 IEM_MC_ADVANCE_RIP();
7249 IEM_MC_END();
7250 break;
7251
7252 case IEMMODE_32BIT:
7253 IEM_MC_BEGIN(0, 2);
7254 IEM_MC_LOCAL(uint32_t, u32Value);
7255 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7256 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7257 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
7258 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
7259 IEM_MC_ADVANCE_RIP();
7260 IEM_MC_END();
7261 break;
7262
7263 case IEMMODE_64BIT:
7264 IEM_MC_BEGIN(0, 2);
7265 IEM_MC_LOCAL(uint64_t, u64Value);
7266 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7267 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7268 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
7269 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
7270 IEM_MC_ADVANCE_RIP();
7271 IEM_MC_END();
7272 break;
7273 }
7274 }
7275 return VINF_SUCCESS;
7276}
7277
7278
7279/** Opcode 0x8c. */
7280FNIEMOP_DEF(iemOp_mov_Ev_Sw)
7281{
7282 IEMOP_MNEMONIC("mov Ev,Sw");
7283
7284 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7285 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7286
7287 /*
7288 * Check that the source segment register exists; the REX.R prefix is ignored.
7289 */
7290 uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
7291 if (iSegReg > X86_SREG_GS)
7292 return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7293
7294 /*
7295 * If rm is denoting a register, no more instruction bytes.
7296 * In that case, the operand size is respected and the upper bits are
7297 * cleared (starting with some pentium).
7298 */
7299 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7300 {
7301 switch (pIemCpu->enmEffOpSize)
7302 {
7303 case IEMMODE_16BIT:
7304 IEM_MC_BEGIN(0, 1);
7305 IEM_MC_LOCAL(uint16_t, u16Value);
7306 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
7307 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
7308 IEM_MC_ADVANCE_RIP();
7309 IEM_MC_END();
7310 break;
7311
7312 case IEMMODE_32BIT:
7313 IEM_MC_BEGIN(0, 1);
7314 IEM_MC_LOCAL(uint32_t, u32Value);
7315 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg);
7316 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
7317 IEM_MC_ADVANCE_RIP();
7318 IEM_MC_END();
7319 break;
7320
7321 case IEMMODE_64BIT:
7322 IEM_MC_BEGIN(0, 1);
7323 IEM_MC_LOCAL(uint64_t, u64Value);
7324 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg);
7325 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
7326 IEM_MC_ADVANCE_RIP();
7327 IEM_MC_END();
7328 break;
7329 }
7330 }
7331 else
7332 {
7333 /*
7334 * We're saving the register to memory. The access is word sized
7335 * regardless of operand size prefixes.
7336 */
7337#if 0 /* not necessary */
7338 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
7339#endif
7340 IEM_MC_BEGIN(0, 2);
7341 IEM_MC_LOCAL(uint16_t, u16Value);
7342 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7343 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7344 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
7345 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
7346 IEM_MC_ADVANCE_RIP();
7347 IEM_MC_END();
7348 }
7349 return VINF_SUCCESS;
7350}
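
/* Note (informal): the asymmetry above matches observed behaviour - the
   memory form of 0x8c writes exactly two bytes even with an operand size
   prefix, while the register form honours the operand size and, on newer
   CPUs, zero extends the selector into the full register. */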


/** Opcode 0x8d. */
FNIEMOP_DEF(iemOp_lea_Gv_M)
{
    IEMOP_MNEMONIC("lea Gv,M");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_LOCAL(uint16_t, u16Cast);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc);
            IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_LOCAL(uint32_t, u32Cast);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_INTERNAL_ERROR_5);
}
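
/* Informal worked example (added commentary): with a 66h prefix in 32-bit
   code, 'lea ax, [ebx+8]' and EBX=0x10000000 computes 0x10000008 and the
   IEM_MC_ASSIGN_TO_SMALLER above truncates it to AX=0x0008. No memory is
   accessed, which is also why the mod=3 (register) form is rejected. */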


/** Opcode 0x8e. */
FNIEMOP_DEF(iemOp_mov_Sw_Ev)
{
    IEMOP_MNEMONIC("mov Sw,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * The practical operand size is 16-bit.
     */
#if 0 /* not necessary */
    pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif

    /*
     * Check that the destination register exists and can be used with this
     * instruction. The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg == X86_SREG_CS
        || iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading the register from memory. The access is word sized
         * regardless of operand size prefixes.
         */
        IEM_MC_BEGIN(2, 1);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x8f. */
FNIEMOP_DEF(iemOp_pop_Ev)
{
    /* This bugger is rather annoying as it requires rSP to be updated before
       doing the effective address calculations. Will eventually require a
       split between the R/M+SIB decoding and the effective address
       calculation - which is something that is required for any attempt at
       reusing this code for a recompiler. It may also be good to have if we
       need to delay #UD exception caused by invalid lock prefixes.

       For now, we'll do a mostly safe interpreter-only implementation here. */
    /** @todo What's the deal with the 'reg' field and pop Ev? Ignoring it for
     *        now until tests show it's checked. */
    IEMOP_MNEMONIC("pop Ev");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /* Register access is relatively easy and can share code. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /*
     * Memory target.
     *
     * Intel says that RSP is incremented before it's used in any effective
     * address calculations. This means some serious extra annoyance here since
     * we decode and calculate the effective address in one step and like to
     * delay committing registers till everything is done.
     *
     * So, we'll decode and calculate the effective address twice. This will
     * require some recoding if turned into a recompiler.
     */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */

#ifndef TST_IEM_CHECK_MC
    /* Calc effective address with modified ESP. */
    uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
    RTGCPTR GCPtrEff;
    VBOXSTRICTRC rcStrict;
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pIemCpu->offOpcode = offOpcodeSaved;

    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint64_t const RspSaved = pCtx->rsp;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT: iemRegAddToRsp(pCtx, 2); break;
        case IEMMODE_32BIT: iemRegAddToRsp(pCtx, 4); break;
        case IEMMODE_64BIT: iemRegAddToRsp(pCtx, 8); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
    Assert(rcStrict == VINF_SUCCESS);
    pCtx->rsp = RspSaved;

    /* Perform the operation - this should be CImpl. */
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Value;
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
            break;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Value;
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
            break;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Value;
            rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
            break;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    if (rcStrict == VINF_SUCCESS)
    {
        pCtx->rsp = TmpRsp.u;
        iemRegUpdateRip(pIemCpu);
    }
    return rcStrict;

#else
    return VERR_NOT_IMPLEMENTED;
#endif
}
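
/* Informal worked example (added commentary): in 32-bit code with
   ESP=0x1000, 'pop dword [esp]' reads the value from 0x1000, but the store
   goes to 0x1004 because the effective address is calculated with the
   already incremented ESP - which is exactly why the code above runs
   iemOpHlpCalcRmEffAddr a second time with rSP adjusted. */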


/**
 * Common 'xchg reg,rAX' helper.
 */
FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    iReg |= pIemCpu->uRexB;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Tmp1);
            IEM_MC_LOCAL(uint16_t, u16Tmp2);
            IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
            IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
            IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Tmp1);
            IEM_MC_LOCAL(uint32_t, u32Tmp2);
            IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
            IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
            IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Tmp1);
            IEM_MC_LOCAL(uint64_t, u64Tmp2);
            IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
            IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
            IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
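
/* Note (informal): plain 0x90 never reaches this helper - it is decoded as
   NOP/PAUSE below - so iReg is 1..7 for 0x91..0x97 or has bit 3 set via
   REX.B, never xAX itself; both fetches above therefore complete before
   either store and no aliasing case needs special handling. */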


/** Opcode 0x90. */
FNIEMOP_DEF(iemOp_nop)
{
    /* R8/R8D and RAX/EAX can be exchanged. */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
    {
        IEMOP_MNEMONIC("xchg r8,rAX");
        return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ) /* pause is F3 90, i.e. repz nop */
        IEMOP_MNEMONIC("pause");
    else
        IEMOP_MNEMONIC("nop");
    IEM_MC_BEGIN(0, 0);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x91. */
FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
{
    IEMOP_MNEMONIC("xchg rCX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
}


/** Opcode 0x92. */
FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
{
    IEMOP_MNEMONIC("xchg rDX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
}


/** Opcode 0x93. */
FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
{
    IEMOP_MNEMONIC("xchg rBX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
}


/** Opcode 0x94. */
FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
{
    IEMOP_MNEMONIC("xchg rSP,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
}


/** Opcode 0x95. */
FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
{
    IEMOP_MNEMONIC("xchg rBP,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
}


/** Opcode 0x96. */
FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
{
    IEMOP_MNEMONIC("xchg rSI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
}


/** Opcode 0x97. */
FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
{
    IEMOP_MNEMONIC("xchg rDI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
}


/** Opcode 0x98. */
FNIEMOP_STUB(iemOp_cbw);


/** Opcode 0x99. */
FNIEMOP_DEF(iemOp_cwd)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEMOP_MNEMONIC("cwd");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEMOP_MNEMONIC("cdq");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEMOP_MNEMONIC("cqo");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
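
/* Informal worked example (added commentary): cwd with AX=0x8000 sets
   DX=0xFFFF, while cdq with EAX=0x7FFFFFFF clears EDX; only the sign bit
   tested above matters and rAX itself is left untouched. */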


/** Opcode 0x9a. */
FNIEMOP_STUB(iemOp_call_Ap);


/** Opcode 0x9b. (aka fwait) */
FNIEMOP_DEF(iemOp_wait)
{
    IEMOP_MNEMONIC("wait");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x9c. */
FNIEMOP_DEF(iemOp_pushf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
}


/** Opcode 0x9d. */
FNIEMOP_DEF(iemOp_popf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
}


/** Opcode 0x9e. */
FNIEMOP_STUB(iemOp_sahf);
/** Opcode 0x9f. */
FNIEMOP_STUB(iemOp_lahf);

/**
 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
 * iemOp_mov_Ov_rAX to fetch the moffsXX bit of the opcode and fend off lock
 * prefixes. Will return on failures.
 * @param   a_GCPtrMemOff   The variable to store the offset in.
 */
#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
    do \
    { \
        switch (pIemCpu->enmEffAddrMode) \
        { \
            case IEMMODE_16BIT: \
            { \
                uint16_t u16Off; IEM_OPCODE_GET_NEXT_U16(&u16Off); \
                (a_GCPtrMemOff) = u16Off; \
                break; \
            } \
            case IEMMODE_32BIT: \
            { \
                uint32_t u32Off; IEM_OPCODE_GET_NEXT_U32(&u32Off); \
                (a_GCPtrMemOff) = u32Off; \
                break; \
            } \
            case IEMMODE_64BIT: \
                IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
                break; \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
        IEMOP_HLP_NO_LOCK_PREFIX(); \
    } while (0)
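
/* Informal worked example (added commentary): in 16-bit code, A1 34 12 is
   'mov ax, [0x1234]' - a two byte moffs with no ModR/M byte - while in
   64-bit code the default moffs is a full 8 bytes, which is what the
   IEMMODE_64BIT case above fetches with IEM_OPCODE_GET_NEXT_U64. */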

/** Opcode 0xa0. */
FNIEMOP_DEF(iemOp_mov_Al_Ob)
{
    /*
     * Get the offset and fend off lock prefixes.
     */
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch AL.
     */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint8_t, u8Tmp);
    IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
    IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0xa1. */
FNIEMOP_DEF(iemOp_mov_rAX_Ov)
{
    /*
     * Get the offset and fend off lock prefixes.
     */
    IEMOP_MNEMONIC("mov rAX,Ov");
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch rAX.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}


/** Opcode 0xa2. */
FNIEMOP_DEF(iemOp_mov_Ob_AL)
{
    /*
     * Get the offset and fend off lock prefixes.
     */
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Store AL.
     */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint8_t, u8Tmp);
    IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
    IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0xa3. */
FNIEMOP_DEF(iemOp_mov_Ov_rAX)
{
    /*
     * Get the offset and fend off lock prefixes.
     */
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Store rAX.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
            IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
            IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
            IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}

/** Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv */
#define IEM_MOVS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
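
/* Note (informal): IEM_MOVS_CASE(8, 32) expands to one unrepeated movsb
   step - load a byte from iEffSeg:ESI (DS unless overridden), store it to
   ES:EDI, then step ESI/EDI by 1 up or down according to EFLAGS.DF; the
   REP forms are routed to the iemCImpl_rep_movs_* workers in the functions
   below instead. */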

/** Opcode 0xa4. */
FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movsb Xb,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movsb Xb,Yb");

    /*
     * Sharing case implementation with movs[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}


/** Opcode 0xa5. */
FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movs Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movs Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with movsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}

#undef IEM_MOVS_CASE

/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv */
#define IEM_CMPS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(3, 3); \
        IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
        IEM_MC_ARG(uint##ValBits##_t, uValue2, 1); \
        IEM_MC_ARG(uint32_t *, pEFlags, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
        IEM_MC_REF_LOCAL(puValue1, uValue1); \
        IEM_MC_REF_EFLAGS(pEFlags); \
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
        \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();

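/* Note (informal): cmps compares iEffSeg:rSI (DS unless overridden) against
   ES:rDI, setting the flags as 'cmp [rsi],[es:rdi]' would; neither operand
   is written, which is why uValue1 lives in a local that is merely
   referenced for the arithmetic helper. */
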
/** Opcode 0xa6. */
FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe cmps Xb,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne cmps Xb,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("cmps Xb,Yb");

    /*
     * Sharing case implementation with cmps[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}


/** Opcode 0xa7. */
FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    IEMOP_MNEMONIC("cmps Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with cmpsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}

#undef IEM_CMPS_CASE

/** Opcode 0xa8. */
FNIEMOP_DEF(iemOp_test_AL_Ib)
{
    IEMOP_MNEMONIC("test al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
}


/** Opcode 0xa9. */
FNIEMOP_DEF(iemOp_test_eAX_Iz)
{
    IEMOP_MNEMONIC("test rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
}


/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX */
#define IEM_STOS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();

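/* Note (informal): one stos step stores rAX truncated to the operand size
   at ES:rDI and steps rDI; 'rep stosb' with AL=0 and ECX=0x1000 is the
   classic 4KB memset idiom and is routed to the iemCImpl_stos_* workers in
   the functions below. */
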
/** Opcode 0xaa. */
FNIEMOP_DEF(iemOp_stosb_Yb_AL)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yb,al");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yb,al");

    /*
     * Sharing case implementation with stos[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}


/** Opcode 0xab. */
FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yv,rAX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yv,rAX");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with stosb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}

#undef IEM_STOS_CASE

/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv */
#define IEM_LODS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(0, 2); \
        IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
        IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();

/** Opcode 0xac. */
FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lodsb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lodsb al,Xb");

    /*
     * Sharing case implementation with lods[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}


/** Opcode 0xad. */
FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lods rAX,Xv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lods rAX,Xv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with lodsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}

#undef IEM_LODS_CASE

/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv */
#define IEM_SCAS_CASE(ValBits, AddrBits) \
        IEM_MC_BEGIN(3, 2); \
        IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
        IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
        IEM_MC_ARG(uint32_t *, pEFlags, 2); \
        IEM_MC_LOCAL(RTGCPTR, uAddr); \
        \
        IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
        IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
        IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
        IEM_MC_REF_EFLAGS(pEFlags); \
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
        \
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
            IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ELSE() { \
            IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        } IEM_MC_ENDIF(); \
        IEM_MC_ADVANCE_RIP(); \
        IEM_MC_END();
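
/* Note (informal): scas compares rAX with ES:rDI only - rSI is unused and,
   unlike the other string instructions, no segment override applies;
   'repne scasb' with AL=0 is the classic strlen idiom, stopping once the
   terminating zero byte sets ZF. */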

/** Opcode 0xae. */
FNIEMOP_DEF(iemOp_scasb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scasb al,Xb");

    /*
     * Sharing case implementation with scas[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}


/** Opcode 0xaf. */
FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scas rAX,Xv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo Is this right? 16-bit addressing cannot be encoded in 64-bit mode, but 32-bit can, right? */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scas rAX,Xv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scas rAX,Xv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with scasb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}

#undef IEM_SCAS_CASE

/**
 * Common 'mov r8, imm8' helper.
 */
FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
    IEM_MC_STORE_GREG_U8(iReg, u8Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
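
/* Note (informal): the indices handed to the helper above follow the legacy
   byte register numbering where 4..7 select AH/CH/DH/BH - unless a REX
   prefix is present, in which case they select SPL/BPL/SIL/DIL - hence the
   seemingly odd X86_GREG_xSP..xDI arguments in the 0xb4..0xb7 handlers
   below. */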


/** Opcode 0xb0. */
FNIEMOP_DEF(iemOp_mov_AL_Ib)
{
    IEMOP_MNEMONIC("mov AL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX);
}


/** Opcode 0xb1. */
FNIEMOP_DEF(iemOp_CL_Ib)
{
    IEMOP_MNEMONIC("mov CL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX);
}


/** Opcode 0xb2. */
FNIEMOP_DEF(iemOp_DL_Ib)
{
    IEMOP_MNEMONIC("mov DL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX);
}


/** Opcode 0xb3. */
FNIEMOP_DEF(iemOp_BL_Ib)
{
    IEMOP_MNEMONIC("mov BL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX);
}


/** Opcode 0xb4. */
FNIEMOP_DEF(iemOp_mov_AH_Ib)
{
    IEMOP_MNEMONIC("mov AH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP);
}


/** Opcode 0xb5. */
FNIEMOP_DEF(iemOp_CH_Ib)
{
    IEMOP_MNEMONIC("mov CH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP);
}


/** Opcode 0xb6. */
FNIEMOP_DEF(iemOp_DH_Ib)
{
    IEMOP_MNEMONIC("mov DH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI);
}


/** Opcode 0xb7. */
FNIEMOP_DEF(iemOp_BH_Ib)
{
    IEMOP_MNEMONIC("mov BH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI);
}


/**
 * Common 'mov regX,immX' helper.
 */
FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
            IEM_MC_STORE_GREG_U16(iReg, u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
            IEM_MC_STORE_GREG_U32(iReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
            IEM_MC_STORE_GREG_U64(iReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
        }
    }

    return VINF_SUCCESS;
}


/** Opcode 0xb8. */
FNIEMOP_DEF(iemOp_eAX_Iv)
{
    IEMOP_MNEMONIC("mov rAX,Iv");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX);
}


/** Opcode 0xb9. */
FNIEMOP_DEF(iemOp_eCX_Iv)
{
    IEMOP_MNEMONIC("mov rCX,Iv");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX);
}


/** Opcode 0xba. */
FNIEMOP_DEF(iemOp_eDX_Iv)
{
    IEMOP_MNEMONIC("mov rDX,Iv");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX);
}


/** Opcode 0xbb. */
FNIEMOP_DEF(iemOp_eBX_Iv)
{
    IEMOP_MNEMONIC("mov rBX,Iv");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX);
}


/** Opcode 0xbc. */
FNIEMOP_DEF(iemOp_eSP_Iv)
{
    IEMOP_MNEMONIC("mov rSP,Iv");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP);
}


/** Opcode 0xbd. */
FNIEMOP_DEF(iemOp_eBP_Iv)
{
    IEMOP_MNEMONIC("mov rBP,Iv");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP);
}


/** Opcode 0xbe. */
FNIEMOP_DEF(iemOp_eSI_Iv)
{
    IEMOP_MNEMONIC("mov rSI,Iv");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI);
}


/** Opcode 0xbf. */
FNIEMOP_DEF(iemOp_eDI_Iv)
{
    IEMOP_MNEMONIC("mov rDI,Iv");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI);
}
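
/* Note (informal): 0xb8+r is the one encoding that takes a full 8-byte
   immediate - with REX.W, e.g. 48 B8 imm64 is 'movabs rax, imm64' - which
   is why the IEMMODE_64BIT case of the helper above fetches a u64 instead
   of a sign-extended u32 like most other instructions. */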


/** Opcode 0xc0. */
FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEM_MC_ASSIGN(cShiftArg, cShift);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
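
/* Informal worked example (added commentary): C0 E0 04 has bRm=0xE0, i.e.
   mod=3, reg=100 (shl per the switch above) and rm=000, so it decodes to
   'shl al, 4'; reg=110 is undefined and the decoder bails out before even
   reading the shift count byte. */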
9012
9013
9014/** Opcode 0xc1. */
9015FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
9016{
9017 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9018 PCIEMOPSHIFTSIZES pImpl;
9019 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9020 {
9021 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
9022 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
9023 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
9024 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
9025 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
9026 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
9027 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
9028 case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
9029 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9030 }
9031 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9032
9033 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9034 {
9035 /* register */
9036 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9037 IEMOP_HLP_NO_LOCK_PREFIX();
9038 switch (pIemCpu->enmEffOpSize)
9039 {
9040 case IEMMODE_16BIT:
9041 IEM_MC_BEGIN(3, 0);
9042 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9043 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9044 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9045 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9046 IEM_MC_REF_EFLAGS(pEFlags);
9047 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9048 IEM_MC_ADVANCE_RIP();
9049 IEM_MC_END();
9050 return VINF_SUCCESS;
9051
9052 case IEMMODE_32BIT:
9053 IEM_MC_BEGIN(3, 0);
9054 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9055 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9056 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9057 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9058 IEM_MC_REF_EFLAGS(pEFlags);
9059 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9060 IEM_MC_ADVANCE_RIP();
9061 IEM_MC_END();
9062 return VINF_SUCCESS;
9063
9064 case IEMMODE_64BIT:
9065 IEM_MC_BEGIN(3, 0);
9066 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9067 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9068 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9069 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9070 IEM_MC_REF_EFLAGS(pEFlags);
9071 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9072 IEM_MC_ADVANCE_RIP();
9073 IEM_MC_END();
9074 return VINF_SUCCESS;
9075
9076 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9077 }
9078 }
9079 else
9080 {
9081 /* memory */
9082 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9083 switch (pIemCpu->enmEffOpSize)
9084 {
9085 case IEMMODE_16BIT:
9086 IEM_MC_BEGIN(3, 2);
9087 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9088 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9089 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9090 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9091
9092 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9093 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9094 IEM_MC_ASSIGN(cShiftArg, cShift);
9095 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9096 IEM_MC_FETCH_EFLAGS(EFlags);
9097 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9098
9099 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9100 IEM_MC_COMMIT_EFLAGS(EFlags);
9101 IEM_MC_ADVANCE_RIP();
9102 IEM_MC_END();
9103 return VINF_SUCCESS;
9104
9105 case IEMMODE_32BIT:
9106 IEM_MC_BEGIN(3, 2);
9107 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9108 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9109 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9110 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9111
9112 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9113 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9114 IEM_MC_ASSIGN(cShiftArg, cShift);
9115 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9116 IEM_MC_FETCH_EFLAGS(EFlags);
9117 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9118
9119 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9120 IEM_MC_COMMIT_EFLAGS(EFlags);
9121 IEM_MC_ADVANCE_RIP();
9122 IEM_MC_END();
9123 return VINF_SUCCESS;
9124
9125 case IEMMODE_64BIT:
9126 IEM_MC_BEGIN(3, 2);
9127 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9128 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9129 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9130 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9131
9132 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9133 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9134 IEM_MC_ASSIGN(cShiftArg, cShift);
9135 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9136 IEM_MC_FETCH_EFLAGS(EFlags);
9137 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9138
9139 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9140 IEM_MC_COMMIT_EFLAGS(EFlags);
9141 IEM_MC_ADVANCE_RIP();
9142 IEM_MC_END();
9143 return VINF_SUCCESS;
9144
9145 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9146 }
9147 }
9148}
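
/* Illustrative sketch only, not used by the emulation: how the ModR/M reg
   field (bits 5:3) selects the Group 2 operation dispatched above. The
   iemExample* name is hypothetical; assumes the usual <stdint.h> types. */
#if 0
static const char *iemExampleGrp2OpName(uint8_t bRm)
{
    static const char * const s_apszOps[8] =
    { "rol", "ror", "rcl", "rcr", "shl", "shr", "<invalid>", "sar" };
    return s_apszOps[(bRm >> 3) & 7];   /* /6 is undefined for Group 2. */
}
#endif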
9149
9150
9151/** Opcode 0xc2. */
9152FNIEMOP_DEF(iemOp_retn_Iw)
9153{
9154 IEMOP_MNEMONIC("retn Iw");
9155 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9156 IEMOP_HLP_NO_LOCK_PREFIX();
9157 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9158 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
9159}
9160
9161
9162/** Opcode 0xc3. */
9163FNIEMOP_DEF(iemOp_retn)
9164{
9165 IEMOP_MNEMONIC("retn");
9166 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9167 IEMOP_HLP_NO_LOCK_PREFIX();
9168 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
9169}
9170
9171
9172/** Opcode 0xc4. */
9173FNIEMOP_DEF(iemOp_les_Gv_Mp)
9174{
9175 IEMOP_MNEMONIC("les Gv,Mp");
9176 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_ES);
9177}
9178
9179
9180/** Opcode 0xc5. */
9181FNIEMOP_DEF(iemOp_lds_Gv_Mp)
9182{
9183 IEMOP_MNEMONIC("lds Gv,Mp");
9184 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_DS);
9185}
9186
9187
9188/** Opcode 0xc6. */
9189FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
9190{
9191 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9192 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9193 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
9194 return IEMOP_RAISE_INVALID_OPCODE();
9195 IEMOP_MNEMONIC("mov Eb,Ib");
9196
9197 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9198 {
9199 /* register access */
9200 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9201 IEM_MC_BEGIN(0, 0);
9202 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
9203 IEM_MC_ADVANCE_RIP();
9204 IEM_MC_END();
9205 }
9206 else
9207 {
9208 /* memory access. */
9209 IEM_MC_BEGIN(0, 1);
9210 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9211 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9212 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9213 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
9214 IEM_MC_ADVANCE_RIP();
9215 IEM_MC_END();
9216 }
9217 return VINF_SUCCESS;
9218}
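
/* Encoding sketch for the register form handled above ('mov r/m8, imm8',
   C6 /0 ib); illustration only and ignores REX. The iemExample* name is
   hypothetical. */
#if 0
static size_t iemExampleEncodeMovRegImm8(uint8_t *pbDst, uint8_t iReg, uint8_t u8Imm)
{
    pbDst[0] = 0xc6;                    /* opcode */
    pbDst[1] = (uint8_t)(0xc0 | iReg);  /* mod=11b, reg=/0, rm=register */
    pbDst[2] = u8Imm;                   /* immediate byte */
    return 3;
}
#endif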
9219
9220
9221/** Opcode 0xc7. */
9222FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
9223{
9224 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9225 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9226 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Ev,Iz in this group. */
9227 return IEMOP_RAISE_INVALID_OPCODE();
9228 IEMOP_MNEMONIC("mov Ev,Iz");
9229
9230 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9231 {
9232 /* register access */
9233 switch (pIemCpu->enmEffOpSize)
9234 {
9235 case IEMMODE_16BIT:
9236 IEM_MC_BEGIN(0, 0);
9237 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9238 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
9239 IEM_MC_ADVANCE_RIP();
9240 IEM_MC_END();
9241 return VINF_SUCCESS;
9242
9243 case IEMMODE_32BIT:
9244 IEM_MC_BEGIN(0, 0);
9245 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9246 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
9247 IEM_MC_ADVANCE_RIP();
9248 IEM_MC_END();
9249 return VINF_SUCCESS;
9250
9251 case IEMMODE_64BIT:
9252 IEM_MC_BEGIN(0, 0);
9253 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); /* Iz: imm32 sign-extended, there is no imm64 form of C7. */
9254 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
9255 IEM_MC_ADVANCE_RIP();
9256 IEM_MC_END();
9257 return VINF_SUCCESS;
9258
9259 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9260 }
9261 }
9262 else
9263 {
9264 /* memory access. */
9265 switch (pIemCpu->enmEffOpSize)
9266 {
9267 case IEMMODE_16BIT:
9268 IEM_MC_BEGIN(0, 1);
9269 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9270 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9271 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9272 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
9273 IEM_MC_ADVANCE_RIP();
9274 IEM_MC_END();
9275 return VINF_SUCCESS;
9276
9277 case IEMMODE_32BIT:
9278 IEM_MC_BEGIN(0, 1);
9279 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9280 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9281 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9282 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
9283 IEM_MC_ADVANCE_RIP();
9284 IEM_MC_END();
9285 return VINF_SUCCESS;
9286
9287 case IEMMODE_64BIT:
9288 IEM_MC_BEGIN(0, 1);
9289 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9290 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9291 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); /* Iz: imm32 sign-extended, there is no imm64 form of C7. */
9292 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
9293 IEM_MC_ADVANCE_RIP();
9294 IEM_MC_END();
9295 return VINF_SUCCESS;
9296
9297 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9298 }
9299 }
9300}
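
/* Worked example of the Iz immediate in 64-bit mode (illustration only):
   REX.W C7 /0 carries an imm32 that is sign-extended to 64 bits, so
   storing 0xffffffff yields 0xffffffffffffffff in the destination. */
#if 0
static uint64_t iemExampleSignExtendIz(uint32_t u32Imm)
{
    return (uint64_t)(int64_t)(int32_t)u32Imm; /* 0x80000000 -> 0xffffffff80000000 */
}
#endif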
9301
9302
9303
9304
9305/** Opcode 0xc8. */
9306FNIEMOP_STUB(iemOp_enter_Iw_Ib);
9307
9308
9309/** Opcode 0xc9. */
9310FNIEMOP_DEF(iemOp_leave)
9311{
9312 IEMOP_MNEMONIC("leave");
9313 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9314 IEMOP_HLP_NO_LOCK_PREFIX();
9315 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
9316}
9317
9318
9319/** Opcode 0xca. */
9320FNIEMOP_DEF(iemOp_retf_Iw)
9321{
9322 IEMOP_MNEMONIC("retf Iw");
9323 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9324 IEMOP_HLP_NO_LOCK_PREFIX();
9325 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9326 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
9327}
9328
9329
9330/** Opcode 0xcb. */
9331FNIEMOP_DEF(iemOp_retf)
9332{
9333 IEMOP_MNEMONIC("retf");
9334 IEMOP_HLP_NO_LOCK_PREFIX();
9335 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9336 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
9337}
9338
9339
9340/** Opcode 0xcc. */
9341FNIEMOP_DEF(iemOp_int_3)
9342{
9343 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
9344}
9345
9346
9347/** Opcode 0xcd. */
9348FNIEMOP_DEF(iemOp_int_Ib)
9349{
9350 uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
9351 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
9352}
9353
9354
9355/** Opcode 0xce. */
9356FNIEMOP_DEF(iemOp_into)
9357{
9358 IEM_MC_BEGIN(2, 0);
9359 IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
9360 IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
9361 IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
9362 IEM_MC_END();
9363 return VINF_SUCCESS;
9364}
9365
9366
9367/** Opcode 0xcf. */
9368FNIEMOP_DEF(iemOp_iret)
9369{
9370 IEMOP_MNEMONIC("iret");
9371 IEMOP_HLP_NO_LOCK_PREFIX();
9372 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
9373}
9374
9375
9376/** Opcode 0xd0. */
9377FNIEMOP_DEF(iemOp_Grp2_Eb_1)
9378{
9379 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9380 PCIEMOPSHIFTSIZES pImpl;
9381 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9382 {
9383 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
9384 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
9385 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
9386 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
9387 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
9388 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
9389 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
9390 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9391 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9392 }
9393 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9394
9395 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9396 {
9397 /* register */
9398 IEMOP_HLP_NO_LOCK_PREFIX();
9399 IEM_MC_BEGIN(3, 0);
9400 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9401 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9402 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9403 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9404 IEM_MC_REF_EFLAGS(pEFlags);
9405 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9406 IEM_MC_ADVANCE_RIP();
9407 IEM_MC_END();
9408 }
9409 else
9410 {
9411 /* memory */
9412 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9413 IEM_MC_BEGIN(3, 2);
9414 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9415 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9416 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9417 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9418
9419 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9420 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9421 IEM_MC_FETCH_EFLAGS(EFlags);
9422 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9423
9424 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9425 IEM_MC_COMMIT_EFLAGS(EFlags);
9426 IEM_MC_ADVANCE_RIP();
9427 IEM_MC_END();
9428 }
9429 return VINF_SUCCESS;
9430}
9431
9432
9433
9434/** Opcode 0xd1. */
9435FNIEMOP_DEF(iemOp_Grp2_Ev_1)
9436{
9437 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9438 PCIEMOPSHIFTSIZES pImpl;
9439 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9440 {
9441 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
9442 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
9443 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
9444 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
9445 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
9446 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
9447 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
9448 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9449 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9450 }
9451 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9452
9453 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9454 {
9455 /* register */
9456 IEMOP_HLP_NO_LOCK_PREFIX();
9457 switch (pIemCpu->enmEffOpSize)
9458 {
9459 case IEMMODE_16BIT:
9460 IEM_MC_BEGIN(3, 0);
9461 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9462 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9463 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9464 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9465 IEM_MC_REF_EFLAGS(pEFlags);
9466 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9467 IEM_MC_ADVANCE_RIP();
9468 IEM_MC_END();
9469 return VINF_SUCCESS;
9470
9471 case IEMMODE_32BIT:
9472 IEM_MC_BEGIN(3, 0);
9473 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9474 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9475 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9476 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9477 IEM_MC_REF_EFLAGS(pEFlags);
9478 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9479 IEM_MC_ADVANCE_RIP();
9480 IEM_MC_END();
9481 return VINF_SUCCESS;
9482
9483 case IEMMODE_64BIT:
9484 IEM_MC_BEGIN(3, 0);
9485 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9486 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9487 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9488 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9489 IEM_MC_REF_EFLAGS(pEFlags);
9490 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9491 IEM_MC_ADVANCE_RIP();
9492 IEM_MC_END();
9493 return VINF_SUCCESS;
9494
9495 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9496 }
9497 }
9498 else
9499 {
9500 /* memory */
9501 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9502 switch (pIemCpu->enmEffOpSize)
9503 {
9504 case IEMMODE_16BIT:
9505 IEM_MC_BEGIN(3, 2);
9506 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9507 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9508 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9509 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9510
9511 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9512 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9513 IEM_MC_FETCH_EFLAGS(EFlags);
9514 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9515
9516 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9517 IEM_MC_COMMIT_EFLAGS(EFlags);
9518 IEM_MC_ADVANCE_RIP();
9519 IEM_MC_END();
9520 return VINF_SUCCESS;
9521
9522 case IEMMODE_32BIT:
9523 IEM_MC_BEGIN(3, 2);
9524 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9525 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9526 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9527 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9528
9529 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9530 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9531 IEM_MC_FETCH_EFLAGS(EFlags);
9532 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9533
9534 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9535 IEM_MC_COMMIT_EFLAGS(EFlags);
9536 IEM_MC_ADVANCE_RIP();
9537 IEM_MC_END();
9538 return VINF_SUCCESS;
9539
9540 case IEMMODE_64BIT:
9541 IEM_MC_BEGIN(3, 2);
9542 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9543 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9544 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9545 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9546
9547 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9548 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9549 IEM_MC_FETCH_EFLAGS(EFlags);
9550 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9551
9552 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9553 IEM_MC_COMMIT_EFLAGS(EFlags);
9554 IEM_MC_ADVANCE_RIP();
9555 IEM_MC_END();
9556 return VINF_SUCCESS;
9557
9558 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9559 }
9560 }
9561}
9562
9563
9564/** Opcode 0xd2. */
9565FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
9566{
9567 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9568 PCIEMOPSHIFTSIZES pImpl;
9569 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9570 {
9571 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
9572 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
9573 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
9574 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
9575 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
9576 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
9577 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
9578 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9579 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
9580 }
9581 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9582
9583 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9584 {
9585 /* register */
9586 IEMOP_HLP_NO_LOCK_PREFIX();
9587 IEM_MC_BEGIN(3, 0);
9588 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9589 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9590 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9591 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9592 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9593 IEM_MC_REF_EFLAGS(pEFlags);
9594 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9595 IEM_MC_ADVANCE_RIP();
9596 IEM_MC_END();
9597 }
9598 else
9599 {
9600 /* memory */
9601 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9602 IEM_MC_BEGIN(3, 2);
9603 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9604 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9605 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9606 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9607
9608 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9609 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9610 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9611 IEM_MC_FETCH_EFLAGS(EFlags);
9612 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9613
9614 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9615 IEM_MC_COMMIT_EFLAGS(EFlags);
9616 IEM_MC_ADVANCE_RIP();
9617 IEM_MC_END();
9618 }
9619 return VINF_SUCCESS;
9620}
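
/* Sketch of the architectural CL count masking the pfnNormalU* workers are
   expected to apply (illustration only): the count is ANDed with 0x1f, or
   0x3f for 64-bit operands; RCL/RCR additionally reduce modulo width+1,
   which is not shown here. */
#if 0
static uint8_t iemExampleMaskShiftCount(uint8_t cl, int fRexW)
{
    return cl & (fRexW ? 0x3f : 0x1f);
}
#endif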
9621
9622
9623/** Opcode 0xd3. */
9624FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
9625{
9626 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9627 PCIEMOPSHIFTSIZES pImpl;
9628 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9629 {
9630 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
9631 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
9632 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
9633 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
9634 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
9635 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
9636 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
9637 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9638 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9639 }
9640 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9641
9642 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9643 {
9644 /* register */
9645 IEMOP_HLP_NO_LOCK_PREFIX();
9646 switch (pIemCpu->enmEffOpSize)
9647 {
9648 case IEMMODE_16BIT:
9649 IEM_MC_BEGIN(3, 0);
9650 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9651 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9652 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9653 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9654 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9655 IEM_MC_REF_EFLAGS(pEFlags);
9656 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9657 IEM_MC_ADVANCE_RIP();
9658 IEM_MC_END();
9659 return VINF_SUCCESS;
9660
9661 case IEMMODE_32BIT:
9662 IEM_MC_BEGIN(3, 0);
9663 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9664 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9665 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9666 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9667 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9668 IEM_MC_REF_EFLAGS(pEFlags);
9669 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9670 IEM_MC_ADVANCE_RIP();
9671 IEM_MC_END();
9672 return VINF_SUCCESS;
9673
9674 case IEMMODE_64BIT:
9675 IEM_MC_BEGIN(3, 0);
9676 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9677 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9678 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9679 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9680 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9681 IEM_MC_REF_EFLAGS(pEFlags);
9682 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9683 IEM_MC_ADVANCE_RIP();
9684 IEM_MC_END();
9685 return VINF_SUCCESS;
9686
9687 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9688 }
9689 }
9690 else
9691 {
9692 /* memory */
9693 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9694 switch (pIemCpu->enmEffOpSize)
9695 {
9696 case IEMMODE_16BIT:
9697 IEM_MC_BEGIN(3, 2);
9698 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9699 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9700 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9701 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9702
9703 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9704 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9705 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9706 IEM_MC_FETCH_EFLAGS(EFlags);
9707 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9708
9709 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9710 IEM_MC_COMMIT_EFLAGS(EFlags);
9711 IEM_MC_ADVANCE_RIP();
9712 IEM_MC_END();
9713 return VINF_SUCCESS;
9714
9715 case IEMMODE_32BIT:
9716 IEM_MC_BEGIN(3, 2);
9717 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9718 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9719 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9720 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9721
9722 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9723 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9724 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9725 IEM_MC_FETCH_EFLAGS(EFlags);
9726 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9727
9728 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9729 IEM_MC_COMMIT_EFLAGS(EFlags);
9730 IEM_MC_ADVANCE_RIP();
9731 IEM_MC_END();
9732 return VINF_SUCCESS;
9733
9734 case IEMMODE_64BIT:
9735 IEM_MC_BEGIN(3, 2);
9736 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9737 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9738 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9739 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9740
9741 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9742 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9743 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9744 IEM_MC_FETCH_EFLAGS(EFlags);
9745 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9746
9747 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9748 IEM_MC_COMMIT_EFLAGS(EFlags);
9749 IEM_MC_ADVANCE_RIP();
9750 IEM_MC_END();
9751 return VINF_SUCCESS;
9752
9753 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9754 }
9755 }
9756}
9757
9758/** Opcode 0xd4. */
9759FNIEMOP_STUB(iemOp_aam_Ib);
9760/** Opcode 0xd5. */
9761FNIEMOP_STUB(iemOp_aad_Ib);
9762
9763
9764/** Opcode 0xd7. */
9765FNIEMOP_DEF(iemOp_xlat)
9766{
9767 IEMOP_MNEMONIC("xlat");
9768 IEMOP_HLP_NO_LOCK_PREFIX();
9769 switch (pIemCpu->enmEffAddrMode)
9770 {
9771 case IEMMODE_16BIT:
9772 IEM_MC_BEGIN(2, 0);
9773 IEM_MC_LOCAL(uint8_t, u8Tmp);
9774 IEM_MC_LOCAL(uint16_t, u16Addr);
9775 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX);
9776 IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX);
9777 IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
9778 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
9779 IEM_MC_ADVANCE_RIP();
9780 IEM_MC_END();
9781 return VINF_SUCCESS;
9782
9783 case IEMMODE_32BIT:
9784 IEM_MC_BEGIN(2, 0);
9785 IEM_MC_LOCAL(uint8_t, u8Tmp);
9786 IEM_MC_LOCAL(uint32_t, u32Addr);
9787 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
9788 IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
9789 IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
9790 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
9791 IEM_MC_ADVANCE_RIP();
9792 IEM_MC_END();
9793 return VINF_SUCCESS;
9794
9795 case IEMMODE_64BIT:
9796 IEM_MC_BEGIN(2, 0);
9797 IEM_MC_LOCAL(uint8_t, u8Tmp);
9798 IEM_MC_LOCAL(uint64_t, u64Addr);
9799 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
9800 IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
9801 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
9802 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
9803 IEM_MC_ADVANCE_RIP();
9804 IEM_MC_END();
9805 return VINF_SUCCESS;
9806
9807 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9808 }
9809}
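
/* Semantics sketch for xlat (illustration only): AL is replaced by the byte
   at seg:[rBX + AL]; segmentation and address-size truncation are handled
   by the IEM_MC code above. */
#if 0
static uint8_t iemExampleXlat(const uint8_t *pbTable, uint8_t al)
{
    return pbTable[al];                 /* AL = [rBX + AL] */
}
#endif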
9810
9811
9812/** Opcode 0xd8. */
9813FNIEMOP_STUB(iemOp_EscF0);
9814/** Opcode 0xd9. */
9815FNIEMOP_STUB(iemOp_EscF1);
9816/** Opcode 0xda. */
9817FNIEMOP_STUB(iemOp_EscF2);
9818
9819
9820/** Opcode 0xdb /0. */
9821FNIEMOP_STUB_1(iemOp_fild_dw, uint8_t, bRm);
9822/** Opcode 0xdb /1. */
9823FNIEMOP_STUB_1(iemOp_fisttp_dw, uint8_t, bRm);
9824/** Opcode 0xdb /2. */
9825FNIEMOP_STUB_1(iemOp_fist_dw, uint8_t, bRm);
9826/** Opcode 0xdb /3. */
9827FNIEMOP_STUB_1(iemOp_fistp_dw, uint8_t, bRm);
9828/** Opcode 0xdb /5. */
9829FNIEMOP_STUB_1(iemOp_fld_xr, uint8_t, bRm);
9830/** Opcode 0xdb /7. */
9831FNIEMOP_STUB_1(iemOp_fstp_xr, uint8_t, bRm);
9832
9833
9834/** Opcode 0xdb 0xe0. */
9835FNIEMOP_DEF(iemOp_fneni)
9836{
9837 IEMOP_MNEMONIC("fneni (8087/ign)");
9838 IEM_MC_BEGIN(0,0);
9839 IEM_MC_ADVANCE_RIP();
9840 IEM_MC_END();
9841 return VINF_SUCCESS;
9842}
9843
9844
9845/** Opcode 0xdb 0xe1. */
9846FNIEMOP_DEF(iemOp_fndisi)
9847{
9848 IEMOP_MNEMONIC("fndisi (8087/ign)");
9849 IEM_MC_BEGIN(0,0);
9850 IEM_MC_ADVANCE_RIP();
9851 IEM_MC_END();
9852 return VINF_SUCCESS;
9853}
9854
9855
9856/** Opcode 0xdb 0xe2. */
9857FNIEMOP_STUB(iemOp_fnclex);
9858
9859
9860/** Opcode 0xdb 0xe3. */
9861FNIEMOP_DEF(iemOp_fninit)
9862{
9863 IEMOP_MNEMONIC("fninit");
9864 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
9865}
9866
9867
9868/** Opcode 0xdb 0xe4. */
9869FNIEMOP_DEF(iemOp_fnsetpm)
9870{
9871 IEMOP_MNEMONIC("fnsetpm (80287/ign)"); /* set protected mode on fpu. */
9872 IEM_MC_BEGIN(0,0);
9873 IEM_MC_ADVANCE_RIP();
9874 IEM_MC_END();
9875 return VINF_SUCCESS;
9876}
9877
9878
9879/** Opcode 0xdb 0xe5. */
9880FNIEMOP_DEF(iemOp_frstpm)
9881{
9882 IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
9883 IEM_MC_BEGIN(0,0);
9884 IEM_MC_ADVANCE_RIP();
9885 IEM_MC_END();
9886 return VINF_SUCCESS;
9887}
9888
9889
9890/** Opcode 0xdb. */
9891FNIEMOP_DEF(iemOp_EscF3)
9892{
9893 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9894 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9895 {
9896 switch (bRm & 0xf8)
9897 {
9898 case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnb
9899 case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovne
9900 case 0xd0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnbe
9901 case 0xd8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnu
9902 case 0xe0:
9903 IEMOP_HLP_NO_LOCK_PREFIX();
9904 switch (bRm)
9905 {
9906 case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
9907 case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
9908 case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
9909 case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
9910 case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
9911 case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
9912 default: return IEMOP_RAISE_INVALID_OPCODE();
9913 }
9914 break;
9915 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomi
9916 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomi
9917 case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
9918 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9919 }
9920 }
9921 else
9922 {
9923 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9924 {
9925 case 0: return FNIEMOP_CALL_1(iemOp_fild_dw, bRm);
9926 case 1: return FNIEMOP_CALL_1(iemOp_fisttp_dw,bRm);
9927 case 2: return FNIEMOP_CALL_1(iemOp_fist_dw, bRm);
9928 case 3: return FNIEMOP_CALL_1(iemOp_fistp_dw, bRm);
9929 case 4: return IEMOP_RAISE_INVALID_OPCODE();
9930 case 5: return FNIEMOP_CALL_1(iemOp_fld_xr, bRm);
9931 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9932 case 7: return FNIEMOP_CALL_1(iemOp_fstp_xr, bRm);
9933 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9934 }
9935 }
9936}
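
/* Illustration of the ModR/M decomposition the escape dispatchers rely on
   (not used by the code above; the iemExample* name is hypothetical).
   mod == 3 selects the register forms, otherwise reg picks the memory op. */
#if 0
static void iemExampleSplitModRm(uint8_t bRm, uint8_t *pMod, uint8_t *pReg, uint8_t *pRm)
{
    *pMod = bRm >> 6;                   /* 3 == register operand */
    *pReg = (bRm >> 3) & 7;             /* opcode extension / st(i) group */
    *pRm  = bRm & 7;                    /* register or addressing form */
}
#endif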
9937
9938/** Opcode 0xdc. */
9939FNIEMOP_STUB(iemOp_EscF4);
9940/** Opcode 0xdd. */
9941FNIEMOP_STUB(iemOp_EscF5);
9942
9943/** Opcode 0xde 0xd9. */
9944FNIEMOP_STUB(iemOp_fcompp);
9945
9946/** Opcode 0xde. */
9947FNIEMOP_DEF(iemOp_EscF6)
9948{
9949 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9950 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9951 {
9952 switch (bRm & 0xf8)
9953 {
9954 case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fiaddp
9955 case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fimulp
9956 case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
9957 case 0xd8:
9958 switch (bRm)
9959 {
9960 case 0xd9: return FNIEMOP_CALL(iemOp_fcompp);
9961 default: return IEMOP_RAISE_INVALID_OPCODE();
9962 }
9963 case 0xe0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubrp
9964 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubp
9965 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivrp
9966 case 0xf8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivp
9967 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9968 }
9969 }
9970 else
9971 {
9972#if 0
9973 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9974 {
9975 case 0: return FNIEMOP_CALL_1(iemOp_fiadd_w, bRm);
9976 case 1: return FNIEMOP_CALL_1(iemOp_fimul_w, bRm);
9977 case 2: return FNIEMOP_CALL_1(iemOp_ficom_w, bRm);
9978 case 3: return FNIEMOP_CALL_1(iemOp_ficomp_w, bRm);
9979 case 4: return FNIEMOP_CALL_1(iemOp_fisub_w, bRm);
9980 case 5: return FNIEMOP_CALL_1(iemOp_fisubr_w, bRm);
9981 case 6: return FNIEMOP_CALL_1(iemOp_fidiv_w, bRm);
9982 case 7: return FNIEMOP_CALL_1(iemOp_fidivr_w, bRm);
9983 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9984 }
9985#endif
9986 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
9987 }
9988}
9989
9990
9991/** Opcode 0xdf 0xe0. */
9992FNIEMOP_DEF(iemOp_fnstsw_ax)
9993{
9994 IEMOP_MNEMONIC("fnstsw ax");
9995 IEMOP_HLP_NO_LOCK_PREFIX();
9996
9997 IEM_MC_BEGIN(0, 1);
9998 IEM_MC_LOCAL(uint16_t, u16Tmp);
9999 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
10000 IEM_MC_FETCH_FSW(u16Tmp);
10001 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
10002 IEM_MC_ADVANCE_RIP();
10003 IEM_MC_END();
10004 return VINF_SUCCESS;
10005}
10006
10007
10008/** Opcode 0xdf. */
10009FNIEMOP_DEF(iemOp_EscF7)
10010{
10011 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10012 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10013 {
10014 switch (bRm & 0xf8)
10015 {
10016 case 0xc0: return IEMOP_RAISE_INVALID_OPCODE();
10017 case 0xc8: return IEMOP_RAISE_INVALID_OPCODE();
10018 case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
10019 case 0xd8: return IEMOP_RAISE_INVALID_OPCODE();
10020 case 0xe0:
10021 switch (bRm)
10022 {
10023 case 0xe0: return FNIEMOP_CALL(iemOp_fnstsw_ax);
10024 default: return IEMOP_RAISE_INVALID_OPCODE();
10025 }
10026 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomip
10027 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomip
10028 case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
10029 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10030 }
10031 }
10032 else
10033 {
10034 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
10035 }
10036}
10037
10038
10039/** Opcode 0xe0. */
10040FNIEMOP_DEF(iemOp_loopne_Jb)
10041{
10042 IEMOP_MNEMONIC("loopne Jb");
10043 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10044 IEMOP_HLP_NO_LOCK_PREFIX();
10045 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10046
10047 switch (pIemCpu->enmEffAddrMode)
10048 {
10049 case IEMMODE_16BIT:
10050 IEM_MC_BEGIN(0,0);
10051 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10052 IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10053 IEM_MC_REL_JMP_S8(i8Imm);
10054 } IEM_MC_ELSE() {
10055 IEM_MC_ADVANCE_RIP();
10056 } IEM_MC_ENDIF();
10057 IEM_MC_END();
10058 return VINF_SUCCESS;
10059
10060 case IEMMODE_32BIT:
10061 IEM_MC_BEGIN(0,0);
10062 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10063 IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10064 IEM_MC_REL_JMP_S8(i8Imm);
10065 } IEM_MC_ELSE() {
10066 IEM_MC_ADVANCE_RIP();
10067 } IEM_MC_ENDIF();
10068 IEM_MC_END();
10069 return VINF_SUCCESS;
10070
10071 case IEMMODE_64BIT:
10072 IEM_MC_BEGIN(0,0);
10073 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10074 IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10075 IEM_MC_REL_JMP_S8(i8Imm);
10076 } IEM_MC_ELSE() {
10077 IEM_MC_ADVANCE_RIP();
10078 } IEM_MC_ENDIF();
10079 IEM_MC_END();
10080 return VINF_SUCCESS;
10081
10082 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10083 }
10084}
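
/* Condition sketch for loopne (illustration only, ignoring the 16/32-bit
   address-size truncation of rCX that the IEM_MC variants above handle):
   decrement rCX, then branch when rCX != 0 and ZF is clear. */
#if 0
static int iemExampleLoopNeTaken(uint64_t *puRegCx, uint32_t fEFlags)
{
    *puRegCx -= 1;
    return *puRegCx != 0 && !(fEFlags & X86_EFL_ZF);
}
#endif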
10085
10086
10087/** Opcode 0xe1. */
10088FNIEMOP_DEF(iemOp_loope_Jb)
10089{
10090 IEMOP_MNEMONIC("loope Jb");
10091 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10092 IEMOP_HLP_NO_LOCK_PREFIX();
10093 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10094
10095 switch (pIemCpu->enmEffAddrMode)
10096 {
10097 case IEMMODE_16BIT:
10098 IEM_MC_BEGIN(0,0);
10099 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10100 IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
10101 IEM_MC_REL_JMP_S8(i8Imm);
10102 } IEM_MC_ELSE() {
10103 IEM_MC_ADVANCE_RIP();
10104 } IEM_MC_ENDIF();
10105 IEM_MC_END();
10106 return VINF_SUCCESS;
10107
10108 case IEMMODE_32BIT:
10109 IEM_MC_BEGIN(0,0);
10110 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10111 IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
10112 IEM_MC_REL_JMP_S8(i8Imm);
10113 } IEM_MC_ELSE() {
10114 IEM_MC_ADVANCE_RIP();
10115 } IEM_MC_ENDIF();
10116 IEM_MC_END();
10117 return VINF_SUCCESS;
10118
10119 case IEMMODE_64BIT:
10120 IEM_MC_BEGIN(0,0);
10121 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10122 IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
10123 IEM_MC_REL_JMP_S8(i8Imm);
10124 } IEM_MC_ELSE() {
10125 IEM_MC_ADVANCE_RIP();
10126 } IEM_MC_ENDIF();
10127 IEM_MC_END();
10128 return VINF_SUCCESS;
10129
10130 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10131 }
10132}
10133
10134
10135/** Opcode 0xe2. */
10136FNIEMOP_DEF(iemOp_loop_Jb)
10137{
10138 IEMOP_MNEMONIC("loop Jb");
10139 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10140 IEMOP_HLP_NO_LOCK_PREFIX();
10141 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10142
10143 /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
10144 * using the 32-bit operand size override. How can that be restarted? See
10145 * weird pseudo code in intel manual. */
10146 switch (pIemCpu->enmEffAddrMode)
10147 {
10148 case IEMMODE_16BIT:
10149 IEM_MC_BEGIN(0,0);
10150 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10151 IEM_MC_IF_CX_IS_NZ() {
10152 IEM_MC_REL_JMP_S8(i8Imm);
10153 } IEM_MC_ELSE() {
10154 IEM_MC_ADVANCE_RIP();
10155 } IEM_MC_ENDIF();
10156 IEM_MC_END();
10157 return VINF_SUCCESS;
10158
10159 case IEMMODE_32BIT:
10160 IEM_MC_BEGIN(0,0);
10161 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10162 IEM_MC_IF_ECX_IS_NZ() {
10163 IEM_MC_REL_JMP_S8(i8Imm);
10164 } IEM_MC_ELSE() {
10165 IEM_MC_ADVANCE_RIP();
10166 } IEM_MC_ENDIF();
10167 IEM_MC_END();
10168 return VINF_SUCCESS;
10169
10170 case IEMMODE_64BIT:
10171 IEM_MC_BEGIN(0,0);
10172 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10173 IEM_MC_IF_RCX_IS_NZ() {
10174 IEM_MC_REL_JMP_S8(i8Imm);
10175 } IEM_MC_ELSE() {
10176 IEM_MC_ADVANCE_RIP();
10177 } IEM_MC_ENDIF();
10178 IEM_MC_END();
10179 return VINF_SUCCESS;
10180
10181 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10182 }
10183}
10184
10185
10186/** Opcode 0xe3. */
10187FNIEMOP_DEF(iemOp_jecxz_Jb)
10188{
10189 IEMOP_MNEMONIC("jecxz Jb");
10190 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10191 IEMOP_HLP_NO_LOCK_PREFIX();
10192 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10193
10194 switch (pIemCpu->enmEffAddrMode)
10195 {
10196 case IEMMODE_16BIT:
10197 IEM_MC_BEGIN(0,0);
10198 IEM_MC_IF_CX_IS_NZ() {
10199 IEM_MC_ADVANCE_RIP();
10200 } IEM_MC_ELSE() {
10201 IEM_MC_REL_JMP_S8(i8Imm);
10202 } IEM_MC_ENDIF();
10203 IEM_MC_END();
10204 return VINF_SUCCESS;
10205
10206 case IEMMODE_32BIT:
10207 IEM_MC_BEGIN(0,0);
10208 IEM_MC_IF_ECX_IS_NZ() {
10209 IEM_MC_ADVANCE_RIP();
10210 } IEM_MC_ELSE() {
10211 IEM_MC_REL_JMP_S8(i8Imm);
10212 } IEM_MC_ENDIF();
10213 IEM_MC_END();
10214 return VINF_SUCCESS;
10215
10216 case IEMMODE_64BIT:
10217 IEM_MC_BEGIN(0,0);
10218 IEM_MC_IF_RCX_IS_NZ() {
10219 IEM_MC_ADVANCE_RIP();
10220 } IEM_MC_ELSE() {
10221 IEM_MC_REL_JMP_S8(i8Imm);
10222 } IEM_MC_ENDIF();
10223 IEM_MC_END();
10224 return VINF_SUCCESS;
10225
10226 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10227 }
10228}
10229
10230
10231/** Opcode 0xe4 */
10232FNIEMOP_DEF(iemOp_in_AL_Ib)
10233{
10234 IEMOP_MNEMONIC("in AL,Ib");
10235 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10236 IEMOP_HLP_NO_LOCK_PREFIX();
10237 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
10238}
10239
10240
10241/** Opcode 0xe5 */
10242FNIEMOP_DEF(iemOp_in_eAX_Ib)
10243{
10244 IEMOP_MNEMONIC("in eAX,Ib");
10245 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10246 IEMOP_HLP_NO_LOCK_PREFIX();
10247 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10248}
10249
10250
10251/** Opcode 0xe6 */
10252FNIEMOP_DEF(iemOp_out_Ib_AL)
10253{
10254 IEMOP_MNEMONIC("out Ib,AL");
10255 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10256 IEMOP_HLP_NO_LOCK_PREFIX();
10257 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
10258}
10259
10260
10261/** Opcode 0xe7 */
10262FNIEMOP_DEF(iemOp_out_Ib_eAX)
10263{
10264 IEMOP_MNEMONIC("out Ib,eAX");
10265 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10266 IEMOP_HLP_NO_LOCK_PREFIX();
10267 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10268}
10269
10270
10271/** Opcode 0xe8. */
10272FNIEMOP_DEF(iemOp_call_Jv)
10273{
10274 IEMOP_MNEMONIC("call Jv");
10275 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10276 switch (pIemCpu->enmEffOpSize)
10277 {
10278 case IEMMODE_16BIT:
10279 {
10280 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10281 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int32_t)u16Imm);
10282 }
10283
10284 case IEMMODE_32BIT:
10285 {
10286 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10287 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
10288 }
10289
10290 case IEMMODE_64BIT:
10291 {
10292 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10293 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
10294 }
10295
10296 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10297 }
10298}
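
/* Target calculation sketch for relative call/jmp (illustration only): the
   displacement is added to the RIP of the *next* instruction; 16-bit
   operand size additionally truncates the result, which is not shown. */
#if 0
static uint64_t iemExampleRelTarget(uint64_t uRipNext, int32_t offDisp)
{
    return uRipNext + (uint64_t)(int64_t)offDisp;
}
#endif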
10299
10300
10301/** Opcode 0xe9. */
10302FNIEMOP_DEF(iemOp_jmp_Jv)
10303{
10304 IEMOP_MNEMONIC("jmp Jv");
10305 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10306 switch (pIemCpu->enmEffOpSize)
10307 {
10308 case IEMMODE_16BIT:
10309 {
10310 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
10311 IEM_MC_BEGIN(0, 0);
10312 IEM_MC_REL_JMP_S16(i16Imm);
10313 IEM_MC_END();
10314 return VINF_SUCCESS;
10315 }
10316
10317 case IEMMODE_64BIT:
10318 case IEMMODE_32BIT:
10319 {
10320 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
10321 IEM_MC_BEGIN(0, 0);
10322 IEM_MC_REL_JMP_S32(i32Imm);
10323 IEM_MC_END();
10324 return VINF_SUCCESS;
10325 }
10326
10327 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10328 }
10329}
10330
10331
10332/** Opcode 0xea. */
10333FNIEMOP_DEF(iemOp_jmp_Ap)
10334{
10335 IEMOP_MNEMONIC("jmp Ap");
10336 IEMOP_HLP_NO_64BIT();
10337
10338 /* Decode the far pointer address and pass it on to the far call C implementation. */
10339 uint32_t offSeg;
10340 if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
10341 IEM_OPCODE_GET_NEXT_U32(&offSeg);
10342 else
10343 {
10344 uint16_t offSeg16; IEM_OPCODE_GET_NEXT_U16(&offSeg16);
10345 offSeg = offSeg16;
10346 }
10347 uint16_t uSel; IEM_OPCODE_GET_NEXT_U16(&uSel);
10348 IEMOP_HLP_NO_LOCK_PREFIX();
10349 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
10350}
10351
10352
10353/** Opcode 0xeb. */
10354FNIEMOP_DEF(iemOp_jmp_Jb)
10355{
10356 IEMOP_MNEMONIC("jmp Jb");
10357 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10358 IEMOP_HLP_NO_LOCK_PREFIX();
10359 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10360
10361 IEM_MC_BEGIN(0, 0);
10362 IEM_MC_REL_JMP_S8(i8Imm);
10363 IEM_MC_END();
10364 return VINF_SUCCESS;
10365}
10366
10367
10368/** Opcode 0xec */
10369FNIEMOP_DEF(iemOp_in_AL_DX)
10370{
10371 IEMOP_MNEMONIC("in AL,DX");
10372 IEMOP_HLP_NO_LOCK_PREFIX();
10373 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
10374}
10375
10376
10377/** Opcode 0xed */
10378FNIEMOP_DEF(iemOp_eAX_DX)
10379{
10380 IEMOP_MNEMONIC("in eAX,DX");
10381 IEMOP_HLP_NO_LOCK_PREFIX();
10382 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10383}
10384
10385
10386/** Opcode 0xee */
10387FNIEMOP_DEF(iemOp_out_DX_AL)
10388{
10389 IEMOP_MNEMONIC("out DX,AL");
10390 IEMOP_HLP_NO_LOCK_PREFIX();
10391 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
10392}
10393
10394
10395/** Opcode 0xef */
10396FNIEMOP_DEF(iemOp_out_DX_eAX)
10397{
10398 IEMOP_MNEMONIC("out DX,eAX");
10399 IEMOP_HLP_NO_LOCK_PREFIX();
10400 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10401}
10402
10403
10404/** Opcode 0xf0. */
10405FNIEMOP_DEF(iemOp_lock)
10406{
10407 pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;
10408
10409 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10410 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10411}
10412
10413
10414/** Opcode 0xf2. */
10415FNIEMOP_DEF(iemOp_repne)
10416{
10417 /* This overrides any previous REPE prefix. */
10418 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
10419 pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;
10420
10421 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10422 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10423}
10424
10425
10426/** Opcode 0xf3. */
10427FNIEMOP_DEF(iemOp_repe)
10428{
10429 /* This overrides any previous REPNE prefix. */
10430 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
10431 pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;
10432
10433 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10434 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10435}
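
/* Sketch of the net prefix accumulation implemented by the three handlers
   above (illustration only): LOCK is sticky while F2/F3 displace each
   other, and decoding then re-dispatches on the next opcode byte. */
#if 0
static uint32_t iemExampleAccumulatePrefixes(const uint8_t *pb, size_t *poff)
{
    uint32_t fPrefixes = 0;
    for (;;)
    {
        switch (pb[(*poff)++])
        {
            case 0xf0: fPrefixes |= IEM_OP_PRF_LOCK; break;
            case 0xf2: fPrefixes = (fPrefixes & ~IEM_OP_PRF_REPZ)  | IEM_OP_PRF_REPNZ; break;
            case 0xf3: fPrefixes = (fPrefixes & ~IEM_OP_PRF_REPNZ) | IEM_OP_PRF_REPZ;  break;
            default:
                *poff -= 1;             /* not a prefix; first real opcode byte */
                return fPrefixes;
        }
    }
}
#endif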
10436
10437
10438/** Opcode 0xf4. */
10439FNIEMOP_DEF(iemOp_hlt)
10440{
10441 IEMOP_HLP_NO_LOCK_PREFIX();
10442 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
10443}
10444
10445
10446/** Opcode 0xf5. */
10447FNIEMOP_STUB(iemOp_cmc);
10448
10449
10450/**
10451 * Common implementation of 'inc/dec/not/neg Eb'.
10452 *
10453 * @param bRm The RM byte.
10454 * @param pImpl The instruction implementation.
10455 */
10456FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10457{
10458 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10459 {
10460 /* register access */
10461 IEM_MC_BEGIN(2, 0);
10462 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10463 IEM_MC_ARG(uint32_t *, pEFlags, 1);
10464 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10465 IEM_MC_REF_EFLAGS(pEFlags);
10466 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10467 IEM_MC_ADVANCE_RIP();
10468 IEM_MC_END();
10469 }
10470 else
10471 {
10472 /* memory access. */
10473 IEM_MC_BEGIN(2, 2);
10474 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10475 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10476 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10477
10478 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10479 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10480 IEM_MC_FETCH_EFLAGS(EFlags);
10481 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10482 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10483 else
10484 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);
10485
10486 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
10487 IEM_MC_COMMIT_EFLAGS(EFlags);
10488 IEM_MC_ADVANCE_RIP();
10489 IEM_MC_END();
10490 }
10491 return VINF_SUCCESS;
10492}
10493
10494
10495/**
10496 * Common implementation of 'inc/dec/not/neg Ev'.
10497 *
10498 * @param bRm The RM byte.
10499 * @param pImpl The instruction implementation.
10500 */
10501FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10502{
10503 /* Registers are handled by a common worker. */
10504 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10505 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10506
10507 /* Memory we do here. */
10508 switch (pIemCpu->enmEffOpSize)
10509 {
10510 case IEMMODE_16BIT:
10511 IEM_MC_BEGIN(2, 2);
10512 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10513 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10514 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10515
10516 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10517 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10518 IEM_MC_FETCH_EFLAGS(EFlags);
10519 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10520 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
10521 else
10522 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);
10523
10524 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
10525 IEM_MC_COMMIT_EFLAGS(EFlags);
10526 IEM_MC_ADVANCE_RIP();
10527 IEM_MC_END();
10528 return VINF_SUCCESS;
10529
10530 case IEMMODE_32BIT:
10531 IEM_MC_BEGIN(2, 2);
10532 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10533 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10534 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10535
10536 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10537 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10538 IEM_MC_FETCH_EFLAGS(EFlags);
10539 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10540 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
10541 else
10542 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);
10543
10544 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
10545 IEM_MC_COMMIT_EFLAGS(EFlags);
10546 IEM_MC_ADVANCE_RIP();
10547 IEM_MC_END();
10548 return VINF_SUCCESS;
10549
10550 case IEMMODE_64BIT:
10551 IEM_MC_BEGIN(2, 2);
10552 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10553 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10554 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10555
10556 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10557 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10558 IEM_MC_FETCH_EFLAGS(EFlags);
10559 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10560 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
10561 else
10562 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);
10563
10564 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
10565 IEM_MC_COMMIT_EFLAGS(EFlags);
10566 IEM_MC_ADVANCE_RIP();
10567 IEM_MC_END();
10568 return VINF_SUCCESS;
10569
10570 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10571 }
10572}
10573
10574
10575/** Opcode 0xf6 /0. */
10576FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
10577{
10578 IEMOP_MNEMONIC("test Eb,Ib");
10579 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
10580
10581 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10582 {
10583 /* register access */
10584 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10585 IEMOP_HLP_NO_LOCK_PREFIX();
10586
10587 IEM_MC_BEGIN(3, 0);
10588 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10589 IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/u8Imm, 1);
10590 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10591 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10592 IEM_MC_REF_EFLAGS(pEFlags);
10593 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
10594 IEM_MC_ADVANCE_RIP();
10595 IEM_MC_END();
10596 }
10597 else
10598 {
10599 /* memory access. */
10600 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10601
10602 IEM_MC_BEGIN(3, 2);
10603 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10604 IEM_MC_ARG(uint8_t, u8Src, 1);
10605 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10606 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10607
10608 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10609 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10610 IEM_MC_ASSIGN(u8Src, u8Imm);
10611 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10612 IEM_MC_FETCH_EFLAGS(EFlags);
10613 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
10614
10615 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
10616 IEM_MC_COMMIT_EFLAGS(EFlags);
10617 IEM_MC_ADVANCE_RIP();
10618 IEM_MC_END();
10619 }
10620 return VINF_SUCCESS;
10621}
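
/* Flag sketch for test (illustration only, PF left out for brevity): the
   AND result is discarded, CF and OF are cleared, ZF/SF follow the result
   and AF is architecturally undefined, hence the read-only mapping above. */
#if 0
static void iemExampleTestU8(uint8_t uDst, uint8_t uSrc, uint32_t *pfEFlags)
{
    uint8_t const uResult = uDst & uSrc;
    *pfEFlags &= ~(uint32_t)(X86_EFL_CF | X86_EFL_OF | X86_EFL_ZF | X86_EFL_SF);
    if (!uResult)
        *pfEFlags |= X86_EFL_ZF;
    if (uResult & 0x80)
        *pfEFlags |= X86_EFL_SF;
}
#endif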
10622
10623
10624/** Opcode 0xf7 /0. */
10625FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
10626{
10627 IEMOP_MNEMONIC("test Ev,Iv");
10628 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10629 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
10630
10631 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10632 {
10633 /* register access */
10634 switch (pIemCpu->enmEffOpSize)
10635 {
10636 case IEMMODE_16BIT:
10637 {
10638 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10639 IEM_MC_BEGIN(3, 0);
10640 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10641 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/u16Imm, 1);
10642 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10643 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10644 IEM_MC_REF_EFLAGS(pEFlags);
10645 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
10646 IEM_MC_ADVANCE_RIP();
10647 IEM_MC_END();
10648 return VINF_SUCCESS;
10649 }
10650
10651 case IEMMODE_32BIT:
10652 {
10653 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10654 IEM_MC_BEGIN(3, 0);
10655 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10656 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/u32Imm, 1);
10657 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10658 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10659 IEM_MC_REF_EFLAGS(pEFlags);
10660 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
10661 IEM_MC_ADVANCE_RIP();
10662 IEM_MC_END();
10663 return VINF_SUCCESS;
10664 }
10665
10666 case IEMMODE_64BIT:
10667 {
10668 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10669 IEM_MC_BEGIN(3, 0);
10670 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10671 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/u64Imm, 1);
10672 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10673 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10674 IEM_MC_REF_EFLAGS(pEFlags);
10675 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
10676 IEM_MC_ADVANCE_RIP();
10677 IEM_MC_END();
10678 return VINF_SUCCESS;
10679 }
10680
10681 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10682 }
10683 }
10684 else
10685 {
10686 /* memory access. */
10687 switch (pIemCpu->enmEffOpSize)
10688 {
10689 case IEMMODE_16BIT:
10690 {
10691 IEM_MC_BEGIN(3, 2);
10692 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10693 IEM_MC_ARG(uint16_t, u16Src, 1);
10694 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10695 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10696
10697 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10698 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10699 IEM_MC_ASSIGN(u16Src, u16Imm);
10700 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10701 IEM_MC_FETCH_EFLAGS(EFlags);
10702 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
10703
10704 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
10705 IEM_MC_COMMIT_EFLAGS(EFlags);
10706 IEM_MC_ADVANCE_RIP();
10707 IEM_MC_END();
10708 return VINF_SUCCESS;
10709 }
10710
10711 case IEMMODE_32BIT:
10712 {
10713 IEM_MC_BEGIN(3, 2);
10714 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10715 IEM_MC_ARG(uint32_t, u32Src, 1);
10716 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10717 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10718
10719 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10720 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10721 IEM_MC_ASSIGN(u32Src, u32Imm);
10722 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10723 IEM_MC_FETCH_EFLAGS(EFlags);
10724 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
10725
10726 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
10727 IEM_MC_COMMIT_EFLAGS(EFlags);
10728 IEM_MC_ADVANCE_RIP();
10729 IEM_MC_END();
10730 return VINF_SUCCESS;
10731 }
10732
10733 case IEMMODE_64BIT:
10734 {
10735 IEM_MC_BEGIN(3, 2);
10736 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10737 IEM_MC_ARG(uint64_t, u64Src, 1);
10738 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10739 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10740
10741 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10742 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10743 IEM_MC_ASSIGN(u64Src, u64Imm);
10744 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10745 IEM_MC_FETCH_EFLAGS(EFlags);
10746 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
10747
10748 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
10749 IEM_MC_COMMIT_EFLAGS(EFlags);
10750 IEM_MC_ADVANCE_RIP();
10751 IEM_MC_END();
10752 return VINF_SUCCESS;
10753 }
10754
10755 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10756 }
10757 }
10758}
10759
10760
10761/** Opcode 0xf6 /4, /5, /6 and /7. */
10762FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
10763{
10764 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10765
10766 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10767 {
10768 /* register access */
10769 IEMOP_HLP_NO_LOCK_PREFIX();
10770 IEM_MC_BEGIN(3, 0);
10771 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10772 IEM_MC_ARG(uint8_t, u8Value, 1);
10773 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10774 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10775 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10776 IEM_MC_REF_EFLAGS(pEFlags);
10777 IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
10778 IEM_MC_ADVANCE_RIP();
10779 IEM_MC_END();
10780 }
10781 else
10782 {
10783 /* memory access. */
10784 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10785
10786 IEM_MC_BEGIN(3, 1);
10787 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10788 IEM_MC_ARG(uint8_t, u8Value, 1);
10789 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10790 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10791
10792 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10793 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
10794 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10795 IEM_MC_REF_EFLAGS(pEFlags);
10796 IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
10797
10798 IEM_MC_ADVANCE_RIP();
10799 IEM_MC_END();
10800 }
10801 return VINF_SUCCESS;
10802}
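
/* Semantics sketch for the 8-bit widening group handled above (illustration
   only): mul stores AL * r/m8 into AX, while div splits AX into AL=quotient
   and AH=remainder, raising #DE on zero divisors and quotient overflow. */
#if 0
static int iemExampleDivU8(uint16_t *puRegAx, uint8_t u8Divisor)
{
    if (!u8Divisor)
        return -1;                          /* #DE: divide by zero */
    uint16_t const uQuotient = *puRegAx / u8Divisor;
    if (uQuotient > 0xff)
        return -1;                          /* #DE: quotient does not fit in AL */
    *puRegAx = (uint16_t)((*puRegAx % u8Divisor) << 8) | (uint8_t)uQuotient;
    return 0;
}
#endif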
10803
10804
10805/** Opcode 0xf7 /4, /5, /6 and /7. */
10806FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
10807{
10808 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10809 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
10810
10811 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10812 {
10813 /* register access */
10814 switch (pIemCpu->enmEffOpSize)
10815 {
10816 case IEMMODE_16BIT:
10817 {
10818 IEMOP_HLP_NO_LOCK_PREFIX();
10819 IEM_MC_BEGIN(4, 1);
10820 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10821 IEM_MC_ARG(uint16_t *, pu16DX, 1);
10822 IEM_MC_ARG(uint16_t, u16Value, 2);
10823 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10824 IEM_MC_LOCAL(int32_t, rc);
10825
10826 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10827 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10828 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
10829 IEM_MC_REF_EFLAGS(pEFlags);
10830 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
10831 IEM_MC_IF_LOCAL_IS_Z(rc) {
10832 IEM_MC_ADVANCE_RIP();
10833 } IEM_MC_ELSE() {
10834 IEM_MC_RAISE_DIVIDE_ERROR();
10835 } IEM_MC_ENDIF();
10836
10837 IEM_MC_END();
10838 return VINF_SUCCESS;
10839 }
10840
10841 case IEMMODE_32BIT:
10842 {
10843 IEMOP_HLP_NO_LOCK_PREFIX();
10844 IEM_MC_BEGIN(4, 1);
10845 IEM_MC_ARG(uint32_t *, pu32AX, 0);
10846 IEM_MC_ARG(uint32_t *, pu32DX, 1);
10847 IEM_MC_ARG(uint32_t, u32Value, 2);
10848 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10849 IEM_MC_LOCAL(int32_t, rc);
10850
10851 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10852 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
10853 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
10854 IEM_MC_REF_EFLAGS(pEFlags);
10855 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
10856 IEM_MC_IF_LOCAL_IS_Z(rc) {
10857 IEM_MC_ADVANCE_RIP();
10858 } IEM_MC_ELSE() {
10859 IEM_MC_RAISE_DIVIDE_ERROR();
10860 } IEM_MC_ENDIF();
10861
10862 IEM_MC_END();
10863 return VINF_SUCCESS;
10864 }
10865
10866 case IEMMODE_64BIT:
10867 {
10868 IEMOP_HLP_NO_LOCK_PREFIX();
10869 IEM_MC_BEGIN(4, 1);
10870 IEM_MC_ARG(uint64_t *, pu64AX, 0);
10871 IEM_MC_ARG(uint64_t *, pu64DX, 1);
10872 IEM_MC_ARG(uint64_t, u64Value, 2);
10873 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10874 IEM_MC_LOCAL(int32_t, rc);
10875
10876 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10877 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
10878 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
10879 IEM_MC_REF_EFLAGS(pEFlags);
10880 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
10881 IEM_MC_IF_LOCAL_IS_Z(rc) {
10882 IEM_MC_ADVANCE_RIP();
10883 } IEM_MC_ELSE() {
10884 IEM_MC_RAISE_DIVIDE_ERROR();
10885 } IEM_MC_ENDIF();
10886
10887 IEM_MC_END();
10888 return VINF_SUCCESS;
10889 }
10890
10891 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10892 }
10893 }
10894 else
10895 {
10896 /* memory access. */
10897 switch (pIemCpu->enmEffOpSize)
10898 {
10899 case IEMMODE_16BIT:
10900 {
10901 IEMOP_HLP_NO_LOCK_PREFIX();
10902 IEM_MC_BEGIN(4, 2);
10903 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10904 IEM_MC_ARG(uint16_t *, pu16DX, 1);
10905 IEM_MC_ARG(uint16_t, u16Value, 2);
10906 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10907 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10908 IEM_MC_LOCAL(int32_t, rc);
10909
10910 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10911 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
10912 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10913 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
10914 IEM_MC_REF_EFLAGS(pEFlags);
10915 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
10916 IEM_MC_IF_LOCAL_IS_Z(rc) {
10917 IEM_MC_ADVANCE_RIP();
10918 } IEM_MC_ELSE() {
10919 IEM_MC_RAISE_DIVIDE_ERROR();
10920 } IEM_MC_ENDIF();
10921
10922 IEM_MC_END();
10923 return VINF_SUCCESS;
10924 }
10925
10926 case IEMMODE_32BIT:
10927 {
10928 IEMOP_HLP_NO_LOCK_PREFIX();
10929 IEM_MC_BEGIN(4, 2);
10930 IEM_MC_ARG(uint32_t *, pu32AX, 0);
10931 IEM_MC_ARG(uint32_t *, pu32DX, 1);
10932 IEM_MC_ARG(uint32_t, u32Value, 2);
10933 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10934 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10935 IEM_MC_LOCAL(int32_t, rc);
10936
10937 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10938 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
10939 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
10940 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
10941 IEM_MC_REF_EFLAGS(pEFlags);
10942 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
10943 IEM_MC_IF_LOCAL_IS_Z(rc) {
10944 IEM_MC_ADVANCE_RIP();
10945 } IEM_MC_ELSE() {
10946 IEM_MC_RAISE_DIVIDE_ERROR();
10947 } IEM_MC_ENDIF();
10948
10949 IEM_MC_END();
10950 return VINF_SUCCESS;
10951 }
10952
10953 case IEMMODE_64BIT:
10954 {
10955 IEMOP_HLP_NO_LOCK_PREFIX();
10956 IEM_MC_BEGIN(4, 2);
10957 IEM_MC_ARG(uint64_t *, pu64AX, 0);
10958 IEM_MC_ARG(uint64_t *, pu64DX, 1);
10959 IEM_MC_ARG(uint64_t, u64Value, 2);
10960 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10961 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10962 IEM_MC_LOCAL(int32_t, rc);
10963
10964 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10965 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
10966 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
10967 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
10968 IEM_MC_REF_EFLAGS(pEFlags);
10969 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
10970 IEM_MC_IF_LOCAL_IS_Z(rc) {
10971 IEM_MC_ADVANCE_RIP();
10972 } IEM_MC_ELSE() {
10973 IEM_MC_RAISE_DIVIDE_ERROR();
10974 } IEM_MC_ENDIF();
10975
10976 IEM_MC_END();
10977 return VINF_SUCCESS;
10978 }
10979
10980 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10981 }
10982 }
10983}
10984
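/*
 * Illustration only, not part of the emulator: a sketch of the status
 * convention the Ev worker above relies on for the div/idiv helpers.  A
 * zero return means the division succeeded and RIP is advanced; non-zero
 * signals a divide error (zero divisor or quotient overflow), which the
 * IEM_MC_IF_LOCAL_IS_Z() path turns into \#DE.  The helper name below is
 * made up for exposition.
 */
#if 0
static int32_t iemExampleAImpl_div_u16(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16Value, uint32_t *pfEFlags)
{
    uint32_t const uDividend = ((uint32_t)*pu16DX << 16) | *pu16AX;     /* DX:AX */
    if (!u16Value)
        return -1;                                  /* divide by zero -> #DE */
    uint32_t const uQuotient = uDividend / u16Value;
    if (uQuotient > UINT16_MAX)
        return -1;                                  /* quotient overflow -> #DE */
    *pu16AX = (uint16_t)uQuotient;                  /* quotient  -> AX */
    *pu16DX = (uint16_t)(uDividend % u16Value);     /* remainder -> DX */
    (void)pfEFlags;  /* all arithmetic flags are undefined after DIV */
    return 0;
}
#endif
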
10985/** Opcode 0xf6. */
10986FNIEMOP_DEF(iemOp_Grp3_Eb)
10987{
10988 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10989 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10990 {
10991 case 0:
10992 return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
10993 case 1:
10994            return IEMOP_RAISE_INVALID_OPCODE(); /* reserved /1 encoding */
10995 case 2:
10996 IEMOP_MNEMONIC("not Eb");
10997 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
10998 case 3:
10999 IEMOP_MNEMONIC("neg Eb");
11000 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
11001 case 4:
11002 IEMOP_MNEMONIC("mul Eb");
11003 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11004 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
11005 case 5:
11006 IEMOP_MNEMONIC("imul Eb");
11007 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11008 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
11009 case 6:
11010 IEMOP_MNEMONIC("div Eb");
11011 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11012 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
11013 case 7:
11014 IEMOP_MNEMONIC("idiv Eb");
11015 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11016 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
11017 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11018 }
11019}
11020
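/*
 * Illustration only: a worked ModRM example for the group dispatch above.
 * The byte sequence f6 f3 (an assumed encoding) gives bRm = 0xf3, i.e.
 * mod = 3 (register operand), reg = 6 (the /6 extension, so DIV) and
 * rm = 3 (BL), meaning iemOp_Grp3_Eb ends up emulating 'div bl'.
 */
#if 0
    uint8_t const  bRmExample = 0xf3;
    unsigned const iReg = (bRmExample >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK; /* = 6 -> /6 = div */
    unsigned const iRm  = bRmExample & X86_MODRM_RM_MASK;                            /* = 3 -> BL */
#endif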
11021
11022/** Opcode 0xf7. */
11023FNIEMOP_DEF(iemOp_Grp3_Ev)
11024{
11025 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11026 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11027 {
11028 case 0:
11029 return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
11030 case 1:
11031            return IEMOP_RAISE_INVALID_OPCODE(); /* reserved /1 encoding */
11032 case 2:
11033 IEMOP_MNEMONIC("not Ev");
11034 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
11035 case 3:
11036 IEMOP_MNEMONIC("neg Ev");
11037 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
11038 case 4:
11039 IEMOP_MNEMONIC("mul Ev");
11040 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11041 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
11042 case 5:
11043 IEMOP_MNEMONIC("imul Ev");
11044 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11045 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
11046 case 6:
11047 IEMOP_MNEMONIC("div Ev");
11048 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11049 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
11050 case 7:
11051 IEMOP_MNEMONIC("idiv Ev");
11052 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11053 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
11054 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11055 }
11056}
11057
11058
11059/** Opcode 0xf8. */
11060FNIEMOP_DEF(iemOp_clc)
11061{
11062 IEMOP_MNEMONIC("clc");
11063 IEMOP_HLP_NO_LOCK_PREFIX();
11064 IEM_MC_BEGIN(0, 0);
11065 IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
11066 IEM_MC_ADVANCE_RIP();
11067 IEM_MC_END();
11068 return VINF_SUCCESS;
11069}
11070
11071
11072/** Opcode 0xf9. */
11073FNIEMOP_DEF(iemOp_stc)
11074{
11075    IEMOP_MNEMONIC("stc");
11076 IEMOP_HLP_NO_LOCK_PREFIX();
11077 IEM_MC_BEGIN(0, 0);
11078 IEM_MC_SET_EFL_BIT(X86_EFL_CF);
11079 IEM_MC_ADVANCE_RIP();
11080 IEM_MC_END();
11081 return VINF_SUCCESS;
11082}
11083
11084
11085/** Opcode 0xfa. */
11086FNIEMOP_DEF(iemOp_cli)
11087{
11088 IEMOP_MNEMONIC("cli");
11089 IEMOP_HLP_NO_LOCK_PREFIX();
11090 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
11091}
11092
11093/** Opcode 0xfb. */
11094FNIEMOP_DEF(iemOp_sti)
11095{
11096 IEMOP_MNEMONIC("sti");
11097 IEMOP_HLP_NO_LOCK_PREFIX();
11098 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
11099}
11100
11101
11102/** Opcode 0xfc. */
11103FNIEMOP_DEF(iemOp_cld)
11104{
11105 IEMOP_MNEMONIC("cld");
11106 IEMOP_HLP_NO_LOCK_PREFIX();
11107 IEM_MC_BEGIN(0, 0);
11108 IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);
11109 IEM_MC_ADVANCE_RIP();
11110 IEM_MC_END();
11111 return VINF_SUCCESS;
11112}
11113
11114
11115/** Opcode 0xfd. */
11116FNIEMOP_DEF(iemOp_std)
11117{
11118 IEMOP_MNEMONIC("std");
11119 IEMOP_HLP_NO_LOCK_PREFIX();
11120 IEM_MC_BEGIN(0, 0);
11121 IEM_MC_SET_EFL_BIT(X86_EFL_DF);
11122 IEM_MC_ADVANCE_RIP();
11123 IEM_MC_END();
11124 return VINF_SUCCESS;
11125}
11126
11127
11128/** Opcode 0xfe. */
11129FNIEMOP_DEF(iemOp_Grp4)
11130{
11131 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11132 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11133 {
11134 case 0:
11135            IEMOP_MNEMONIC("inc Eb");
11136 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
11137 case 1:
11138            IEMOP_MNEMONIC("dec Eb");
11139 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
11140 default:
11141 IEMOP_MNEMONIC("grp4-ud");
11142 return IEMOP_RAISE_INVALID_OPCODE();
11143 }
11144}
11145
11146
11147/**
11148 * Opcode 0xff /2.
11149 * @param bRm The RM byte.
11150 */
11151FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
11152{
11153 IEMOP_MNEMONIC("calln Ev");
11154 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11155 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11156
11157 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11158 {
11159 /* The new RIP is taken from a register. */
11160 switch (pIemCpu->enmEffOpSize)
11161 {
11162 case IEMMODE_16BIT:
11163 IEM_MC_BEGIN(1, 0);
11164 IEM_MC_ARG(uint16_t, u16Target, 0);
11165 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11166 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
11167                IEM_MC_END();
11168 return VINF_SUCCESS;
11169
11170 case IEMMODE_32BIT:
11171 IEM_MC_BEGIN(1, 0);
11172 IEM_MC_ARG(uint32_t, u32Target, 0);
11173 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11174 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
11175                IEM_MC_END();
11176 return VINF_SUCCESS;
11177
11178 case IEMMODE_64BIT:
11179 IEM_MC_BEGIN(1, 0);
11180 IEM_MC_ARG(uint64_t, u64Target, 0);
11181 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11182 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
11183                IEM_MC_END();
11184 return VINF_SUCCESS;
11185
11186 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11187 }
11188 }
11189 else
11190 {
11191        /* The new RIP is loaded from memory. */
11192 switch (pIemCpu->enmEffOpSize)
11193 {
11194 case IEMMODE_16BIT:
11195 IEM_MC_BEGIN(1, 1);
11196 IEM_MC_ARG(uint16_t, u16Target, 0);
11197 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11198 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11199 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11200 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
11201                IEM_MC_END();
11202 return VINF_SUCCESS;
11203
11204 case IEMMODE_32BIT:
11205 IEM_MC_BEGIN(1, 1);
11206 IEM_MC_ARG(uint32_t, u32Target, 0);
11207 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11208 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11209 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11210 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
11211                IEM_MC_END();
11212 return VINF_SUCCESS;
11213
11214 case IEMMODE_64BIT:
11215 IEM_MC_BEGIN(1, 1);
11216 IEM_MC_ARG(uint64_t, u64Target, 0);
11217 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11218 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11219 IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11220 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
11221                IEM_MC_END();
11222 return VINF_SUCCESS;
11223
11224 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11225 }
11226 }
11227}
11228
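/*
 * Illustration only: roughly what the iemCImpl_call_16 worker invoked above
 * is assumed to do for a near indirect call -- push the address of the next
 * instruction, then branch to the fetched target.  This standalone sketch
 * (name, signature and flat stack model) is made up for exposition; the
 * real worker also checks the target against the code segment limit.
 */
#if 0
static int iemExampleCImpl_call_16(uint16_t *pu16Ip, uint16_t *pu16Sp, uint8_t *pbStack, uint16_t u16Target)
{
    /* *pu16Ip is assumed to already hold the address of the next instruction. */
    *pu16Sp -= 2;                                   /* make room on the stack */
    pbStack[*pu16Sp]     = (uint8_t)*pu16Ip;        /* push return IP (little endian) */
    pbStack[*pu16Sp + 1] = (uint8_t)(*pu16Ip >> 8);
    *pu16Ip = u16Target;                            /* branch to the new IP */
    return 0;
}
#endif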
11229
11230/**
11231 * Opcode 0xff /3.
11232 * @param bRm The RM byte.
11233 */
11234FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
11235{
11236 IEMOP_MNEMONIC("callf Ep");
11237 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11238
11239    /* Register operand? A far pointer cannot come from a register. */
11240 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11241 {
11242        /** @todo A far pointer cannot fit in a general register, so the mod=3
11243         * form ('callf eax') presumably raises \#UD; verify against the docs. */
11244 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
11245 }
11246
11247 /* Far pointer loaded from memory. */
11248 switch (pIemCpu->enmEffOpSize)
11249 {
11250 case IEMMODE_16BIT:
11251 IEM_MC_BEGIN(3, 1);
11252 IEM_MC_ARG(uint16_t, u16Sel, 0);
11253 IEM_MC_ARG(uint16_t, offSeg, 1);
11254 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11255 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11256 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11257 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11258 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11259 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11260 IEM_MC_END();
11261 return VINF_SUCCESS;
11262
11263 case IEMMODE_32BIT:
11264 IEM_MC_BEGIN(3, 1);
11265 IEM_MC_ARG(uint16_t, u16Sel, 0);
11266 IEM_MC_ARG(uint32_t, offSeg, 1);
11267 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11268 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11269 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11270 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11271 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11272 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11273 IEM_MC_END();
11274 return VINF_SUCCESS;
11275
11276 case IEMMODE_64BIT:
11277 IEM_MC_BEGIN(3, 1);
11278 IEM_MC_ARG(uint16_t, u16Sel, 0);
11279 IEM_MC_ARG(uint64_t, offSeg, 1);
11280            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_64BIT, 2);
11281 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11282 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11283 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11284 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11285 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11286 IEM_MC_END();
11287 return VINF_SUCCESS;
11288
11289 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11290 }
11291}
11292
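/*
 * Illustration only: the in-memory layout the decoder above assumes for the
 * m16:16/m16:32/m16:64 far pointer operand.  The offset comes first and the
 * 16-bit selector immediately follows it, which is why the selector is
 * fetched at displacement 2, 4 or 8 depending on the effective operand
 * size.  The structure name is made up for exposition.
 */
#if 0
#pragma pack(1)
typedef struct EXAMPLEFARPTR32      /* the m16:32 variant */
{
    uint32_t off;                   /* +0: offset into the target segment */
    uint16_t sel;                   /* +4: segment selector */
} EXAMPLEFARPTR32;
#pragma pack()
#endif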
11293
11294/**
11295 * Opcode 0xff /4.
11296 * @param bRm The RM byte.
11297 */
11298FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
11299{
11300    IEMOP_MNEMONIC("jmpn Ev");
11301 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11302 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11303
11304 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11305 {
11306 /* The new RIP is taken from a register. */
11307 switch (pIemCpu->enmEffOpSize)
11308 {
11309 case IEMMODE_16BIT:
11310 IEM_MC_BEGIN(0, 1);
11311 IEM_MC_LOCAL(uint16_t, u16Target);
11312 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11313 IEM_MC_SET_RIP_U16(u16Target);
11314                IEM_MC_END();
11315 return VINF_SUCCESS;
11316
11317 case IEMMODE_32BIT:
11318 IEM_MC_BEGIN(0, 1);
11319 IEM_MC_LOCAL(uint32_t, u32Target);
11320 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11321 IEM_MC_SET_RIP_U32(u32Target);
11322                IEM_MC_END();
11323 return VINF_SUCCESS;
11324
11325 case IEMMODE_64BIT:
11326 IEM_MC_BEGIN(0, 1);
11327 IEM_MC_LOCAL(uint64_t, u64Target);
11328 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11329 IEM_MC_SET_RIP_U64(u64Target);
11330                IEM_MC_END();
11331 return VINF_SUCCESS;
11332
11333 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11334 }
11335 }
11336 else
11337 {
11338        /* The new RIP is loaded from memory. */
11339 switch (pIemCpu->enmEffOpSize)
11340 {
11341 case IEMMODE_16BIT:
11342 IEM_MC_BEGIN(0, 2);
11343 IEM_MC_LOCAL(uint16_t, u16Target);
11344 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11345 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11346 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11347 IEM_MC_SET_RIP_U16(u16Target);
11348                IEM_MC_END();
11349 return VINF_SUCCESS;
11350
11351 case IEMMODE_32BIT:
11352 IEM_MC_BEGIN(0, 2);
11353 IEM_MC_LOCAL(uint32_t, u32Target);
11354 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11355 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11356 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11357 IEM_MC_SET_RIP_U32(u32Target);
11358                IEM_MC_END();
11359 return VINF_SUCCESS;
11360
11361 case IEMMODE_64BIT:
11362 IEM_MC_BEGIN(0, 2);
11363                IEM_MC_LOCAL(uint64_t, u64Target);
11364                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11365                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11366                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11367                IEM_MC_SET_RIP_U64(u64Target);
11368                IEM_MC_END();
11369 return VINF_SUCCESS;
11370
11371 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11372 }
11373 }
11374}
11375
11376
11377/**
11378 * Opcode 0xff /5.
11379 * @param bRm The RM byte.
11380 */
11381FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
11382{
11383    IEMOP_MNEMONIC("jmpf Ep");
11384    IEMOP_HLP_NO_64BIT(); /** @todo looks wrong: FF /5 is valid in 64-bit mode (m16:64), making the IEMMODE_64BIT case below dead code with this here. */
11385 /** @todo could share all the decoding with iemOp_Grp5_callf_Ep. */
11386
11387    /* Decode the far pointer address and pass it on to the far jmp C
11388       implementation. */
11389 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11390 {
11391        /** @todo A far pointer cannot fit in a general register, so the mod=3
11392         * form ('jmpf eax') presumably raises \#UD; verify against the docs. */
11393 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
11394 }
11395
11396 /* Far pointer loaded from memory. */
11397 switch (pIemCpu->enmEffOpSize)
11398 {
11399 case IEMMODE_16BIT:
11400 IEM_MC_BEGIN(3, 1);
11401 IEM_MC_ARG(uint16_t, u16Sel, 0);
11402 IEM_MC_ARG(uint16_t, offSeg, 1);
11403 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11404 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11405 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11406 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11407 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11408 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11409 IEM_MC_END();
11410 return VINF_SUCCESS;
11411
11412 case IEMMODE_32BIT:
11413 IEM_MC_BEGIN(3, 1);
11414 IEM_MC_ARG(uint16_t, u16Sel, 0);
11415 IEM_MC_ARG(uint32_t, offSeg, 1);
11416 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11417 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11418 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11419 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11420 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11421 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11422 IEM_MC_END();
11423 return VINF_SUCCESS;
11424
11425 case IEMMODE_64BIT:
11426 IEM_MC_BEGIN(3, 1);
11427 IEM_MC_ARG(uint16_t, u16Sel, 0);
11428 IEM_MC_ARG(uint64_t, offSeg, 1);
11429            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_64BIT, 2);
11430 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11431 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11432 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11433 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11434 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11435 IEM_MC_END();
11436 return VINF_SUCCESS;
11437
11438 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11439 }
11440}
11441
11442
11443/**
11444 * Opcode 0xff /6.
11445 * @param bRm The RM byte.
11446 */
11447FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
11448{
11449 IEMOP_MNEMONIC("push Ev");
11450 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11451
11452 /* Registers are handled by a common worker. */
11453 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11454 return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11455
11456 /* Memory we do here. */
11457 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11458 switch (pIemCpu->enmEffOpSize)
11459 {
11460 case IEMMODE_16BIT:
11461 IEM_MC_BEGIN(0, 2);
11462 IEM_MC_LOCAL(uint16_t, u16Src);
11463 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11464 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11465 IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
11466 IEM_MC_PUSH_U16(u16Src);
11467 IEM_MC_ADVANCE_RIP();
11468 IEM_MC_END();
11469 return VINF_SUCCESS;
11470
11471 case IEMMODE_32BIT:
11472 IEM_MC_BEGIN(0, 2);
11473 IEM_MC_LOCAL(uint32_t, u32Src);
11474 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11475 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11476 IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
11477 IEM_MC_PUSH_U32(u32Src);
11478 IEM_MC_ADVANCE_RIP();
11479 IEM_MC_END();
11480 return VINF_SUCCESS;
11481
11482 case IEMMODE_64BIT:
11483 IEM_MC_BEGIN(0, 2);
11484 IEM_MC_LOCAL(uint64_t, u64Src);
11485 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11486 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11487 IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
11488 IEM_MC_PUSH_U64(u64Src);
11489 IEM_MC_ADVANCE_RIP();
11490 IEM_MC_END();
11491 return VINF_SUCCESS;
11492 }
11493    AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* unreachable: all operand sizes handled above */
11494}
11495
11496
11497/** Opcode 0xff. */
11498FNIEMOP_DEF(iemOp_Grp5)
11499{
11500 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11501 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11502 {
11503 case 0:
11504 IEMOP_MNEMONIC("inc Ev");
11505 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
11506 case 1:
11507 IEMOP_MNEMONIC("dec Ev");
11508 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
11509 case 2:
11510 return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
11511 case 3:
11512 return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
11513 case 4:
11514 return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
11515 case 5:
11516 return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
11517 case 6:
11518 return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
11519 case 7:
11520 IEMOP_MNEMONIC("grp5-ud");
11521 return IEMOP_RAISE_INVALID_OPCODE();
11522 }
11523 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
11524}
11525
11526
11527
11528const PFNIEMOP g_apfnOneByteMap[256] =
11529{
11530 /* 0x00 */ iemOp_add_Eb_Gb, iemOp_add_Ev_Gv, iemOp_add_Gb_Eb, iemOp_add_Gv_Ev,
11531 /* 0x04 */ iemOp_add_Al_Ib, iemOp_add_eAX_Iz, iemOp_push_ES, iemOp_pop_ES,
11532 /* 0x08 */ iemOp_or_Eb_Gb, iemOp_or_Ev_Gv, iemOp_or_Gb_Eb, iemOp_or_Gv_Ev,
11533 /* 0x0c */ iemOp_or_Al_Ib, iemOp_or_eAX_Iz, iemOp_push_CS, iemOp_2byteEscape,
11534 /* 0x10 */ iemOp_adc_Eb_Gb, iemOp_adc_Ev_Gv, iemOp_adc_Gb_Eb, iemOp_adc_Gv_Ev,
11535 /* 0x14 */ iemOp_adc_Al_Ib, iemOp_adc_eAX_Iz, iemOp_push_SS, iemOp_pop_SS,
11536 /* 0x18 */ iemOp_sbb_Eb_Gb, iemOp_sbb_Ev_Gv, iemOp_sbb_Gb_Eb, iemOp_sbb_Gv_Ev,
11537 /* 0x1c */ iemOp_sbb_Al_Ib, iemOp_sbb_eAX_Iz, iemOp_push_DS, iemOp_pop_DS,
11538 /* 0x20 */ iemOp_and_Eb_Gb, iemOp_and_Ev_Gv, iemOp_and_Gb_Eb, iemOp_and_Gv_Ev,
11539 /* 0x24 */ iemOp_and_Al_Ib, iemOp_and_eAX_Iz, iemOp_seg_ES, iemOp_daa,
11540 /* 0x28 */ iemOp_sub_Eb_Gb, iemOp_sub_Ev_Gv, iemOp_sub_Gb_Eb, iemOp_sub_Gv_Ev,
11541 /* 0x2c */ iemOp_sub_Al_Ib, iemOp_sub_eAX_Iz, iemOp_seg_CS, iemOp_das,
11542 /* 0x30 */ iemOp_xor_Eb_Gb, iemOp_xor_Ev_Gv, iemOp_xor_Gb_Eb, iemOp_xor_Gv_Ev,
11543 /* 0x34 */ iemOp_xor_Al_Ib, iemOp_xor_eAX_Iz, iemOp_seg_SS, iemOp_aaa,
11544 /* 0x38 */ iemOp_cmp_Eb_Gb, iemOp_cmp_Ev_Gv, iemOp_cmp_Gb_Eb, iemOp_cmp_Gv_Ev,
11545 /* 0x3c */ iemOp_cmp_Al_Ib, iemOp_cmp_eAX_Iz, iemOp_seg_DS, iemOp_aas,
11546 /* 0x40 */ iemOp_inc_eAX, iemOp_inc_eCX, iemOp_inc_eDX, iemOp_inc_eBX,
11547 /* 0x44 */ iemOp_inc_eSP, iemOp_inc_eBP, iemOp_inc_eSI, iemOp_inc_eDI,
11548 /* 0x48 */ iemOp_dec_eAX, iemOp_dec_eCX, iemOp_dec_eDX, iemOp_dec_eBX,
11549 /* 0x4c */ iemOp_dec_eSP, iemOp_dec_eBP, iemOp_dec_eSI, iemOp_dec_eDI,
11550 /* 0x50 */ iemOp_push_eAX, iemOp_push_eCX, iemOp_push_eDX, iemOp_push_eBX,
11551 /* 0x54 */ iemOp_push_eSP, iemOp_push_eBP, iemOp_push_eSI, iemOp_push_eDI,
11552 /* 0x58 */ iemOp_pop_eAX, iemOp_pop_eCX, iemOp_pop_eDX, iemOp_pop_eBX,
11553 /* 0x5c */ iemOp_pop_eSP, iemOp_pop_eBP, iemOp_pop_eSI, iemOp_pop_eDI,
11554 /* 0x60 */ iemOp_pusha, iemOp_popa, iemOp_bound_Gv_Ma, iemOp_arpl_Ew_Gw,
11555 /* 0x64 */ iemOp_seg_FS, iemOp_seg_GS, iemOp_op_size, iemOp_addr_size,
11556 /* 0x68 */ iemOp_push_Iz, iemOp_imul_Gv_Ev_Iz, iemOp_push_Ib, iemOp_imul_Gv_Ev_Ib,
11557 /* 0x6c */ iemOp_insb_Yb_DX, iemOp_inswd_Yv_DX, iemOp_outsb_Yb_DX, iemOp_outswd_Yv_DX,
11558 /* 0x70 */ iemOp_jo_Jb, iemOp_jno_Jb, iemOp_jc_Jb, iemOp_jnc_Jb,
11559 /* 0x74 */ iemOp_je_Jb, iemOp_jne_Jb, iemOp_jbe_Jb, iemOp_jnbe_Jb,
11560 /* 0x78 */ iemOp_js_Jb, iemOp_jns_Jb, iemOp_jp_Jb, iemOp_jnp_Jb,
11561 /* 0x7c */ iemOp_jl_Jb, iemOp_jnl_Jb, iemOp_jle_Jb, iemOp_jnle_Jb,
11562 /* 0x80 */ iemOp_Grp1_Eb_Ib_80, iemOp_Grp1_Ev_Iz, iemOp_Grp1_Eb_Ib_82, iemOp_Grp1_Ev_Ib,
11563 /* 0x84 */ iemOp_test_Eb_Gb, iemOp_test_Ev_Gv, iemOp_xchg_Eb_Gb, iemOp_xchg_Ev_Gv,
11564 /* 0x88 */ iemOp_mov_Eb_Gb, iemOp_mov_Ev_Gv, iemOp_mov_Gb_Eb, iemOp_mov_Gv_Ev,
11565 /* 0x8c */ iemOp_mov_Ev_Sw, iemOp_lea_Gv_M, iemOp_mov_Sw_Ev, iemOp_pop_Ev,
11566 /* 0x90 */ iemOp_nop, iemOp_xchg_eCX_eAX, iemOp_xchg_eDX_eAX, iemOp_xchg_eBX_eAX,
11567 /* 0x94 */ iemOp_xchg_eSP_eAX, iemOp_xchg_eBP_eAX, iemOp_xchg_eSI_eAX, iemOp_xchg_eDI_eAX,
11568 /* 0x98 */ iemOp_cbw, iemOp_cwd, iemOp_call_Ap, iemOp_wait,
11569 /* 0x9c */ iemOp_pushf_Fv, iemOp_popf_Fv, iemOp_sahf, iemOp_lahf,
11570 /* 0xa0 */ iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL, iemOp_mov_Ov_rAX,
11571 /* 0xa4 */ iemOp_movsb_Xb_Yb, iemOp_movswd_Xv_Yv, iemOp_cmpsb_Xb_Yb, iemOp_cmpswd_Xv_Yv,
11572 /* 0xa8 */ iemOp_test_AL_Ib, iemOp_test_eAX_Iz, iemOp_stosb_Yb_AL, iemOp_stoswd_Yv_eAX,
11573 /* 0xac */ iemOp_lodsb_AL_Xb, iemOp_lodswd_eAX_Xv, iemOp_scasb_AL_Xb, iemOp_scaswd_eAX_Xv,
11574 /* 0xb0 */ iemOp_mov_AL_Ib, iemOp_CL_Ib, iemOp_DL_Ib, iemOp_BL_Ib,
11575 /* 0xb4 */ iemOp_mov_AH_Ib, iemOp_CH_Ib, iemOp_DH_Ib, iemOp_BH_Ib,
11576 /* 0xb8 */ iemOp_eAX_Iv, iemOp_eCX_Iv, iemOp_eDX_Iv, iemOp_eBX_Iv,
11577 /* 0xbc */ iemOp_eSP_Iv, iemOp_eBP_Iv, iemOp_eSI_Iv, iemOp_eDI_Iv,
11578 /* 0xc0 */ iemOp_Grp2_Eb_Ib, iemOp_Grp2_Ev_Ib, iemOp_retn_Iw, iemOp_retn,
11579 /* 0xc4 */ iemOp_les_Gv_Mp, iemOp_lds_Gv_Mp, iemOp_Grp11_Eb_Ib, iemOp_Grp11_Ev_Iz,
11580 /* 0xc8 */ iemOp_enter_Iw_Ib, iemOp_leave, iemOp_retf_Iw, iemOp_retf,
11581 /* 0xcc */ iemOp_int_3, iemOp_int_Ib, iemOp_into, iemOp_iret,
11582 /* 0xd0 */ iemOp_Grp2_Eb_1, iemOp_Grp2_Ev_1, iemOp_Grp2_Eb_CL, iemOp_Grp2_Ev_CL,
11583 /* 0xd4 */ iemOp_aam_Ib, iemOp_aad_Ib, iemOp_Invalid, iemOp_xlat,
11584 /* 0xd8 */ iemOp_EscF0, iemOp_EscF1, iemOp_EscF2, iemOp_EscF3,
11585 /* 0xdc */ iemOp_EscF4, iemOp_EscF5, iemOp_EscF6, iemOp_EscF7,
11586 /* 0xe0 */ iemOp_loopne_Jb, iemOp_loope_Jb, iemOp_loop_Jb, iemOp_jecxz_Jb,
11587 /* 0xe4 */ iemOp_in_AL_Ib, iemOp_in_eAX_Ib, iemOp_out_Ib_AL, iemOp_out_Ib_eAX,
11588 /* 0xe8 */ iemOp_call_Jv, iemOp_jmp_Jv, iemOp_jmp_Ap, iemOp_jmp_Jb,
11589 /* 0xec */ iemOp_in_AL_DX, iemOp_eAX_DX, iemOp_out_DX_AL, iemOp_out_DX_eAX,
11590 /* 0xf0 */ iemOp_lock, iemOp_Invalid, iemOp_repne, iemOp_repe, /** @todo 0xf1 is INT1 / ICEBP. */
11591 /* 0xf4 */ iemOp_hlt, iemOp_cmc, iemOp_Grp3_Eb, iemOp_Grp3_Ev,
11592 /* 0xf8 */ iemOp_clc, iemOp_stc, iemOp_cli, iemOp_sti,
11593 /* 0xfc */ iemOp_cld, iemOp_std, iemOp_Grp4, iemOp_Grp5,
11594};
11595
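/*
 * Illustration only: how the table above would typically be consumed by the
 * decoder -- fetch one opcode byte and dispatch through the map.  The
 * wrapper name and its standalone form are assumptions for exposition; only
 * the map itself is real.
 */
#if 0
static VBOXSTRICTRC iemExampleDecodeOneByte(void)
{
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
#endif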
11596
11597/** @} */
11598