VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h @ 39402

Last change on this file since 39402 was 39127, checked in by vboxsync, 13 years ago

IEM: Adding GET_NEXT_U16_ZX_U32/64 and GET_NEXT_U32_ZX_U64.

/* $Id: IEMAllInstructions.cpp.h 39127 2011-10-27 11:42:34Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation.
 */

/*
 * Copyright (C) 2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */


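/*
 * Note on the pattern used throughout this file: each opcode function first
 * fetches any remaining opcode bytes (ModR/M, immediates) and then describes
 * the operation as "microcode" between IEM_MC_BEGIN and IEM_MC_END.  The
 * ModR/M byte is laid out as mod:2 reg:3 rm:3 from the most significant bit
 * down; a mod field of 3 selects the register form, anything else a memory
 * operand.  For example, the byte 0xd8 decodes as mod=3, reg=3, rm=0.
 */
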
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, u8Src, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, u8Src, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}

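/*
 * Remark on the worker above: instructions of the "op Eb,Gb" shape, e.g.
 * "add Eb,Gb" (opcode 0x00 /r), are the kind routed here.  pImpl->pfnLockedU8
 * also doubles as a "has a locked variant" marker; CMP and TEST never write
 * their destination, so their tables presumably leave the locked entries
 * NULL, which is why fAccess degrades to IEM_ACCESS_DATA_R for them.
 */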

/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
 * memory/register as the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}

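/*
 * Note that the worker above tests pfnLockedU8 even though it only ever calls
 * the U16/U32/U64 variants; whether an instruction has locked forms does not
 * depend on the operand size, so any member serves as the marker.
 */
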
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, u8Src, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, u8Src, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
 * register as the destination.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t, u32Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t, u64Src, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Common worker for instructions like ADD, AND, OR, ++ that operate on AL
 * with a byte immediate.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *, pu8Dst, 0);
    IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/ u8Imm, 1);
    IEM_MC_ARG(uint32_t *, pEFlags, 2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
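
/*
 * The worker above handles the short accumulator forms, e.g. "add al, imm8"
 * (opcode 0x04 ib): there is no ModR/M byte, AL is implicit and the immediate
 * is the only operand byte fetched.
 */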


/**
 * Common worker for instructions like ADD, AND, OR, ++ that operate on
 * AX/EAX/RAX with a word/dword immediate.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *, pu16Dst, 0);
            IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm, 1);
            IEM_MC_ARG(uint32_t *, pEFlags, 2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm, 1);
            IEM_MC_ARG(uint32_t *, pEFlags, 2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
            IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm, 1);
            IEM_MC_ARG(uint32_t *, pEFlags, 2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
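
/*
 * Iz above means a word or doubleword immediate, never a quadword: in 64-bit
 * mode the 32-bit immediate is sign-extended to 64 bits, which is what
 * IEM_OPCODE_GET_NEXT_S32_SX_U64 implements.
 */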


/** Opcodes 0xf1, 0xd6. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}



/** @name ..... opcodes.
 *
 * @{
 */

/** @} */


/** @name Two byte opcodes (first byte 0x0f).
 *
 * @{
 */

/** Opcode 0x0f 0x00 /0. */
FNIEMOP_STUB_1(iemOp_Grp6_sldt, uint8_t, bRm);


/** Opcode 0x0f 0x00 /1. */
FNIEMOP_STUB_1(iemOp_Grp6_str, uint8_t, bRm);


/** Opcode 0x0f 0x00 /2. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
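
/*
 * Note that only the memory form above checks CPL explicitly; the register
 * form presumably leaves the #GP(0) check to iemCImpl_lldt.  Doing the check
 * before IEM_MC_FETCH_MEM_U16 keeps an unprivileged caller from taking a
 * memory fault ahead of the privilege fault.
 */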


/** Opcode 0x0f 0x00 /3. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x00 /4. */
FNIEMOP_STUB_1(iemOp_Grp6_verr, uint8_t, bRm);


/** Opcode 0x0f 0x00 /5. */
FNIEMOP_STUB_1(iemOp_Grp6_verw, uint8_t, bRm);


/** Opcode 0x0f 0x00. */
FNIEMOP_DEF(iemOp_Grp6)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
        case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str, bRm);
        case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
        case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr, bRm);
        case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
        case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        case 7: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /0. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /1. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /2. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    AssertFailed();
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x01 /3. */
FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/pIemCpu->iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /4. */
FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tmp);
                IEM_MC_FETCH_CR0_U16(u16Tmp);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tmp);
                IEM_MC_FETCH_CR0_U32(u32Tmp);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_FETCH_CR0_U64(u64Tmp);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Ignore operand size here, memory refs are always 16-bit. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tmp);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_CR0_U16(u16Tmp);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
}


/** Opcode 0x0f 0x01 /6. */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower four bits are used. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    NOREF(pIemCpu); NOREF(bRm);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    NOREF(pIemCpu);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}


/** Opcode 0x0f 0x01. */
FNIEMOP_DEF(iemOp_Grp7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 1:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            return IEMOP_RAISE_INVALID_OPCODE();

        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5:
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7:
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
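
/*
 * Worked example for the Grp7 decoder above: the byte sequence 0f 01 d0 has
 * ModR/M byte 0xd0 = mod=3, reg=2, rm=0, so it takes "case 2", fails the
 * mod != 3 test and dispatches to iemOp_Grp7_xgetbv via the inner switch.
 */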


/** Opcode 0x0f 0x02. */
FNIEMOP_STUB(iemOp_lar_Gv_Ew);
/** Opcode 0x0f 0x03. */
FNIEMOP_STUB(iemOp_lsl_Gv_Ew);
/** Opcode 0x0f 0x05. */
FNIEMOP_STUB(iemOp_syscall);


/** Opcode 0x0f 0x06. */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}


/** Opcode 0x0f 0x07. */
FNIEMOP_STUB(iemOp_sysret);
/** Opcode 0x0f 0x08. */
FNIEMOP_STUB(iemOp_invd);
/** Opcode 0x0f 0x09. */
FNIEMOP_STUB(iemOp_wbinvd);
/** Opcode 0x0f 0x0b. */
FNIEMOP_STUB(iemOp_ud2);
/** Opcode 0x0f 0x0d. */
FNIEMOP_STUB(iemOp_nop_Ev_prefetch);
/** Opcode 0x0f 0x0e. */
FNIEMOP_STUB(iemOp_femms);
/** Opcode 0x0f 0x0f. */
FNIEMOP_STUB(iemOp_3Dnow);
/** Opcode 0x0f 0x10. */
FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x11. */
FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x12. */
FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq);
/** Opcode 0x0f 0x13. */
FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq);
/** Opcode 0x0f 0x14. */
FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
/** Opcode 0x0f 0x15. */
FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
/** Opcode 0x0f 0x16. */
FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq);
/** Opcode 0x0f 0x17. */
FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq);
/** Opcode 0x0f 0x18. */
FNIEMOP_STUB(iemOp_prefetch_Grp16);


/** Opcode 0x0f 0x20. */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as are operand-size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
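
/*
 * For MOV to/from control registers the mod field is documented as being
 * treated as the register form no matter its value, which is why the handler
 * above never computes an effective address and simply combines the rm field
 * with REX.B.
 */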


/** Opcode 0x0f 0x21. */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}


/** Opcode 0x0f 0x22. */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as are operand-size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    /** @todo Verify that the invalid lock sequence exception (\#UD) is raised
     *        before the privilege level violation (\#GP). */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
            return IEMOP_RAISE_INVALID_LOCK_PREFIX();
        iCrReg |= 8;
    }
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}


/** Opcode 0x0f 0x23. */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}


/** Opcode 0x0f 0x24. */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    IEMOP_MNEMONIC("mov Rd,Td");
    /** @todo Is the invalid opcode raised before parsing any R/M byte? */
    return IEMOP_RAISE_INVALID_OPCODE();
}



/** Opcode 0x0f 0x26. */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    IEMOP_MNEMONIC("mov Td,Rd");
    return IEMOP_RAISE_INVALID_OPCODE();
}


/** Opcode 0x0f 0x28. */
FNIEMOP_STUB(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd);
/** Opcode 0x0f 0x29. */
FNIEMOP_STUB(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd);
/** Opcode 0x0f 0x2a. */
FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey);
/** Opcode 0x0f 0x2b. */
FNIEMOP_STUB(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd);
/** Opcode 0x0f 0x2c. */
FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd);
/** Opcode 0x0f 0x2d. */
FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
/** Opcode 0x0f 0x2e. */
FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd);
/** Opcode 0x0f 0x2f. */
FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
/** Opcode 0x0f 0x30. */
FNIEMOP_STUB(iemOp_wrmsr);


/** Opcode 0x0f 0x31. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}


/** Opcode 0x0f 0x32. */
FNIEMOP_STUB(iemOp_rdmsr);
/** Opcode 0x0f 0x33. */
FNIEMOP_STUB(iemOp_rdpmc);
/** Opcode 0x0f 0x34. */
FNIEMOP_STUB(iemOp_sysenter);
/** Opcode 0x0f 0x35. */
FNIEMOP_STUB(iemOp_sysexit);
/** Opcode 0x0f 0x37. */
FNIEMOP_STUB(iemOp_getsec);
/** Opcode 0x0f 0x38. */
FNIEMOP_STUB(iemOp_3byte_Esc_A4);
/** Opcode 0x0f 0x3a. */
FNIEMOP_STUB(iemOp_3byte_Esc_A5);
/** Opcode 0x0f 0x3c (?). */
FNIEMOP_STUB(iemOp_movnti_Gv_Ev);

/**
 * Implements a conditional move.
 *
 * Wish there was an obvious way to do this where we could share and reduce
 * code bloat.
 *
 * @param   a_Cnd       The conditional "microcode" operation.
 */
#define CMOV_X(a_Cnd) \
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm); \
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
\
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } do {} while (0)
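
/*
 * The asymmetry in CMOV_X above is deliberate: with a 32-bit operand in
 * 64-bit mode the destination's high half is cleared even when the condition
 * is false, hence the IEM_MC_CLEAR_HIGH_GREG_U64 in the else branches of the
 * 32-bit cases, while the 16-bit and 64-bit cases leave the register alone.
 */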



/** Opcode 0x0f 0x40. */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x41. */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF));
}


/** Opcode 0x0f 0x42. */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x43. */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF));
}


/** Opcode 0x0f 0x44. */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x45. */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF));
}


/** Opcode 0x0f 0x46. */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x47. */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF));
}


/** Opcode 0x0f 0x48. */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x49. */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF));
}


/** Opcode 0x0f 0x4a. */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4b. */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF));
}


/** Opcode 0x0f 0x4c. */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4d. */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4e. */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}


/** Opcode 0x0f 0x4f. */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF));
}

#undef CMOV_X

/** Opcode 0x0f 0x50. */
FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
/** Opcode 0x0f 0x51. */
FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
/** Opcode 0x0f 0x52. */
FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
/** Opcode 0x0f 0x53. */
FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
/** Opcode 0x0f 0x54. */
FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
/** Opcode 0x0f 0x55. */
FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
/** Opcode 0x0f 0x56. */
FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
/** Opcode 0x0f 0x57. */
FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
/** Opcode 0x0f 0x58. */
FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd);
/** Opcode 0x0f 0x59. */
FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);
/** Opcode 0x0f 0x5a. */
FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
/** Opcode 0x0f 0x5b. */
FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
/** Opcode 0x0f 0x5c. */
FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
/** Opcode 0x0f 0x5d. */
FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
/** Opcode 0x0f 0x5e. */
FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
/** Opcode 0x0f 0x60. */
FNIEMOP_STUB(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq);
/** Opcode 0x0f 0x61. */
FNIEMOP_STUB(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq);
/** Opcode 0x0f 0x62. */
FNIEMOP_STUB(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq);
/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64. */
FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
/** Opcode 0x0f 0x65. */
FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
/** Opcode 0x0f 0x66. */
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
/** Opcode 0x0f 0x68. */
FNIEMOP_STUB(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq);
/** Opcode 0x0f 0x69. */
FNIEMOP_STUB(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq);
/** Opcode 0x0f 0x6a. */
FNIEMOP_STUB(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq);
/** Opcode 0x0f 0x6b. */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
/** Opcode 0x0f 0x6c. */
FNIEMOP_STUB(iemOp_punpcklqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6d. */
FNIEMOP_STUB(iemOp_punpckhqdq_Vdq_Wdq);
/** Opcode 0x0f 0x6e. */
FNIEMOP_STUB(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey);
/** Opcode 0x0f 0x6f. */
FNIEMOP_STUB(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq);
/** Opcode 0x0f 0x70. */
FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib);
/** Opcode 0x0f 0x71. */
FNIEMOP_STUB(iemOp_Grp12);
/** Opcode 0x0f 0x72. */
FNIEMOP_STUB(iemOp_Grp13);
/** Opcode 0x0f 0x73. */
FNIEMOP_STUB(iemOp_Grp14);
/** Opcode 0x0f 0x74. */
FNIEMOP_STUB(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq);
/** Opcode 0x0f 0x75. */
FNIEMOP_STUB(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq);
/** Opcode 0x0f 0x76. */
FNIEMOP_STUB(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq);
/** Opcode 0x0f 0x77. */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78. */
FNIEMOP_STUB(iemOp_vmread);
/** Opcode 0x0f 0x79. */
FNIEMOP_STUB(iemOp_vmwrite);
/** Opcode 0x0f 0x7c. */
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d. */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
/** Opcode 0x0f 0x7e. */
FNIEMOP_STUB(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq);
/** Opcode 0x0f 0x7f. */
FNIEMOP_STUB(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq);


/** Opcode 0x0f 0x80. */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
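
/*
 * All the Jcc Jv handlers below follow the template above: the displacement
 * is imm16 or imm32 depending on the effective operand size, and
 * IEMOP_HLP_DEFAULT_64BIT_OP_SIZE routes 64-bit mode to the 32-bit
 * (sign-extended) branch since near branches there default to a 64-bit
 * operand size.
 */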


/** Opcode 0x0f 0x81. */
FNIEMOP_DEF(iemOp_jno_Jv)
{
    IEMOP_MNEMONIC("jno Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x82. */
FNIEMOP_DEF(iemOp_jc_Jv)
{
    IEMOP_MNEMONIC("jc/jb/jnae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x83. */
FNIEMOP_DEF(iemOp_jnc_Jv)
{
    IEMOP_MNEMONIC("jnc/jnb/jae Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x84. */
FNIEMOP_DEF(iemOp_je_Jv)
{
    IEMOP_MNEMONIC("je/jz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x85. */
FNIEMOP_DEF(iemOp_jne_Jv)
{
    IEMOP_MNEMONIC("jne/jnz Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x86. */
FNIEMOP_DEF(iemOp_jbe_Jv)
{
    IEMOP_MNEMONIC("jbe/jna Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x87. */
FNIEMOP_DEF(iemOp_jnbe_Jv)
{
    IEMOP_MNEMONIC("jnbe/ja Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x88. */
FNIEMOP_DEF(iemOp_js_Jv)
{
    IEMOP_MNEMONIC("js Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x89. */
FNIEMOP_DEF(iemOp_jns_Jv)
{
    IEMOP_MNEMONIC("jns Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8a. */
FNIEMOP_DEF(iemOp_jp_Jv)
{
    IEMOP_MNEMONIC("jp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}


/** Opcode 0x0f 0x8b. */
FNIEMOP_DEF(iemOp_jnp_Jv)
{
    IEMOP_MNEMONIC("jnp Jv");
1838 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1839 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1840 {
1841 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1842 IEMOP_HLP_NO_LOCK_PREFIX();
1843
1844 IEM_MC_BEGIN(0, 0);
1845 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1846 IEM_MC_ADVANCE_RIP();
1847 } IEM_MC_ELSE() {
1848 IEM_MC_REL_JMP_S16(i16Imm);
1849 } IEM_MC_ENDIF();
1850 IEM_MC_END();
1851 }
1852 else
1853 {
1854 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1855 IEMOP_HLP_NO_LOCK_PREFIX();
1856
1857 IEM_MC_BEGIN(0, 0);
1858 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
1859 IEM_MC_ADVANCE_RIP();
1860 } IEM_MC_ELSE() {
1861 IEM_MC_REL_JMP_S32(i32Imm);
1862 } IEM_MC_ENDIF();
1863 IEM_MC_END();
1864 }
1865 return VINF_SUCCESS;
1866}
1867
1868
1869/** Opcode 0x0f 0x8c. */
1870FNIEMOP_DEF(iemOp_jl_Jv)
1871{
1872 IEMOP_MNEMONIC("jl/jnge Jv");
1873 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1874 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1875 {
1876 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1877 IEMOP_HLP_NO_LOCK_PREFIX();
1878
1879 IEM_MC_BEGIN(0, 0);
1880 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
1881 IEM_MC_REL_JMP_S16(i16Imm);
1882 } IEM_MC_ELSE() {
1883 IEM_MC_ADVANCE_RIP();
1884 } IEM_MC_ENDIF();
1885 IEM_MC_END();
1886 }
1887 else
1888 {
1889 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1890 IEMOP_HLP_NO_LOCK_PREFIX();
1891
1892 IEM_MC_BEGIN(0, 0);
1893 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
1894 IEM_MC_REL_JMP_S32(i32Imm);
1895 } IEM_MC_ELSE() {
1896 IEM_MC_ADVANCE_RIP();
1897 } IEM_MC_ENDIF();
1898 IEM_MC_END();
1899 }
1900 return VINF_SUCCESS;
1901}
1902
1903
1904/** Opcode 0x0f 0x8d. */
1905FNIEMOP_DEF(iemOp_jnl_Jv)
1906{
1907 IEMOP_MNEMONIC("jnl/jge Jv");
1908 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1909 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1910 {
1911 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1912 IEMOP_HLP_NO_LOCK_PREFIX();
1913
1914 IEM_MC_BEGIN(0, 0);
1915 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
1916 IEM_MC_ADVANCE_RIP();
1917 } IEM_MC_ELSE() {
1918 IEM_MC_REL_JMP_S16(i16Imm);
1919 } IEM_MC_ENDIF();
1920 IEM_MC_END();
1921 }
1922 else
1923 {
1924 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1925 IEMOP_HLP_NO_LOCK_PREFIX();
1926
1927 IEM_MC_BEGIN(0, 0);
1928 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
1929 IEM_MC_ADVANCE_RIP();
1930 } IEM_MC_ELSE() {
1931 IEM_MC_REL_JMP_S32(i32Imm);
1932 } IEM_MC_ENDIF();
1933 IEM_MC_END();
1934 }
1935 return VINF_SUCCESS;
1936}
1937
1938
1939/** Opcode 0x0f 0x8e. */
1940FNIEMOP_DEF(iemOp_jle_Jv)
1941{
1942 IEMOP_MNEMONIC("jle/jng Jv");
1943 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1944 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1945 {
1946 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1947 IEMOP_HLP_NO_LOCK_PREFIX();
1948
1949 IEM_MC_BEGIN(0, 0);
1950 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
1951 IEM_MC_REL_JMP_S16(i16Imm);
1952 } IEM_MC_ELSE() {
1953 IEM_MC_ADVANCE_RIP();
1954 } IEM_MC_ENDIF();
1955 IEM_MC_END();
1956 }
1957 else
1958 {
1959 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1960 IEMOP_HLP_NO_LOCK_PREFIX();
1961
1962 IEM_MC_BEGIN(0, 0);
1963 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
1964 IEM_MC_REL_JMP_S32(i32Imm);
1965 } IEM_MC_ELSE() {
1966 IEM_MC_ADVANCE_RIP();
1967 } IEM_MC_ENDIF();
1968 IEM_MC_END();
1969 }
1970 return VINF_SUCCESS;
1971}
1972
1973
1974/** Opcode 0x0f 0x8f. */
1975FNIEMOP_DEF(iemOp_jnle_Jv)
1976{
1977 IEMOP_MNEMONIC("jnle/jg Jv");
1978 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
1979 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
1980 {
1981 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
1982 IEMOP_HLP_NO_LOCK_PREFIX();
1983
1984 IEM_MC_BEGIN(0, 0);
1985 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
1986 IEM_MC_ADVANCE_RIP();
1987 } IEM_MC_ELSE() {
1988 IEM_MC_REL_JMP_S16(i16Imm);
1989 } IEM_MC_ENDIF();
1990 IEM_MC_END();
1991 }
1992 else
1993 {
1994 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
1995 IEMOP_HLP_NO_LOCK_PREFIX();
1996
1997 IEM_MC_BEGIN(0, 0);
1998 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
1999 IEM_MC_ADVANCE_RIP();
2000 } IEM_MC_ELSE() {
2001 IEM_MC_REL_JMP_S32(i32Imm);
2002 } IEM_MC_ENDIF();
2003 IEM_MC_END();
2004 }
2005 return VINF_SUCCESS;
2006}
2007
2008
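/*
 * Note: Each SETcc decoder below stores a single byte, 1 if the condition
 *       holds and 0 if it doesn't, to a byte register or to memory; the
 *       reg field of the ModR/M byte selects nothing (see the encoding
 *       @todo in each function). For instance, after 'cmp eax, eax' the
 *       sequence 'sete al' (0F 94 C0) leaves AL = 1.
 */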
2009/** Opcode 0x0f 0x90. */
2010FNIEMOP_DEF(iemOp_seto_Eb)
2011{
2012 IEMOP_MNEMONIC("seto Eb");
2013 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2014 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2015
2016 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2017 * any way. AMD says it's "unused", whatever that means. We're
2018 * ignoring for now. */
2019 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2020 {
2021 /* register target */
2022 IEM_MC_BEGIN(0, 0);
2023 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2024 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2025 } IEM_MC_ELSE() {
2026 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2027 } IEM_MC_ENDIF();
2028 IEM_MC_ADVANCE_RIP();
2029 IEM_MC_END();
2030 }
2031 else
2032 {
2033 /* memory target */
2034 IEM_MC_BEGIN(0, 1);
2035 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2036 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2037 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2038 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2039 } IEM_MC_ELSE() {
2040 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2041 } IEM_MC_ENDIF();
2042 IEM_MC_ADVANCE_RIP();
2043 IEM_MC_END();
2044 }
2045 return VINF_SUCCESS;
2046}
2047
2048
2049/** Opcode 0x0f 0x91. */
2050FNIEMOP_DEF(iemOp_setno_Eb)
2051{
2052 IEMOP_MNEMONIC("setno Eb");
2053 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2054 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2055
2056 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2057 * any way. AMD says it's "unused", whatever that means. We're
2058 * ignoring for now. */
2059 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2060 {
2061 /* register target */
2062 IEM_MC_BEGIN(0, 0);
2063 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2064 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2065 } IEM_MC_ELSE() {
2066 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2067 } IEM_MC_ENDIF();
2068 IEM_MC_ADVANCE_RIP();
2069 IEM_MC_END();
2070 }
2071 else
2072 {
2073 /* memory target */
2074 IEM_MC_BEGIN(0, 1);
2075 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2076 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2077 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
2078 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2079 } IEM_MC_ELSE() {
2080 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2081 } IEM_MC_ENDIF();
2082 IEM_MC_ADVANCE_RIP();
2083 IEM_MC_END();
2084 }
2085 return VINF_SUCCESS;
2086}
2087
2088
2089/** Opcode 0x0f 0x92. */
2090FNIEMOP_DEF(iemOp_setc_Eb)
2091{
2092 IEMOP_MNEMONIC("setc Eb");
2093 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2094 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2095
2096 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2097 * any way. AMD says it's "unused", whatever that means. We're
2098 * ignoring for now. */
2099 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2100 {
2101 /* register target */
2102 IEM_MC_BEGIN(0, 0);
2103 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2104 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2105 } IEM_MC_ELSE() {
2106 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2107 } IEM_MC_ENDIF();
2108 IEM_MC_ADVANCE_RIP();
2109 IEM_MC_END();
2110 }
2111 else
2112 {
2113 /* memory target */
2114 IEM_MC_BEGIN(0, 1);
2115 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2116 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2117 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2118 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2119 } IEM_MC_ELSE() {
2120 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2121 } IEM_MC_ENDIF();
2122 IEM_MC_ADVANCE_RIP();
2123 IEM_MC_END();
2124 }
2125 return VINF_SUCCESS;
2126}
2127
2128
2129/** Opcode 0x0f 0x93. */
2130FNIEMOP_DEF(iemOp_setnc_Eb)
2131{
2132 IEMOP_MNEMONIC("setnc Eb");
2133 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2134 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2135
2136 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2137 * any way. AMD says it's "unused", whatever that means. We're
2138 * ignoring for now. */
2139 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2140 {
2141 /* register target */
2142 IEM_MC_BEGIN(0, 0);
2143 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2144 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2145 } IEM_MC_ELSE() {
2146 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2147 } IEM_MC_ENDIF();
2148 IEM_MC_ADVANCE_RIP();
2149 IEM_MC_END();
2150 }
2151 else
2152 {
2153 /* memory target */
2154 IEM_MC_BEGIN(0, 1);
2155 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2156 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2157 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
2158 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2159 } IEM_MC_ELSE() {
2160 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2161 } IEM_MC_ENDIF();
2162 IEM_MC_ADVANCE_RIP();
2163 IEM_MC_END();
2164 }
2165 return VINF_SUCCESS;
2166}
2167
2168
2169/** Opcode 0x0f 0x94. */
2170FNIEMOP_DEF(iemOp_sete_Eb)
2171{
2172 IEMOP_MNEMONIC("sete Eb");
2173 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2174 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2175
2176 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2177 * any way. AMD says it's "unused", whatever that means. We're
2178 * ignoring for now. */
2179 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2180 {
2181 /* register target */
2182 IEM_MC_BEGIN(0, 0);
2183 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2184 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2185 } IEM_MC_ELSE() {
2186 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2187 } IEM_MC_ENDIF();
2188 IEM_MC_ADVANCE_RIP();
2189 IEM_MC_END();
2190 }
2191 else
2192 {
2193 /* memory target */
2194 IEM_MC_BEGIN(0, 1);
2195 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2196 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2197 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2198 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2199 } IEM_MC_ELSE() {
2200 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2201 } IEM_MC_ENDIF();
2202 IEM_MC_ADVANCE_RIP();
2203 IEM_MC_END();
2204 }
2205 return VINF_SUCCESS;
2206}
2207
2208
2209/** Opcode 0x0f 0x95. */
2210FNIEMOP_DEF(iemOp_setne_Eb)
2211{
2212 IEMOP_MNEMONIC("setne Eb");
2213 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2214 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2215
2216 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2217 * any way. AMD says it's "unused", whatever that means. We're
2218 * ignoring for now. */
2219 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2220 {
2221 /* register target */
2222 IEM_MC_BEGIN(0, 0);
2223 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2224 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2225 } IEM_MC_ELSE() {
2226 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2227 } IEM_MC_ENDIF();
2228 IEM_MC_ADVANCE_RIP();
2229 IEM_MC_END();
2230 }
2231 else
2232 {
2233 /* memory target */
2234 IEM_MC_BEGIN(0, 1);
2235 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2236 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2237 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
2238 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2239 } IEM_MC_ELSE() {
2240 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2241 } IEM_MC_ENDIF();
2242 IEM_MC_ADVANCE_RIP();
2243 IEM_MC_END();
2244 }
2245 return VINF_SUCCESS;
2246}
2247
2248
2249/** Opcode 0x0f 0x96. */
2250FNIEMOP_DEF(iemOp_setbe_Eb)
2251{
2252 IEMOP_MNEMONIC("setbe Eb");
2253 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2254 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2255
2256 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2257 * any way. AMD says it's "unused", whatever that means. We're
2258 * ignoring for now. */
2259 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2260 {
2261 /* register target */
2262 IEM_MC_BEGIN(0, 0);
2263 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2264 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2265 } IEM_MC_ELSE() {
2266 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2267 } IEM_MC_ENDIF();
2268 IEM_MC_ADVANCE_RIP();
2269 IEM_MC_END();
2270 }
2271 else
2272 {
2273 /* memory target */
2274 IEM_MC_BEGIN(0, 1);
2275 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2276 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2277 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2278 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2279 } IEM_MC_ELSE() {
2280 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2281 } IEM_MC_ENDIF();
2282 IEM_MC_ADVANCE_RIP();
2283 IEM_MC_END();
2284 }
2285 return VINF_SUCCESS;
2286}
2287
2288
2289/** Opcode 0x0f 0x97. */
2290FNIEMOP_DEF(iemOp_setnbe_Eb)
2291{
2292 IEMOP_MNEMONIC("setnbe Eb");
2293 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2294 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2295
2296 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2297 * any way. AMD says it's "unused", whatever that means. We're
2298 * ignoring for now. */
2299 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2300 {
2301 /* register target */
2302 IEM_MC_BEGIN(0, 0);
2303 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2304 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2305 } IEM_MC_ELSE() {
2306 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2307 } IEM_MC_ENDIF();
2308 IEM_MC_ADVANCE_RIP();
2309 IEM_MC_END();
2310 }
2311 else
2312 {
2313 /* memory target */
2314 IEM_MC_BEGIN(0, 1);
2315 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2316 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2317 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
2318 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2319 } IEM_MC_ELSE() {
2320 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2321 } IEM_MC_ENDIF();
2322 IEM_MC_ADVANCE_RIP();
2323 IEM_MC_END();
2324 }
2325 return VINF_SUCCESS;
2326}
2327
2328
2329/** Opcode 0x0f 0x98. */
2330FNIEMOP_DEF(iemOp_sets_Eb)
2331{
2332 IEMOP_MNEMONIC("sets Eb");
2333 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2334 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2335
2336 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2337 * any way. AMD says it's "unused", whatever that means. We're
2338 * ignoring for now. */
2339 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2340 {
2341 /* register target */
2342 IEM_MC_BEGIN(0, 0);
2343 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2344 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2345 } IEM_MC_ELSE() {
2346 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2347 } IEM_MC_ENDIF();
2348 IEM_MC_ADVANCE_RIP();
2349 IEM_MC_END();
2350 }
2351 else
2352 {
2353 /* memory target */
2354 IEM_MC_BEGIN(0, 1);
2355 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2356 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2357 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2358 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2359 } IEM_MC_ELSE() {
2360 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2361 } IEM_MC_ENDIF();
2362 IEM_MC_ADVANCE_RIP();
2363 IEM_MC_END();
2364 }
2365 return VINF_SUCCESS;
2366}
2367
2368
2369/** Opcode 0x0f 0x99. */
2370FNIEMOP_DEF(iemOp_setns_Eb)
2371{
2372 IEMOP_MNEMONIC("setns Eb");
2373 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2374 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2375
2376 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2377 * any way. AMD says it's "unused", whatever that means. We're
2378 * ignoring for now. */
2379 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2380 {
2381 /* register target */
2382 IEM_MC_BEGIN(0, 0);
2383 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2384 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2385 } IEM_MC_ELSE() {
2386 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2387 } IEM_MC_ENDIF();
2388 IEM_MC_ADVANCE_RIP();
2389 IEM_MC_END();
2390 }
2391 else
2392 {
2393 /* memory target */
2394 IEM_MC_BEGIN(0, 1);
2395 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2396 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2397 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
2398 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2399 } IEM_MC_ELSE() {
2400 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2401 } IEM_MC_ENDIF();
2402 IEM_MC_ADVANCE_RIP();
2403 IEM_MC_END();
2404 }
2405 return VINF_SUCCESS;
2406}
2407
2408
2409/** Opcode 0x0f 0x9a. */
2410FNIEMOP_DEF(iemOp_setp_Eb)
2411{
2412 IEMOP_MNEMONIC("setp Eb");
2413 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2414 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2415
2416 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2417 * any way. AMD says it's "unused", whatever that means. We're
2418 * ignoring for now. */
2419 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2420 {
2421 /* register target */
2422 IEM_MC_BEGIN(0, 0);
2423 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2424 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2425 } IEM_MC_ELSE() {
2426 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2427 } IEM_MC_ENDIF();
2428 IEM_MC_ADVANCE_RIP();
2429 IEM_MC_END();
2430 }
2431 else
2432 {
2433 /* memory target */
2434 IEM_MC_BEGIN(0, 1);
2435 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2436 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2437 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2438 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2439 } IEM_MC_ELSE() {
2440 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2441 } IEM_MC_ENDIF();
2442 IEM_MC_ADVANCE_RIP();
2443 IEM_MC_END();
2444 }
2445 return VINF_SUCCESS;
2446}
2447
2448
2449/** Opcode 0x0f 0x9b. */
2450FNIEMOP_DEF(iemOp_setnp_Eb)
2451{
2452 IEMOP_MNEMONIC("setnp Eb");
2453 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2454 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2455
2456 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2457 * any way. AMD says it's "unused", whatever that means. We're
2458 * ignoring for now. */
2459 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2460 {
2461 /* register target */
2462 IEM_MC_BEGIN(0, 0);
2463 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2464 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2465 } IEM_MC_ELSE() {
2466 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2467 } IEM_MC_ENDIF();
2468 IEM_MC_ADVANCE_RIP();
2469 IEM_MC_END();
2470 }
2471 else
2472 {
2473 /* memory target */
2474 IEM_MC_BEGIN(0, 1);
2475 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2476 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2477 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
2478 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2479 } IEM_MC_ELSE() {
2480 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2481 } IEM_MC_ENDIF();
2482 IEM_MC_ADVANCE_RIP();
2483 IEM_MC_END();
2484 }
2485 return VINF_SUCCESS;
2486}
2487
2488
2489/** Opcode 0x0f 0x9c. */
2490FNIEMOP_DEF(iemOp_setl_Eb)
2491{
2492 IEMOP_MNEMONIC("setl Eb");
2493 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2494 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2495
2496 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2497 * any way. AMD says it's "unused", whatever that means. We're
2498 * ignoring for now. */
2499 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2500 {
2501 /* register target */
2502 IEM_MC_BEGIN(0, 0);
2503 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2504 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2505 } IEM_MC_ELSE() {
2506 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2507 } IEM_MC_ENDIF();
2508 IEM_MC_ADVANCE_RIP();
2509 IEM_MC_END();
2510 }
2511 else
2512 {
2513 /* memory target */
2514 IEM_MC_BEGIN(0, 1);
2515 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2516 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2517 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2518 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2519 } IEM_MC_ELSE() {
2520 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2521 } IEM_MC_ENDIF();
2522 IEM_MC_ADVANCE_RIP();
2523 IEM_MC_END();
2524 }
2525 return VINF_SUCCESS;
2526}
2527
2528
2529/** Opcode 0x0f 0x9d. */
2530FNIEMOP_DEF(iemOp_setnl_Eb)
2531{
2532 IEMOP_MNEMONIC("setnl Eb");
2533 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2534 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2535
2536 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2537 * any way. AMD says it's "unused", whatever that means. We're
2538 * ignoring for now. */
2539 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2540 {
2541 /* register target */
2542 IEM_MC_BEGIN(0, 0);
2543 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2544 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2545 } IEM_MC_ELSE() {
2546 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2547 } IEM_MC_ENDIF();
2548 IEM_MC_ADVANCE_RIP();
2549 IEM_MC_END();
2550 }
2551 else
2552 {
2553 /* memory target */
2554 IEM_MC_BEGIN(0, 1);
2555 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2556 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2557 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
2558 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2559 } IEM_MC_ELSE() {
2560 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2561 } IEM_MC_ENDIF();
2562 IEM_MC_ADVANCE_RIP();
2563 IEM_MC_END();
2564 }
2565 return VINF_SUCCESS;
2566}
2567
2568
2569/** Opcode 0x0f 0x9e. */
2570FNIEMOP_DEF(iemOp_setle_Eb)
2571{
2572 IEMOP_MNEMONIC("setle Eb");
2573 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2574 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2575
2576 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2577 * any way. AMD says it's "unused", whatever that means. We're
2578 * ignoring for now. */
2579 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2580 {
2581 /* register target */
2582 IEM_MC_BEGIN(0, 0);
2583 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2584 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2585 } IEM_MC_ELSE() {
2586 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2587 } IEM_MC_ENDIF();
2588 IEM_MC_ADVANCE_RIP();
2589 IEM_MC_END();
2590 }
2591 else
2592 {
2593 /* memory target */
2594 IEM_MC_BEGIN(0, 1);
2595 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2596 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2597 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2598 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2599 } IEM_MC_ELSE() {
2600 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2601 } IEM_MC_ENDIF();
2602 IEM_MC_ADVANCE_RIP();
2603 IEM_MC_END();
2604 }
2605 return VINF_SUCCESS;
2606}
2607
2608
2609/** Opcode 0x0f 0x9f. */
2610FNIEMOP_DEF(iemOp_setnle_Eb)
2611{
2612 IEMOP_MNEMONIC("setnle Eb");
2613 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2614 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2615
2616 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
2617 * any way. AMD says it's "unused", whatever that means. We're
2618 * ignoring for now. */
2619 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2620 {
2621 /* register target */
2622 IEM_MC_BEGIN(0, 0);
2623 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2624 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
2625 } IEM_MC_ELSE() {
2626 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
2627 } IEM_MC_ENDIF();
2628 IEM_MC_ADVANCE_RIP();
2629 IEM_MC_END();
2630 }
2631 else
2632 {
2633 /* memory target */
2634 IEM_MC_BEGIN(0, 1);
2635 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2636 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2637 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
2638 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
2639 } IEM_MC_ELSE() {
2640 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
2641 } IEM_MC_ENDIF();
2642 IEM_MC_ADVANCE_RIP();
2643 IEM_MC_END();
2644 }
2645 return VINF_SUCCESS;
2646}
2647
2648
2649/**
2650 * Common 'push segment-register' helper.
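 *
 * In 64-bit mode only FS and GS can be pushed this way; the legacy segment
 * registers take the IEMOP_HLP_NO_64BIT() path below. The selector value is
 * zero extended to the effective push width, which defaults to 64-bit in
 * long mode.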
2651 */
2652FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
2653{
2654 IEMOP_HLP_NO_LOCK_PREFIX();
2655 if (iReg < X86_SREG_FS)
2656 IEMOP_HLP_NO_64BIT();
2657 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
2658
2659 switch (pIemCpu->enmEffOpSize)
2660 {
2661 case IEMMODE_16BIT:
2662 IEM_MC_BEGIN(0, 1);
2663 IEM_MC_LOCAL(uint16_t, u16Value);
2664 IEM_MC_FETCH_SREG_U16(u16Value, iReg);
2665 IEM_MC_PUSH_U16(u16Value);
2666 IEM_MC_ADVANCE_RIP();
2667 IEM_MC_END();
2668 break;
2669
2670 case IEMMODE_32BIT:
2671 IEM_MC_BEGIN(0, 1);
2672 IEM_MC_LOCAL(uint32_t, u32Value);
2673 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
2674 IEM_MC_PUSH_U32(u32Value);
2675 IEM_MC_ADVANCE_RIP();
2676 IEM_MC_END();
2677 break;
2678
2679 case IEMMODE_64BIT:
2680 IEM_MC_BEGIN(0, 1);
2681 IEM_MC_LOCAL(uint64_t, u64Value);
2682 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
2683 IEM_MC_PUSH_U64(u64Value);
2684 IEM_MC_ADVANCE_RIP();
2685 IEM_MC_END();
2686 break;
2687 }
2688
2689 return VINF_SUCCESS;
2690}
2691
2692
2693/** Opcode 0x0f 0xa0. */
2694FNIEMOP_DEF(iemOp_push_fs)
2695{
2696 IEMOP_MNEMONIC("push fs");
2697 IEMOP_HLP_NO_LOCK_PREFIX();
2698 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
2699}
2700
2701
2702/** Opcode 0x0f 0xa1. */
2703FNIEMOP_DEF(iemOp_pop_fs)
2704{
2705 IEMOP_MNEMONIC("pop fs");
2706 IEMOP_HLP_NO_LOCK_PREFIX();
2707 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
2708}
2709
2710
2711/** Opcode 0x0f 0xa2. */
2712FNIEMOP_DEF(iemOp_cpuid)
2713{
2714 IEMOP_MNEMONIC("cpuid");
2715 IEMOP_HLP_NO_LOCK_PREFIX();
2716 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
2717}
2718
2719
2720/**
2721 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
2722 * iemOp_bts_Ev_Gv.
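 *
 * For a register destination the bit offset in Gv is simply masked to the
 * operand width. For a memory destination the offset is a signed quantity
 * that can reach outside the addressed unit, so the effective address is
 * first adjusted by (offset >> log2(width)) units before the low bits of
 * the offset select the bit; see the i*AddrAdj logic below.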
2723 */
2724FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
2725{
2726 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2727 IEMOP_HLP_NO_LOCK_PREFIX();
2728 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
2729
2730 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2731 {
2732 /* register destination. */
2733 IEMOP_HLP_NO_LOCK_PREFIX();
2734 switch (pIemCpu->enmEffOpSize)
2735 {
2736 case IEMMODE_16BIT:
2737 IEM_MC_BEGIN(3, 0);
2738 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2739 IEM_MC_ARG(uint16_t, u16Src, 1);
2740 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2741
2742 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2743 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
2744 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2745 IEM_MC_REF_EFLAGS(pEFlags);
2746 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2747
2748 IEM_MC_ADVANCE_RIP();
2749 IEM_MC_END();
2750 return VINF_SUCCESS;
2751
2752 case IEMMODE_32BIT:
2753 IEM_MC_BEGIN(3, 0);
2754 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2755 IEM_MC_ARG(uint32_t, u32Src, 1);
2756 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2757
2758 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2759 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
2760 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2761 IEM_MC_REF_EFLAGS(pEFlags);
2762 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2763
2764 IEM_MC_ADVANCE_RIP();
2765 IEM_MC_END();
2766 return VINF_SUCCESS;
2767
2768 case IEMMODE_64BIT:
2769 IEM_MC_BEGIN(3, 0);
2770 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2771 IEM_MC_ARG(uint64_t, u64Src, 1);
2772 IEM_MC_ARG(uint32_t *, pEFlags, 2);
2773
2774 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2775 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
2776 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2777 IEM_MC_REF_EFLAGS(pEFlags);
2778 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2779
2780 IEM_MC_ADVANCE_RIP();
2781 IEM_MC_END();
2782 return VINF_SUCCESS;
2783
2784 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2785 }
2786 }
2787 else
2788 {
2789 /* memory destination. */
2790
2791 uint32_t fAccess;
2792 if (pImpl->pfnLockedU16)
2793 fAccess = IEM_ACCESS_DATA_RW;
2794 else /* BT */
2795 {
2796 IEMOP_HLP_NO_LOCK_PREFIX();
2797 fAccess = IEM_ACCESS_DATA_R;
2798 }
2799
2800 /** @todo test negative bit offsets! */
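        /* A sketch of the 16-bit adjustment: with u16Src = 35 (0x23) the
           i16AddrAdj steps yield 35 >> 4 = 2 words, shifted left once for
           +4 bytes, while the bit tested is 35 & 15 = 3 in that word. */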
2801 switch (pIemCpu->enmEffOpSize)
2802 {
2803 case IEMMODE_16BIT:
2804 IEM_MC_BEGIN(3, 2);
2805 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2806 IEM_MC_ARG(uint16_t, u16Src, 1);
2807 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2808 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2809 IEM_MC_LOCAL(int16_t, i16AddrAdj);
2810
2811 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2812 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2813 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
2814 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
2815 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
2816 IEM_MC_SHL_LOCAL_S16(i16AddrAdj, 1);
2817 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
2818 IEM_MC_FETCH_EFLAGS(EFlags);
2819
2820 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2821 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2822 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
2823 else
2824 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
2825 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
2826
2827 IEM_MC_COMMIT_EFLAGS(EFlags);
2828 IEM_MC_ADVANCE_RIP();
2829 IEM_MC_END();
2830 return VINF_SUCCESS;
2831
2832 case IEMMODE_32BIT:
2833 IEM_MC_BEGIN(3, 2);
2834 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2835 IEM_MC_ARG(uint32_t, u32Src, 1);
2836 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2837 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2838 IEM_MC_LOCAL(int32_t, i32AddrAdj);
2839
2840 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2841 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2842 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
2843 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
2844 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
2845 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
2846 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
2847 IEM_MC_FETCH_EFLAGS(EFlags);
2848
2849 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2850 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2851 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
2852 else
2853 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
2854 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
2855
2856 IEM_MC_COMMIT_EFLAGS(EFlags);
2857 IEM_MC_ADVANCE_RIP();
2858 IEM_MC_END();
2859 return VINF_SUCCESS;
2860
2861 case IEMMODE_64BIT:
2862 IEM_MC_BEGIN(3, 2);
2863 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2864 IEM_MC_ARG(uint64_t, u64Src, 1);
2865 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
2866 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2867 IEM_MC_LOCAL(int64_t, i64AddrAdj);
2868
2869 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2870 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2871 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
2872 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
2873 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
2874 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
2875 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
2876 IEM_MC_FETCH_EFLAGS(EFlags);
2877
2878 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2879 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
2880 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
2881 else
2882 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
2883 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
2884
2885 IEM_MC_COMMIT_EFLAGS(EFlags);
2886 IEM_MC_ADVANCE_RIP();
2887 IEM_MC_END();
2888 return VINF_SUCCESS;
2889
2890 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2891 }
2892 }
2893}
2894
2895
2896/** Opcode 0x0f 0xa3. */
2897FNIEMOP_DEF(iemOp_bt_Ev_Gv)
2898{
2899 IEMOP_MNEMONIC("bt Ev,Gv");
2900 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
2901}
2902
2903
2904/**
2905 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
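 *
 * These shift bits from the Gv operand into the Ev destination, i.e. for
 * SHLD the result is (Dst << cShift) | (Src >> (width - cShift)). As an
 * example, 'shld ax, bx, 4' with AX=0x1234 and BX=0xABCD leaves AX=0x234A.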
2906 */
2907FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
2908{
2909 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2910 IEMOP_HLP_NO_LOCK_PREFIX();
2911 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
2912
2913 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2914 {
2915 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
2916 IEMOP_HLP_NO_LOCK_PREFIX();
2917
2918 switch (pIemCpu->enmEffOpSize)
2919 {
2920 case IEMMODE_16BIT:
2921 IEM_MC_BEGIN(4, 0);
2922 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2923 IEM_MC_ARG(uint16_t, u16Src, 1);
2924 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
2925 IEM_MC_ARG(uint32_t *, pEFlags, 3);
2926
2927 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2928 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2929 IEM_MC_REF_EFLAGS(pEFlags);
2930 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
2931
2932 IEM_MC_ADVANCE_RIP();
2933 IEM_MC_END();
2934 return VINF_SUCCESS;
2935
2936 case IEMMODE_32BIT:
2937 IEM_MC_BEGIN(4, 0);
2938 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
2939 IEM_MC_ARG(uint32_t, u32Src, 1);
2940 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
2941 IEM_MC_ARG(uint32_t *, pEFlags, 3);
2942
2943 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2944 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2945 IEM_MC_REF_EFLAGS(pEFlags);
2946 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
2947
2948 IEM_MC_ADVANCE_RIP();
2949 IEM_MC_END();
2950 return VINF_SUCCESS;
2951
2952 case IEMMODE_64BIT:
2953 IEM_MC_BEGIN(4, 0);
2954 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
2955 IEM_MC_ARG(uint64_t, u64Src, 1);
2956 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
2957 IEM_MC_ARG(uint32_t *, pEFlags, 3);
2958
2959 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2960 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
2961 IEM_MC_REF_EFLAGS(pEFlags);
2962 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
2963
2964 IEM_MC_ADVANCE_RIP();
2965 IEM_MC_END();
2966 return VINF_SUCCESS;
2967
2968 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2969 }
2970 }
2971 else
2972 {
2973 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
2974
2975 switch (pIemCpu->enmEffOpSize)
2976 {
2977 case IEMMODE_16BIT:
2978 IEM_MC_BEGIN(4, 2);
2979 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
2980 IEM_MC_ARG(uint16_t, u16Src, 1);
2981 IEM_MC_ARG(uint8_t, cShiftArg, 2);
2982 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
2983 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
2984
2985 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
2986 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
2987 IEM_MC_ASSIGN(cShiftArg, cShift);
2988 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
2989 IEM_MC_FETCH_EFLAGS(EFlags);
2990 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
2991 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
2992
2993 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
2994 IEM_MC_COMMIT_EFLAGS(EFlags);
2995 IEM_MC_ADVANCE_RIP();
2996 IEM_MC_END();
2997 return VINF_SUCCESS;
2998
2999 case IEMMODE_32BIT:
3000 IEM_MC_BEGIN(4, 2);
3001 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3002 IEM_MC_ARG(uint32_t, u32Src, 1);
3003 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3004 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3005 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3006
3007 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3008 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3009 IEM_MC_ASSIGN(cShiftArg, cShift);
3010 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3011 IEM_MC_FETCH_EFLAGS(EFlags);
3012 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3013 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3014
3015 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3016 IEM_MC_COMMIT_EFLAGS(EFlags);
3017 IEM_MC_ADVANCE_RIP();
3018 IEM_MC_END();
3019 return VINF_SUCCESS;
3020
3021 case IEMMODE_64BIT:
3022 IEM_MC_BEGIN(4, 2);
3023 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3024 IEM_MC_ARG(uint64_t, u64Src, 1);
3025 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3026 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3027 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3028
3029 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3030 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
3031 IEM_MC_ASSIGN(cShiftArg, cShift);
3032 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3033 IEM_MC_FETCH_EFLAGS(EFlags);
3034 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3035 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3036
3037 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3038 IEM_MC_COMMIT_EFLAGS(EFlags);
3039 IEM_MC_ADVANCE_RIP();
3040 IEM_MC_END();
3041 return VINF_SUCCESS;
3042
3043 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3044 }
3045 }
3046}
3047
3048
3049/**
3050 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
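 *
 * Identical to the Ib variant above except that the shift count is fetched
 * from CL; any masking of the count is left to the assembly worker.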
3051 */
3052FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
3053{
3054 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3055 IEMOP_HLP_NO_LOCK_PREFIX();
3056 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
3057
3058 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3059 {
3060 IEMOP_HLP_NO_LOCK_PREFIX();
3061
3062 switch (pIemCpu->enmEffOpSize)
3063 {
3064 case IEMMODE_16BIT:
3065 IEM_MC_BEGIN(4, 0);
3066 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3067 IEM_MC_ARG(uint16_t, u16Src, 1);
3068 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3069 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3070
3071 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3072 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3073 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3074 IEM_MC_REF_EFLAGS(pEFlags);
3075 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3076
3077 IEM_MC_ADVANCE_RIP();
3078 IEM_MC_END();
3079 return VINF_SUCCESS;
3080
3081 case IEMMODE_32BIT:
3082 IEM_MC_BEGIN(4, 0);
3083 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3084 IEM_MC_ARG(uint32_t, u32Src, 1);
3085 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3086 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3087
3088 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3089 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3090 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3091 IEM_MC_REF_EFLAGS(pEFlags);
3092 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3093
3094 IEM_MC_ADVANCE_RIP();
3095 IEM_MC_END();
3096 return VINF_SUCCESS;
3097
3098 case IEMMODE_64BIT:
3099 IEM_MC_BEGIN(4, 0);
3100 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3101 IEM_MC_ARG(uint64_t, u64Src, 1);
3102 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3103 IEM_MC_ARG(uint32_t *, pEFlags, 3);
3104
3105 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3106 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3107 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3108 IEM_MC_REF_EFLAGS(pEFlags);
3109 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3110
3111 IEM_MC_ADVANCE_RIP();
3112 IEM_MC_END();
3113 return VINF_SUCCESS;
3114
3115 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3116 }
3117 }
3118 else
3119 {
3120 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
3121
3122 switch (pIemCpu->enmEffOpSize)
3123 {
3124 case IEMMODE_16BIT:
3125 IEM_MC_BEGIN(4, 2);
3126 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3127 IEM_MC_ARG(uint16_t, u16Src, 1);
3128 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3129 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3130 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3131
3132 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3133 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3134 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3135 IEM_MC_FETCH_EFLAGS(EFlags);
3136 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3137 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
3138
3139 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
3140 IEM_MC_COMMIT_EFLAGS(EFlags);
3141 IEM_MC_ADVANCE_RIP();
3142 IEM_MC_END();
3143 return VINF_SUCCESS;
3144
3145 case IEMMODE_32BIT:
3146 IEM_MC_BEGIN(4, 2);
3147 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3148 IEM_MC_ARG(uint32_t, u32Src, 1);
3149 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3150 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3151 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3152
3153 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3154 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3155 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3156 IEM_MC_FETCH_EFLAGS(EFlags);
3157 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3158 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
3159
3160 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
3161 IEM_MC_COMMIT_EFLAGS(EFlags);
3162 IEM_MC_ADVANCE_RIP();
3163 IEM_MC_END();
3164 return VINF_SUCCESS;
3165
3166 case IEMMODE_64BIT:
3167 IEM_MC_BEGIN(4, 2);
3168 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3169 IEM_MC_ARG(uint64_t, u64Src, 1);
3170 IEM_MC_ARG(uint8_t, cShiftArg, 2);
3171 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
3172 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3173
3174 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3175 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3176 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
3177 IEM_MC_FETCH_EFLAGS(EFlags);
3178 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3179 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
3180
3181 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
3182 IEM_MC_COMMIT_EFLAGS(EFlags);
3183 IEM_MC_ADVANCE_RIP();
3184 IEM_MC_END();
3185 return VINF_SUCCESS;
3186
3187 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3188 }
3189 }
3190}
3191
3192
3193
3194/** Opcode 0x0f 0xa4. */
3195FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
3196{
3197 IEMOP_MNEMONIC("shld Ev,Gv,Ib");
3198 return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
3199}
3200
3201
3202/** Opcode 0x0f 0xa5. */
3203FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
3204{
3205 IEMOP_MNEMONIC("shld Ev,Gv,CL");
3206 return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
3207}
3208
3209
3210/** Opcode 0x0f 0xa8. */
3211FNIEMOP_DEF(iemOp_push_gs)
3212{
3213 IEMOP_MNEMONIC("push gs");
3214 IEMOP_HLP_NO_LOCK_PREFIX();
3215 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
3216}
3217
3218
3219/** Opcode 0x0f 0xa9. */
3220FNIEMOP_DEF(iemOp_pop_gs)
3221{
3222 IEMOP_MNEMONIC("pop gs");
3223 IEMOP_HLP_NO_LOCK_PREFIX();
3224 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
3225}
3226
3227
3228/** Opcode 0x0f 0xaa. */
3229FNIEMOP_STUB(iemOp_rsm);
3230
3231
3232/** Opcode 0x0f 0xab. */
3233FNIEMOP_DEF(iemOp_bts_Ev_Gv)
3234{
3235 IEMOP_MNEMONIC("bts Ev,Gv");
3236 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
3237}
3238
3239
3240/** Opcode 0x0f 0xac. */
3241FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
3242{
3243 IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
3244 return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
3245}
3246
3247
3248/** Opcode 0x0f 0xad. */
3249FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
3250{
3251 IEMOP_MNEMONIC("shrd Ev,Gv,CL");
3252 return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
3253}
3254
3255
3256/** Opcode 0x0f 0xae. */
3257FNIEMOP_STUB(iemOp_Grp15);
3258
3259
3260/** Opcode 0x0f 0xaf. */
3261FNIEMOP_DEF(iemOp_imul_Gv_Ev)
3262{
3263 IEMOP_MNEMONIC("imul Gv,Ev");
3264 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3265 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
3266}
3267
3268
3269/** Opcode 0x0f 0xb0. */
3270FNIEMOP_STUB(iemOp_cmpxchg_Eb_Gb);
3271/** Opcode 0x0f 0xb1. */
3272FNIEMOP_STUB(iemOp_cmpxchg_Ev_Gv);
3273
3274
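/**
 * Common worker for LSS, LFS and LGS: loads a far pointer from memory into
 * a segment register and a general register.
 *
 * The memory operand holds the offset first with the 16-bit selector right
 * after it, so with a 32-bit operand size the layout is four offset bytes
 * followed by two selector bytes. Register forms of the ModR/M byte are
 * rejected as invalid opcodes.
 */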
3275FNIEMOP_DEF_1(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg)
3276{
3277 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3278 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3279
3280 /* The source cannot be a register. */
3281 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3282 return IEMOP_RAISE_INVALID_OPCODE();
3283 uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
3284
3285 switch (pIemCpu->enmEffOpSize)
3286 {
3287 case IEMMODE_16BIT:
3288 IEM_MC_BEGIN(5, 1);
3289 IEM_MC_ARG(uint16_t, uSel, 0);
3290 IEM_MC_ARG(uint16_t, offSeg, 1);
3291 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3292 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3293 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3294 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3295 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3296 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3297 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2);
3298 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3299 IEM_MC_END();
3300 return VINF_SUCCESS;
3301
3302 case IEMMODE_32BIT:
3303 IEM_MC_BEGIN(5, 1);
3304 IEM_MC_ARG(uint16_t, uSel, 0);
3305 IEM_MC_ARG(uint32_t, offSeg, 1);
3306 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3307 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3308 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3309 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3310 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3311 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3312 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4);
3313 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3314 IEM_MC_END();
3315 return VINF_SUCCESS;
3316
3317 case IEMMODE_64BIT:
3318 IEM_MC_BEGIN(5, 1);
3319 IEM_MC_ARG(uint16_t, uSel, 0);
3320 IEM_MC_ARG(uint64_t, offSeg, 1);
3321 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
3322 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
3323 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
3324 IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
3325 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm);
3326 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
3327 IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8);
3328 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
3329 IEM_MC_END();
3330 return VINF_SUCCESS;
3331
3332 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3333 }
3334}
3335
3336
3337/** Opcode 0x0f 0xb2. */
3338FNIEMOP_DEF(iemOp_lss_Gv_Mp)
3339{
3340 IEMOP_MNEMONIC("lss Gv,Mp");
3341 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_SS);
3342}
3343
3344
3345/** Opcode 0x0f 0xb3. */
3346FNIEMOP_DEF(iemOp_btr_Ev_Gv)
3347{
3348 IEMOP_MNEMONIC("btr Ev,Gv");
3349 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
3350}
3351
3352
3353/** Opcode 0x0f 0xb4. */
3354FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
3355{
3356 IEMOP_MNEMONIC("lfs Gv,Mp");
3357 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_FS);
3358}
3359
3360
3361/** Opcode 0x0f 0xb5. */
3362FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
3363{
3364 IEMOP_MNEMONIC("lgs Gv,Mp");
3365 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_GS);
3366}
3367
3368
3369/** Opcode 0x0f 0xb6. */
3370FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
3371{
3372 IEMOP_MNEMONIC("movzx Gv,Eb");
3373
3374 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3375 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3376
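    /* E.g. 'movzx eax, bl' with BL=0x80 yields EAX=0x00000080, whereas
       MOVSX (0x0f 0xbe) would yield 0xFFFFFF80. */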
3377 /*
3378 * If rm is denoting a register, no more instruction bytes.
3379 */
3380 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3381 {
3382 switch (pIemCpu->enmEffOpSize)
3383 {
3384 case IEMMODE_16BIT:
3385 IEM_MC_BEGIN(0, 1);
3386 IEM_MC_LOCAL(uint16_t, u16Value);
3387 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3388 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3389 IEM_MC_ADVANCE_RIP();
3390 IEM_MC_END();
3391 return VINF_SUCCESS;
3392
3393 case IEMMODE_32BIT:
3394 IEM_MC_BEGIN(0, 1);
3395 IEM_MC_LOCAL(uint32_t, u32Value);
3396 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3397 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3398 IEM_MC_ADVANCE_RIP();
3399 IEM_MC_END();
3400 return VINF_SUCCESS;
3401
3402 case IEMMODE_64BIT:
3403 IEM_MC_BEGIN(0, 1);
3404 IEM_MC_LOCAL(uint64_t, u64Value);
3405 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3406 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3407 IEM_MC_ADVANCE_RIP();
3408 IEM_MC_END();
3409 return VINF_SUCCESS;
3410
3411 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3412 }
3413 }
3414 else
3415 {
3416 /*
3417 * We're loading a register from memory.
3418 */
3419 switch (pIemCpu->enmEffOpSize)
3420 {
3421 case IEMMODE_16BIT:
3422 IEM_MC_BEGIN(0, 2);
3423 IEM_MC_LOCAL(uint16_t, u16Value);
3424 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3425 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3426 IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
3427 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3428 IEM_MC_ADVANCE_RIP();
3429 IEM_MC_END();
3430 return VINF_SUCCESS;
3431
3432 case IEMMODE_32BIT:
3433 IEM_MC_BEGIN(0, 2);
3434 IEM_MC_LOCAL(uint32_t, u32Value);
3435 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3436 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3437 IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3438 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3439 IEM_MC_ADVANCE_RIP();
3440 IEM_MC_END();
3441 return VINF_SUCCESS;
3442
3443 case IEMMODE_64BIT:
3444 IEM_MC_BEGIN(0, 2);
3445 IEM_MC_LOCAL(uint64_t, u64Value);
3446 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3447 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3448 IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3449 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3450 IEM_MC_ADVANCE_RIP();
3451 IEM_MC_END();
3452 return VINF_SUCCESS;
3453
3454 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3455 }
3456 }
3457}
3458
3459
3460/** Opcode 0x0f 0xb7. */
3461FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
3462{
3463 IEMOP_MNEMONIC("movzx Gv,Ew");
3464
3465 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3466 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3467
3468 /** @todo Not entirely sure how the operand size prefix is handled here,
3469 * assuming that it will be ignored. Would be nice to have a few
3470 * tests for this. */
3471 /*
3472 * If rm is denoting a register, no more instruction bytes.
3473 */
3474 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3475 {
3476 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3477 {
3478 IEM_MC_BEGIN(0, 1);
3479 IEM_MC_LOCAL(uint32_t, u32Value);
3480 IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3481 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3482 IEM_MC_ADVANCE_RIP();
3483 IEM_MC_END();
3484 }
3485 else
3486 {
3487 IEM_MC_BEGIN(0, 1);
3488 IEM_MC_LOCAL(uint64_t, u64Value);
3489 IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3490 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3491 IEM_MC_ADVANCE_RIP();
3492 IEM_MC_END();
3493 }
3494 }
3495 else
3496 {
3497 /*
3498 * We're loading a register from memory.
3499 */
3500 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3501 {
3502 IEM_MC_BEGIN(0, 2);
3503 IEM_MC_LOCAL(uint32_t, u32Value);
3504 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3505 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3506 IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3507 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3508 IEM_MC_ADVANCE_RIP();
3509 IEM_MC_END();
3510 }
3511 else
3512 {
3513 IEM_MC_BEGIN(0, 2);
3514 IEM_MC_LOCAL(uint64_t, u64Value);
3515 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3516 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3517 IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3518 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3519 IEM_MC_ADVANCE_RIP();
3520 IEM_MC_END();
3521 }
3522 }
3523 return VINF_SUCCESS;
3524}
3525
3526
3527/** Opcode 0x0f 0xb8. */
3528FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe);
3529/** Opcode 0x0f 0xb9. */
3530FNIEMOP_STUB(iemOp_Grp10);
3531
3532
3533/** Opcode 0x0f 0xba. */
3534FNIEMOP_DEF(iemOp_Grp8)
3535{
3536 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3537 PCIEMOPBINSIZES pImpl;
3538 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
3539 {
3540 case 0: case 1: case 2: case 3:
3541 return IEMOP_RAISE_INVALID_OPCODE();
3542 case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
3543 case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
3544 case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
3545 case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
3546 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3547 }
3548 IEMOP_HLP_NO_LOCK_PREFIX();
3549 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
3550
3551 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3552 {
3553 /* register destination. */
3554 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3555 IEMOP_HLP_NO_LOCK_PREFIX();
3556
3557 switch (pIemCpu->enmEffOpSize)
3558 {
3559 case IEMMODE_16BIT:
3560 IEM_MC_BEGIN(3, 0);
3561 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3562 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
3563 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3564
3565 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3566 IEM_MC_REF_EFLAGS(pEFlags);
3567 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3568
3569 IEM_MC_ADVANCE_RIP();
3570 IEM_MC_END();
3571 return VINF_SUCCESS;
3572
3573 case IEMMODE_32BIT:
3574 IEM_MC_BEGIN(3, 0);
3575 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3576 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
3577 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3578
3579 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3580 IEM_MC_REF_EFLAGS(pEFlags);
3581 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3582
3583 IEM_MC_ADVANCE_RIP();
3584 IEM_MC_END();
3585 return VINF_SUCCESS;
3586
3587 case IEMMODE_64BIT:
3588 IEM_MC_BEGIN(3, 0);
3589 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3590 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
3591 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3592
3593 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3594 IEM_MC_REF_EFLAGS(pEFlags);
3595 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3596
3597 IEM_MC_ADVANCE_RIP();
3598 IEM_MC_END();
3599 return VINF_SUCCESS;
3600
3601 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3602 }
3603 }
3604 else
3605 {
3606 /* memory destination. */
3607
3608 uint32_t fAccess;
3609 if (pImpl->pfnLockedU16)
3610 fAccess = IEM_ACCESS_DATA_RW;
3611 else /* BT */
3612 {
3613 IEMOP_HLP_NO_LOCK_PREFIX();
3614 fAccess = IEM_ACCESS_DATA_R;
3615 }
3616
3617 /** @todo test negative bit offsets! */
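        /* Unlike the Gv forms above, the immediate bit offset is masked to
           the operand width (u8Bit & 0x0f/0x1f/0x3f below), so no effective
           address adjustment is needed here. */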
3618 switch (pIemCpu->enmEffOpSize)
3619 {
3620 case IEMMODE_16BIT:
3621 IEM_MC_BEGIN(3, 1);
3622 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3623 IEM_MC_ARG(uint16_t, u16Src, 1);
3624 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3625 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3626
3627 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3628 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3629 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
3630 IEM_MC_FETCH_EFLAGS(EFlags);
3631 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3632 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3633 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
3634 else
3635 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
3636 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
3637
3638 IEM_MC_COMMIT_EFLAGS(EFlags);
3639 IEM_MC_ADVANCE_RIP();
3640 IEM_MC_END();
3641 return VINF_SUCCESS;
3642
3643 case IEMMODE_32BIT:
3644 IEM_MC_BEGIN(3, 1);
3645 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3646 IEM_MC_ARG(uint32_t, u32Src, 1);
3647 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3648 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3649
3650 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3651 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3652 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
3653 IEM_MC_FETCH_EFLAGS(EFlags);
3654 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3655 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3656 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
3657 else
3658 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
3659 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
3660
3661 IEM_MC_COMMIT_EFLAGS(EFlags);
3662 IEM_MC_ADVANCE_RIP();
3663 IEM_MC_END();
3664 return VINF_SUCCESS;
3665
3666 case IEMMODE_64BIT:
3667 IEM_MC_BEGIN(3, 2);
3668 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3669 IEM_MC_ARG(uint64_t, u64Src, 1);
3670 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
3671 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3672
3673 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3674 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
3675 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
3676 IEM_MC_FETCH_EFLAGS(EFlags);
3677 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
3678 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3679 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
3680 else
3681 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
3682 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
3683
3684 IEM_MC_COMMIT_EFLAGS(EFlags);
3685 IEM_MC_ADVANCE_RIP();
3686 IEM_MC_END();
3687 return VINF_SUCCESS;
3688
3689 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3690 }
3691 }
3692
3693}
3694
3695
3696/** Opcode 0x0f 0xbb. */
3697FNIEMOP_DEF(iemOp_btc_Ev_Gv)
3698{
3699 IEMOP_MNEMONIC("btc Gv,Mp");
3700 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
3701}
3702
3703
3704/** Opcode 0x0f 0xbc. */
3705FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
3706{
3707 IEMOP_MNEMONIC("bsf Gv,Ev");
3708 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
3709 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
3710}
3711
3712
3713/** Opcode 0x0f 0xbd. */
3714FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
3715{
3716 IEMOP_MNEMONIC("bsr Gv,Ev");
3717 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
3718 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
3719}
3720
3721
3722/** Opcode 0x0f 0xbe. */
3723FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
3724{
3725 IEMOP_MNEMONIC("movsx Gv,Eb");
3726
3727 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3728 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3729
3730 /*
3731 * If rm is denoting a register, no more instruction bytes.
3732 */
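/* (mod, the top two ModRM bits, is 3 for a register operand; any other
   value selects a memory operand with optional SIB and displacement.) */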
3733 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3734 {
3735 switch (pIemCpu->enmEffOpSize)
3736 {
3737 case IEMMODE_16BIT:
3738 IEM_MC_BEGIN(0, 1);
3739 IEM_MC_LOCAL(uint16_t, u16Value);
3740 IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3741 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3742 IEM_MC_ADVANCE_RIP();
3743 IEM_MC_END();
3744 return VINF_SUCCESS;
3745
3746 case IEMMODE_32BIT:
3747 IEM_MC_BEGIN(0, 1);
3748 IEM_MC_LOCAL(uint32_t, u32Value);
3749 IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3750 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3751 IEM_MC_ADVANCE_RIP();
3752 IEM_MC_END();
3753 return VINF_SUCCESS;
3754
3755 case IEMMODE_64BIT:
3756 IEM_MC_BEGIN(0, 1);
3757 IEM_MC_LOCAL(uint64_t, u64Value);
3758 IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3759 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3760 IEM_MC_ADVANCE_RIP();
3761 IEM_MC_END();
3762 return VINF_SUCCESS;
3763
3764 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3765 }
3766 }
3767 else
3768 {
3769 /*
3770 * We're loading a register from memory.
3771 */
3772 switch (pIemCpu->enmEffOpSize)
3773 {
3774 case IEMMODE_16BIT:
3775 IEM_MC_BEGIN(0, 2);
3776 IEM_MC_LOCAL(uint16_t, u16Value);
3777 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3778 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3779 IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
3780 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
3781 IEM_MC_ADVANCE_RIP();
3782 IEM_MC_END();
3783 return VINF_SUCCESS;
3784
3785 case IEMMODE_32BIT:
3786 IEM_MC_BEGIN(0, 2);
3787 IEM_MC_LOCAL(uint32_t, u32Value);
3788 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3789 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3790 IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3791 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3792 IEM_MC_ADVANCE_RIP();
3793 IEM_MC_END();
3794 return VINF_SUCCESS;
3795
3796 case IEMMODE_64BIT:
3797 IEM_MC_BEGIN(0, 2);
3798 IEM_MC_LOCAL(uint64_t, u64Value);
3799 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3800 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3801 IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3802 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3803 IEM_MC_ADVANCE_RIP();
3804 IEM_MC_END();
3805 return VINF_SUCCESS;
3806
3807 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3808 }
3809 }
3810}
3811
3812
3813/** Opcode 0x0f 0xbf. */
3814FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
3815{
3816 IEMOP_MNEMONIC("movsx Gv,Ew");
3817
3818 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3819 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
3820
3821 /** @todo Not entirely sure how the operand size prefix is handled here,
3822 * assuming that it will be ignored. Would be nice to have a few
3823 * tests for this. */
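/* (With a 16-bit effective operand size 0f bf would nominally be a plain
   16-bit move; the code below instead treats every non-64-bit size as a
   32-bit destination, hence the todo above.) */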
3824 /*
3825 * If rm is denoting a register, no more instruction bytes.
3826 */
3827 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3828 {
3829 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3830 {
3831 IEM_MC_BEGIN(0, 1);
3832 IEM_MC_LOCAL(uint32_t, u32Value);
3833 IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3834 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3835 IEM_MC_ADVANCE_RIP();
3836 IEM_MC_END();
3837 }
3838 else
3839 {
3840 IEM_MC_BEGIN(0, 1);
3841 IEM_MC_LOCAL(uint64_t, u64Value);
3842 IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3843 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3844 IEM_MC_ADVANCE_RIP();
3845 IEM_MC_END();
3846 }
3847 }
3848 else
3849 {
3850 /*
3851 * We're loading a register from memory.
3852 */
3853 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
3854 {
3855 IEM_MC_BEGIN(0, 2);
3856 IEM_MC_LOCAL(uint32_t, u32Value);
3857 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3858 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3859 IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
3860 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
3861 IEM_MC_ADVANCE_RIP();
3862 IEM_MC_END();
3863 }
3864 else
3865 {
3866 IEM_MC_BEGIN(0, 2);
3867 IEM_MC_LOCAL(uint64_t, u64Value);
3868 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3869 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3870 IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
3871 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
3872 IEM_MC_ADVANCE_RIP();
3873 IEM_MC_END();
3874 }
3875 }
3876 return VINF_SUCCESS;
3877}
3878
3879
3880/** Opcode 0x0f 0xc0. */
3881FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
3882{
3883 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3884 IEMOP_MNEMONIC("xadd Eb,Gb");
3885
3886 /*
3887 * If rm is denoting a register, no more instruction bytes.
3888 */
3889 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3890 {
3891 IEMOP_HLP_NO_LOCK_PREFIX();
3892
3893 IEM_MC_BEGIN(3, 0);
3894 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
3895 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
3896 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3897
3898 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3899 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3900 IEM_MC_REF_EFLAGS(pEFlags);
3901 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
3902
3903 IEM_MC_ADVANCE_RIP();
3904 IEM_MC_END();
3905 }
3906 else
3907 {
3908 /*
3909 * We're accessing memory.
3910 */
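/* The destination is mapped first and the register operand copied into a
   local, so the old memory value is written back to the register only
   after the memory commit has succeeded; the locked worker is used when
   a LOCK prefix is present. */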
3911 IEM_MC_BEGIN(3, 3);
3912 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
3913 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
3914 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
3915 IEM_MC_LOCAL(uint8_t, u8RegCopy);
3916 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
3917
3918 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
3919 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
3920 IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3921 IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
3922 IEM_MC_FETCH_EFLAGS(EFlags);
3923 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
3924 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
3925 else
3926 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8_locked, pu8Dst, pu8Reg, pEFlags);
3927
3928 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
3929 IEM_MC_COMMIT_EFLAGS(EFlags);
3930 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8RegCopy);
3931 IEM_MC_ADVANCE_RIP();
3932 IEM_MC_END();
3933 return VINF_SUCCESS;
3934 }
3935 return VINF_SUCCESS;
3936}
3937
3938
3939/** Opcode 0x0f 0xc1. */
3940FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
3941{
3942 IEMOP_MNEMONIC("xadd Ev,Gv");
3943 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
3944
3945 /*
3946 * If rm is denoting a register, no more instruction bytes.
3947 */
3948 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
3949 {
3950 IEMOP_HLP_NO_LOCK_PREFIX();
3951
3952 switch (pIemCpu->enmEffOpSize)
3953 {
3954 case IEMMODE_16BIT:
3955 IEM_MC_BEGIN(3, 0);
3956 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
3957 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
3958 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3959
3960 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3961 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3962 IEM_MC_REF_EFLAGS(pEFlags);
3963 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
3964
3965 IEM_MC_ADVANCE_RIP();
3966 IEM_MC_END();
3967 return VINF_SUCCESS;
3968
3969 case IEMMODE_32BIT:
3970 IEM_MC_BEGIN(3, 0);
3971 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
3972 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
3973 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3974
3975 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3976 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3977 IEM_MC_REF_EFLAGS(pEFlags);
3978 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
3979
3980 IEM_MC_ADVANCE_RIP();
3981 IEM_MC_END();
3982 return VINF_SUCCESS;
3983
3984 case IEMMODE_64BIT:
3985 IEM_MC_BEGIN(3, 0);
3986 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
3987 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
3988 IEM_MC_ARG(uint32_t *, pEFlags, 2);
3989
3990 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
3991 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
3992 IEM_MC_REF_EFLAGS(pEFlags);
3993 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
3994
3995 IEM_MC_ADVANCE_RIP();
3996 IEM_MC_END();
3997 return VINF_SUCCESS;
3998
3999 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4000 }
4001 }
4002 else
4003 {
4004 /*
4005 * We're accessing memory.
4006 */
4007 switch (pIemCpu->enmEffOpSize)
4008 {
4009 case IEMMODE_16BIT:
4010 IEM_MC_BEGIN(3, 3);
4011 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4012 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
4013 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4014 IEM_MC_LOCAL(uint16_t, u16RegCopy);
4015 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4016
4017 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4018 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4019 IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4020 IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
4021 IEM_MC_FETCH_EFLAGS(EFlags);
4022 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4023 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
4024 else
4025 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16_locked, pu16Dst, pu16Reg, pEFlags);
4026
4027 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4028 IEM_MC_COMMIT_EFLAGS(EFlags);
4029 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16RegCopy);
4030 IEM_MC_ADVANCE_RIP();
4031 IEM_MC_END();
4032 return VINF_SUCCESS;
4033
4034 case IEMMODE_32BIT:
4035 IEM_MC_BEGIN(3, 3);
4036 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4037 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
4038 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4039 IEM_MC_LOCAL(uint32_t, u32RegCopy);
4040 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4041
4042 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4043 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4044 IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4045 IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
4046 IEM_MC_FETCH_EFLAGS(EFlags);
4047 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4048 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
4049 else
4050 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32_locked, pu32Dst, pu32Reg, pEFlags);
4051
4052 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4053 IEM_MC_COMMIT_EFLAGS(EFlags);
4054 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32RegCopy);
4055 IEM_MC_ADVANCE_RIP();
4056 IEM_MC_END();
4057 return VINF_SUCCESS;
4058
4059 case IEMMODE_64BIT:
4060 IEM_MC_BEGIN(3, 3);
4061 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4062 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
4063 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
4064 IEM_MC_LOCAL(uint64_t, u64RegCopy);
4065 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4066
4067 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
4068 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
4069 IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4070 IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
4071 IEM_MC_FETCH_EFLAGS(EFlags);
4072 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4073 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
4074 else
4075 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64_locked, pu64Dst, pu64Reg, pEFlags);
4076
4077 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4078 IEM_MC_COMMIT_EFLAGS(EFlags);
4079 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64RegCopy);
4080 IEM_MC_ADVANCE_RIP();
4081 IEM_MC_END();
4082 return VINF_SUCCESS;
4083
4084 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4085 }
4086 }
4087}
4088
4089/** Opcode 0x0f 0xc2. */
4090FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);
4091/** Opcode 0x0f 0xc3. */
4092FNIEMOP_STUB(iemOp_movnti_My_Gy);
4093/** Opcode 0x0f 0xc4. */
4094FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);
4095/** Opcode 0x0f 0xc5. */
4096FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);
4097/** Opcode 0x0f 0xc6. */
4098FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
4099/** Opcode 0x0f 0xc7. */
4100FNIEMOP_STUB(iemOp_Grp9);
4101/** Opcode 0x0f 0xc8. */
4102FNIEMOP_STUB(iemOp_bswap_rAX_r8);
4103/** Opcode 0x0f 0xc9. */
4104FNIEMOP_STUB(iemOp_bswap_rCX_r9);
4105/** Opcode 0x0f 0xca. */
4106FNIEMOP_STUB(iemOp_bswap_rDX_r10);
4107/** Opcode 0x0f 0xcb. */
4108FNIEMOP_STUB(iemOp_bswap_rBX_r11);
4109/** Opcode 0x0f 0xcc. */
4110FNIEMOP_STUB(iemOp_bswap_rSP_r12);
4111/** Opcode 0x0f 0xcd. */
4112FNIEMOP_STUB(iemOp_bswap_rBP_r13);
4113/** Opcode 0x0f 0xce. */
4114FNIEMOP_STUB(iemOp_bswap_rSI_r14);
4115/** Opcode 0x0f 0xcf. */
4116FNIEMOP_STUB(iemOp_bswap_rDI_r15);
4117/** Opcode 0x0f 0xd0. */
4118FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
4119/** Opcode 0x0f 0xd1. */
4120FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
4121/** Opcode 0x0f 0xd2. */
4122FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
4123/** Opcode 0x0f 0xd3. */
4124FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
4125/** Opcode 0x0f 0xd4. */
4126FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
4127/** Opcode 0x0f 0xd5. */
4128FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
4129/** Opcode 0x0f 0xd6. */
4130FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
4131/** Opcode 0x0f 0xd7. */
4132FNIEMOP_STUB(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq);
4133/** Opcode 0x0f 0xd8. */
4134FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
4135/** Opcode 0x0f 0xd9. */
4136FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
4137/** Opcode 0x0f 0xda. */
4138FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
4139/** Opcode 0x0f 0xdb. */
4140FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
4141/** Opcode 0x0f 0xdc. */
4142FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
4143/** Opcode 0x0f 0xdd. */
4144FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
4145/** Opcode 0x0f 0xde. */
4146FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
4147/** Opcode 0x0f 0xdf. */
4148FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
4149/** Opcode 0x0f 0xe0. */
4150FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
4151/** Opcode 0x0f 0xe1. */
4152FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
4153/** Opcode 0x0f 0xe2. */
4154FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
4155/** Opcode 0x0f 0xe3. */
4156FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
4157/** Opcode 0x0f 0xe4. */
4158FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
4159/** Opcode 0x0f 0xe5. */
4160FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
4161/** Opcode 0x0f 0xe6. */
4162FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
4163/** Opcode 0x0f 0xe7. */
4164FNIEMOP_STUB(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq);
4165/** Opcode 0x0f 0xe8. */
4166FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
4167/** Opcode 0x0f 0xe9. */
4168FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
4169/** Opcode 0x0f 0xea. */
4170FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
4171/** Opcode 0x0f 0xeb. */
4172FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
4173/** Opcode 0x0f 0xec. */
4174FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
4175/** Opcode 0x0f 0xed. */
4176FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
4177/** Opcode 0x0f 0xee. */
4178FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
4179/** Opcode 0x0f 0xef. */
4180FNIEMOP_STUB(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq);
4181/** Opcode 0x0f 0xf0. */
4182FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
4183/** Opcode 0x0f 0xf1. */
4184FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
4185/** Opcode 0x0f 0xf2. */
4186FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
4187/** Opcode 0x0f 0xf3. */
4188FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
4189/** Opcode 0x0f 0xf4. */
4190FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
4191/** Opcode 0x0f 0xf5. */
4192FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
4193/** Opcode 0x0f 0xf6. */
4194FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
4195/** Opcode 0x0f 0xf7. */
4196FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
4197/** Opcode 0x0f 0xf8. */
4198FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq);
4199/** Opcode 0x0f 0xf9. */
4200FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
4201/** Opcode 0x0f 0xfa. */
4202FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
4203/** Opcode 0x0f 0xfb. */
4204FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
4205/** Opcode 0x0f 0xfc. */
4206FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
4207/** Opcode 0x0f 0xfd. */
4208FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
4209/** Opcode 0x0f 0xfe. */
4210FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
4211
4212
4213const PFNIEMOP g_apfnTwoByteMap[256] =
4214{
4215 /* 0x00 */ iemOp_Grp6, iemOp_Grp7, iemOp_lar_Gv_Ew, iemOp_lsl_Gv_Ew,
4216 /* 0x04 */ iemOp_Invalid, iemOp_syscall, iemOp_clts, iemOp_sysret,
4217 /* 0x08 */ iemOp_invd, iemOp_wbinvd, iemOp_Invalid, iemOp_ud2,
4218 /* 0x0c */ iemOp_Invalid, iemOp_nop_Ev_prefetch, iemOp_femms, iemOp_3Dnow,
4219 /* 0x10 */ iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
4220 /* 0x11 */ iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
4221 /* 0x12 */ iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
4222 /* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
4223 /* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
4224 /* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
4225 /* 0x16 */ iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
4226 /* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
4227 /* 0x18 */ iemOp_prefetch_Grp16, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4228 /* 0x1c */ iemOp_Invalid, iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4229 /* 0x20 */ iemOp_mov_Rd_Cd, iemOp_mov_Rd_Dd, iemOp_mov_Cd_Rd, iemOp_mov_Dd_Rd,
4230 /* 0x24 */ iemOp_mov_Rd_Td, iemOp_Invalid, iemOp_mov_Td_Rd, iemOp_Invalid,
4231 /* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
4232 /* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
4233 /* 0x2a */ iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
4234 /* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
4235 /* 0x2c */ iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
4236 /* 0x2d */ iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
4237 /* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
4238 /* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
4239 /* 0x30 */ iemOp_wrmsr, iemOp_rdtsc, iemOp_rdmsr, iemOp_rdpmc,
4240 /* 0x34 */ iemOp_sysenter, iemOp_sysexit, iemOp_Invalid, iemOp_getsec,
4241 /* 0x38 */ iemOp_3byte_Esc_A4, iemOp_Invalid, iemOp_3byte_Esc_A5, iemOp_Invalid,
4242 /* 0x3c */ iemOp_movnti_Gv_Ev/*?*/,iemOp_Invalid, iemOp_Invalid, iemOp_Invalid,
4243 /* 0x40 */ iemOp_cmovo_Gv_Ev, iemOp_cmovno_Gv_Ev, iemOp_cmovc_Gv_Ev, iemOp_cmovnc_Gv_Ev,
4244 /* 0x44 */ iemOp_cmove_Gv_Ev, iemOp_cmovne_Gv_Ev, iemOp_cmovbe_Gv_Ev, iemOp_cmovnbe_Gv_Ev,
4245 /* 0x48 */ iemOp_cmovs_Gv_Ev, iemOp_cmovns_Gv_Ev, iemOp_cmovp_Gv_Ev, iemOp_cmovnp_Gv_Ev,
4246 /* 0x4c */ iemOp_cmovl_Gv_Ev, iemOp_cmovnl_Gv_Ev, iemOp_cmovle_Gv_Ev, iemOp_cmovnle_Gv_Ev,
4247 /* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
4248 /* 0x51 */ iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
4249 /* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
4250 /* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
4251 /* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
4252 /* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
4253 /* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
4254 /* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
4255 /* 0x58 */ iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
4256 /* 0x59 */ iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
4257 /* 0x5a */ iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
4258 /* 0x5b */ iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
4259 /* 0x5c */ iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
4260 /* 0x5d */ iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
4261 /* 0x5e */ iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
4262 /* 0x5f */ iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
4263 /* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
4264 /* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
4265 /* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
4266 /* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
4267 /* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
4268 /* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
4269 /* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
4270 /* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
4271 /* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
4272 /* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
4273 /* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
4274 /* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
4275 /* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
4276 /* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
4277 /* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
4278 /* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
4279 /* 0x70 */ iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
4280 /* 0x71 */ iemOp_Grp12,
4281 /* 0x72 */ iemOp_Grp13,
4282 /* 0x73 */ iemOp_Grp14,
4283 /* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
4284 /* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
4285 /* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
4286 /* 0x77 */ iemOp_emms,
4287 /* 0x78 */ iemOp_vmread, iemOp_vmwrite, iemOp_Invalid, iemOp_Invalid,
4288 /* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
4289 /* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
4290 /* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
4291 /* 0x7f */ iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
4292 /* 0x80 */ iemOp_jo_Jv, iemOp_jno_Jv, iemOp_jc_Jv, iemOp_jnc_Jv,
4293 /* 0x84 */ iemOp_je_Jv, iemOp_jne_Jv, iemOp_jbe_Jv, iemOp_jnbe_Jv,
4294 /* 0x88 */ iemOp_js_Jv, iemOp_jns_Jv, iemOp_jp_Jv, iemOp_jnp_Jv,
4295 /* 0x8c */ iemOp_jl_Jv, iemOp_jnl_Jv, iemOp_jle_Jv, iemOp_jnle_Jv,
4296 /* 0x90 */ iemOp_seto_Eb, iemOp_setno_Eb, iemOp_setc_Eb, iemOp_setnc_Eb,
4297 /* 0x94 */ iemOp_sete_Eb, iemOp_setne_Eb, iemOp_setbe_Eb, iemOp_setnbe_Eb,
4298 /* 0x98 */ iemOp_sets_Eb, iemOp_setns_Eb, iemOp_setp_Eb, iemOp_setnp_Eb,
4299 /* 0x9c */ iemOp_setl_Eb, iemOp_setnl_Eb, iemOp_setle_Eb, iemOp_setnle_Eb,
4300 /* 0xa0 */ iemOp_push_fs, iemOp_pop_fs, iemOp_cpuid, iemOp_bt_Ev_Gv,
4301 /* 0xa4 */ iemOp_shld_Ev_Gv_Ib, iemOp_shld_Ev_Gv_CL, iemOp_Invalid, iemOp_Invalid,
4302 /* 0xa8 */ iemOp_push_gs, iemOp_pop_gs, iemOp_rsm, iemOp_bts_Ev_Gv,
4303 /* 0xac */ iemOp_shrd_Ev_Gv_Ib, iemOp_shrd_Ev_Gv_CL, iemOp_Grp15, iemOp_imul_Gv_Ev,
4304 /* 0xb0 */ iemOp_cmpxchg_Eb_Gb, iemOp_cmpxchg_Ev_Gv, iemOp_lss_Gv_Mp, iemOp_btr_Ev_Gv,
4305 /* 0xb4 */ iemOp_lfs_Gv_Mp, iemOp_lgs_Gv_Mp, iemOp_movzx_Gv_Eb, iemOp_movzx_Gv_Ew,
4306 /* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,iemOp_Grp10, iemOp_Grp8, iemOp_btc_Ev_Gv,
4307 /* 0xbc */ iemOp_bsf_Gv_Ev, iemOp_bsr_Gv_Ev, iemOp_movsx_Gv_Eb, iemOp_movsx_Gv_Ew,
4308 /* 0xc0 */ iemOp_xadd_Eb_Gb,
4309 /* 0xc1 */ iemOp_xadd_Ev_Gv,
4310 /* 0xc2 */ iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
4311 /* 0xc3 */ iemOp_movnti_My_Gy,
4312 /* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
4313 /* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
4314 /* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
4315 /* 0xc7 */ iemOp_Grp9,
4316 /* 0xc8 */ iemOp_bswap_rAX_r8, iemOp_bswap_rCX_r9, iemOp_bswap_rDX_r10, iemOp_bswap_rBX_r11,
4317 /* 0xcc */ iemOp_bswap_rSP_r12, iemOp_bswap_rBP_r13, iemOp_bswap_rSI_r14, iemOp_bswap_rDI_r15,
4318 /* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
4319 /* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
4320 /* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
4321 /* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
4322 /* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
4323 /* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
4324 /* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
4325 /* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
4326 /* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
4327 /* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
4328 /* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
4329 /* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
4330 /* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
4331 /* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
4332 /* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
4333 /* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
4334 /* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
4335 /* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
4336 /* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
4337 /* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
4338 /* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
4339 /* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
4340 /* 0xe6 */ iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
4341 /* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
4342 /* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
4343 /* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
4344 /* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
4345 /* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
4346 /* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
4347 /* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
4348 /* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
4349 /* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
4350 /* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
4351 /* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
4352 /* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
4353 /* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
4354 /* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
4355 /* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
4356 /* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
4357 /* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
4358 /* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
4359 /* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
4360 /* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
4361 /* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
4362 /* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
4363 /* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
4364 /* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
4365 /* 0xff */ iemOp_Invalid
4366};
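/* Dispatch example: iemOp_2byteEscape (one byte opcode 0x0f, see below)
   fetches the next opcode byte and indexes this table with it, so e.g.
   0f a2 (cpuid) lands on the third entry of the 0xa0 row above. */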
4367
4368/** @} */
4369
4370
4371/** @name One byte opcodes.
4372 *
4373 * @{
4374 */
4375
4376/** Opcode 0x00. */
4377FNIEMOP_DEF(iemOp_add_Eb_Gb)
4378{
4379 IEMOP_MNEMONIC("add Eb,Gb");
4380 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
4381}
4382
4383
4384/** Opcode 0x01. */
4385FNIEMOP_DEF(iemOp_add_Ev_Gv)
4386{
4387 IEMOP_MNEMONIC("add Ev,Gv");
4388 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
4389}
4390
4391
4392/** Opcode 0x02. */
4393FNIEMOP_DEF(iemOp_add_Gb_Eb)
4394{
4395 IEMOP_MNEMONIC("add Gb,Eb");
4396 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
4397}
4398
4399
4400/** Opcode 0x03. */
4401FNIEMOP_DEF(iemOp_add_Gv_Ev)
4402{
4403 IEMOP_MNEMONIC("add Gv,Ev");
4404 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
4405}
4406
4407
4408/** Opcode 0x04. */
4409FNIEMOP_DEF(iemOp_add_Al_Ib)
4410{
4411 IEMOP_MNEMONIC("add al,Ib");
4412 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
4413}
4414
4415
4416/** Opcode 0x05. */
4417FNIEMOP_DEF(iemOp_add_eAX_Iz)
4418{
4419 IEMOP_MNEMONIC("add rAX,Iz");
4420 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
4421}
4422
4423
4424/** Opcode 0x06. */
4425FNIEMOP_DEF(iemOp_push_ES)
4426{
4427 IEMOP_MNEMONIC("push es");
4428 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
4429}
4430
4431
4432/** Opcode 0x07. */
4433FNIEMOP_DEF(iemOp_pop_ES)
4434{
4435 IEMOP_MNEMONIC("pop es");
4436 IEMOP_HLP_NO_64BIT();
4437 IEMOP_HLP_NO_LOCK_PREFIX();
4438 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
4439}
4440
4441
4442/** Opcode 0x08. */
4443FNIEMOP_DEF(iemOp_or_Eb_Gb)
4444{
4445 IEMOP_MNEMONIC("or Eb,Gb");
4446 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4447 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
4448}
4449
4450
4451/** Opcode 0x09. */
4452FNIEMOP_DEF(iemOp_or_Ev_Gv)
4453{
4454 IEMOP_MNEMONIC("or Ev,Gv ");
4455 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4456 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
4457}
4458
4459
4460/** Opcode 0x0a. */
4461FNIEMOP_DEF(iemOp_or_Gb_Eb)
4462{
4463 IEMOP_MNEMONIC("or Gb,Eb");
4464 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4465 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
4466}
4467
4468
4469/** Opcode 0x0b. */
4470FNIEMOP_DEF(iemOp_or_Gv_Ev)
4471{
4472 IEMOP_MNEMONIC("or Gv,Ev");
4473 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4474 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
4475}
4476
4477
4478/** Opcode 0x0c. */
4479FNIEMOP_DEF(iemOp_or_Al_Ib)
4480{
4481 IEMOP_MNEMONIC("or al,Ib");
4482 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4483 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
4484}
4485
4486
4487/** Opcode 0x0d. */
4488FNIEMOP_DEF(iemOp_or_eAX_Iz)
4489{
4490 IEMOP_MNEMONIC("or rAX,Iz");
4491 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4492 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
4493}
4494
4495
4496/** Opcode 0x0e. */
4497FNIEMOP_DEF(iemOp_push_CS)
4498{
4499 IEMOP_MNEMONIC("push cs");
4500 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
4501}
4502
4503
4504/** Opcode 0x0f. */
4505FNIEMOP_DEF(iemOp_2byteEscape)
4506{
4507 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4508 return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
4509}
4510
4511/** Opcode 0x10. */
4512FNIEMOP_DEF(iemOp_adc_Eb_Gb)
4513{
4514 IEMOP_MNEMONIC("adc Eb,Gb");
4515 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
4516}
4517
4518
4519/** Opcode 0x11. */
4520FNIEMOP_DEF(iemOp_adc_Ev_Gv)
4521{
4522 IEMOP_MNEMONIC("adc Ev,Gv");
4523 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
4524}
4525
4526
4527/** Opcode 0x12. */
4528FNIEMOP_DEF(iemOp_adc_Gb_Eb)
4529{
4530 IEMOP_MNEMONIC("adc Gb,Eb");
4531 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
4532}
4533
4534
4535/** Opcode 0x13. */
4536FNIEMOP_DEF(iemOp_adc_Gv_Ev)
4537{
4538 IEMOP_MNEMONIC("adc Gv,Ev");
4539 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
4540}
4541
4542
4543/** Opcode 0x14. */
4544FNIEMOP_DEF(iemOp_adc_Al_Ib)
4545{
4546 IEMOP_MNEMONIC("adc al,Ib");
4547 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
4548}
4549
4550
4551/** Opcode 0x15. */
4552FNIEMOP_DEF(iemOp_adc_eAX_Iz)
4553{
4554 IEMOP_MNEMONIC("adc rAX,Iz");
4555 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
4556}
4557
4558
4559/** Opcode 0x16. */
4560FNIEMOP_DEF(iemOp_push_SS)
4561{
4562 IEMOP_MNEMONIC("push ss");
4563 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
4564}
4565
4566
4567/** Opcode 0x17. */
4568FNIEMOP_DEF(iemOp_pop_SS)
4569{
4570 IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
4571 IEMOP_HLP_NO_LOCK_PREFIX();
4572 IEMOP_HLP_NO_64BIT();
4573 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
4574}
4575
4576
4577/** Opcode 0x18. */
4578FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
4579{
4580 IEMOP_MNEMONIC("sbb Eb,Gb");
4581 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
4582}
4583
4584
4585/** Opcode 0x19. */
4586FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
4587{
4588 IEMOP_MNEMONIC("sbb Ev,Gv");
4589 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
4590}
4591
4592
4593/** Opcode 0x1a. */
4594FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
4595{
4596 IEMOP_MNEMONIC("sbb Gb,Eb");
4597 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
4598}
4599
4600
4601/** Opcode 0x1b. */
4602FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
4603{
4604 IEMOP_MNEMONIC("sbb Gv,Ev");
4605 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
4606}
4607
4608
4609/** Opcode 0x1c. */
4610FNIEMOP_DEF(iemOp_sbb_Al_Ib)
4611{
4612 IEMOP_MNEMONIC("sbb al,Ib");
4613 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
4614}
4615
4616
4617/** Opcode 0x1d. */
4618FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
4619{
4620 IEMOP_MNEMONIC("sbb rAX,Iz");
4621 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
4622}
4623
4624
4625/** Opcode 0x1e. */
4626FNIEMOP_DEF(iemOp_push_DS)
4627{
4628 IEMOP_MNEMONIC("push ds");
4629 return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
4630}
4631
4632
4633/** Opcode 0x1f. */
4634FNIEMOP_DEF(iemOp_pop_DS)
4635{
4636 IEMOP_MNEMONIC("pop ds");
4637 IEMOP_HLP_NO_LOCK_PREFIX();
4638 IEMOP_HLP_NO_64BIT();
4639 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
4640}
4641
4642
4643/** Opcode 0x20. */
4644FNIEMOP_DEF(iemOp_and_Eb_Gb)
4645{
4646 IEMOP_MNEMONIC("and Eb,Gb");
4647 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4648 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
4649}
4650
4651
4652/** Opcode 0x21. */
4653FNIEMOP_DEF(iemOp_and_Ev_Gv)
4654{
4655 IEMOP_MNEMONIC("and Ev,Gv");
4656 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4657 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
4658}
4659
4660
4661/** Opcode 0x22. */
4662FNIEMOP_DEF(iemOp_and_Gb_Eb)
4663{
4664 IEMOP_MNEMONIC("and Gb,Eb");
4665 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4666 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
4667}
4668
4669
4670/** Opcode 0x23. */
4671FNIEMOP_DEF(iemOp_and_Gv_Ev)
4672{
4673 IEMOP_MNEMONIC("and Gv,Ev");
4674 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4675 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
4676}
4677
4678
4679/** Opcode 0x24. */
4680FNIEMOP_DEF(iemOp_and_Al_Ib)
4681{
4682 IEMOP_MNEMONIC("and al,Ib");
4683 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4684 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
4685}
4686
4687
4688/** Opcode 0x25. */
4689FNIEMOP_DEF(iemOp_and_eAX_Iz)
4690{
4691 IEMOP_MNEMONIC("and rAX,Iz");
4692 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4693 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
4694}
4695
4696
4697/** Opcode 0x26. */
4698FNIEMOP_DEF(iemOp_seg_ES)
4699{
4700 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
4701 pIemCpu->iEffSeg = X86_SREG_ES;
4702
4703 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4704 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4705}
4706
4707
4708/** Opcode 0x27. */
4709FNIEMOP_STUB(iemOp_daa);
4710
4711
4712/** Opcode 0x28. */
4713FNIEMOP_DEF(iemOp_sub_Eb_Gb)
4714{
4715 IEMOP_MNEMONIC("sub Eb,Gb");
4716 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
4717}
4718
4719
4720/** Opcode 0x29. */
4721FNIEMOP_DEF(iemOp_sub_Ev_Gv)
4722{
4723 IEMOP_MNEMONIC("sub Ev,Gv");
4724 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
4725}
4726
4727
4728/** Opcode 0x2a. */
4729FNIEMOP_DEF(iemOp_sub_Gb_Eb)
4730{
4731 IEMOP_MNEMONIC("sub Gb,Eb");
4732 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
4733}
4734
4735
4736/** Opcode 0x2b. */
4737FNIEMOP_DEF(iemOp_sub_Gv_Ev)
4738{
4739 IEMOP_MNEMONIC("sub Gv,Ev");
4740 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
4741}
4742
4743
4744/** Opcode 0x2c. */
4745FNIEMOP_DEF(iemOp_sub_Al_Ib)
4746{
4747 IEMOP_MNEMONIC("sub al,Ib");
4748 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
4749}
4750
4751
4752/** Opcode 0x2d. */
4753FNIEMOP_DEF(iemOp_sub_eAX_Iz)
4754{
4755 IEMOP_MNEMONIC("sub rAX,Iz");
4756 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
4757}
4758
4759
4760/** Opcode 0x2e. */
4761FNIEMOP_DEF(iemOp_seg_CS)
4762{
4763 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
4764 pIemCpu->iEffSeg = X86_SREG_CS;
4765
4766 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4767 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4768}
4769
4770
4771/** Opcode 0x2f. */
4772FNIEMOP_STUB(iemOp_das);
4773
4774
4775/** Opcode 0x30. */
4776FNIEMOP_DEF(iemOp_xor_Eb_Gb)
4777{
4778 IEMOP_MNEMONIC("xor Eb,Gb");
4779 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4780 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
4781}
4782
4783
4784/** Opcode 0x31. */
4785FNIEMOP_DEF(iemOp_xor_Ev_Gv)
4786{
4787 IEMOP_MNEMONIC("xor Ev,Gv");
4788 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4789 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
4790}
4791
4792
4793/** Opcode 0x32. */
4794FNIEMOP_DEF(iemOp_xor_Gb_Eb)
4795{
4796 IEMOP_MNEMONIC("xor Gb,Eb");
4797 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4798 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
4799}
4800
4801
4802/** Opcode 0x33. */
4803FNIEMOP_DEF(iemOp_xor_Gv_Ev)
4804{
4805 IEMOP_MNEMONIC("xor Gv,Ev");
4806 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4807 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
4808}
4809
4810
4811/** Opcode 0x34. */
4812FNIEMOP_DEF(iemOp_xor_Al_Ib)
4813{
4814 IEMOP_MNEMONIC("xor al,Ib");
4815 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4816 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
4817}
4818
4819
4820/** Opcode 0x35. */
4821FNIEMOP_DEF(iemOp_xor_eAX_Iz)
4822{
4823 IEMOP_MNEMONIC("xor rAX,Iz");
4824 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
4825 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
4826}
4827
4828
4829/** Opcode 0x36. */
4830FNIEMOP_DEF(iemOp_seg_SS)
4831{
4832 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
4833 pIemCpu->iEffSeg = X86_SREG_SS;
4834
4835 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4836 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4837}
4838
4839
4840/** Opcode 0x37. */
4841FNIEMOP_STUB(iemOp_aaa);
4842
4843
4844/** Opcode 0x38. */
4845FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
4846{
4847 IEMOP_MNEMONIC("cmp Eb,Gb");
4848 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
4849 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
4850}
4851
4852
4853/** Opcode 0x39. */
4854FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
4855{
4856 IEMOP_MNEMONIC("cmp Ev,Gv");
4857 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
4858 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
4859}
4860
4861
4862/** Opcode 0x3a. */
4863FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
4864{
4865 IEMOP_MNEMONIC("cmp Gb,Eb");
4866 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
4867}
4868
4869
4870/** Opcode 0x3b. */
4871FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
4872{
4873 IEMOP_MNEMONIC("cmp Gv,Ev");
4874 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
4875}
4876
4877
4878/** Opcode 0x3c. */
4879FNIEMOP_DEF(iemOp_cmp_Al_Ib)
4880{
4881 IEMOP_MNEMONIC("cmp al,Ib");
4882 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
4883}
4884
4885
4886/** Opcode 0x3d. */
4887FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
4888{
4889 IEMOP_MNEMONIC("cmp rAX,Iz");
4890 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
4891}
4892
4893
4894/** Opcode 0x3e. */
4895FNIEMOP_DEF(iemOp_seg_DS)
4896{
4897 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
4898 pIemCpu->iEffSeg = X86_SREG_DS;
4899
4900 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4901 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4902}
4903
4904
4905/** Opcode 0x3f. */
4906FNIEMOP_STUB(iemOp_aas);
4907
4908/**
4909 * Common 'inc/dec/not/neg register' helper.
4910 */
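/* (In this file the helper is reached from the 0x40..0x4f inc/dec
   encodings below whenever those bytes are not consumed as REX
   prefixes in 64-bit mode.) */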
4911FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
4912{
4913 IEMOP_HLP_NO_LOCK_PREFIX();
4914 switch (pIemCpu->enmEffOpSize)
4915 {
4916 case IEMMODE_16BIT:
4917 IEM_MC_BEGIN(2, 0);
4918 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4919 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4920 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
4921 IEM_MC_REF_EFLAGS(pEFlags);
4922 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
4923 IEM_MC_ADVANCE_RIP();
4924 IEM_MC_END();
4925 return VINF_SUCCESS;
4926
4927 case IEMMODE_32BIT:
4928 IEM_MC_BEGIN(2, 0);
4929 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4930 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4931 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
4932 IEM_MC_REF_EFLAGS(pEFlags);
4933 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
4934 IEM_MC_ADVANCE_RIP();
4935 IEM_MC_END();
4936 return VINF_SUCCESS;
4937
4938 case IEMMODE_64BIT:
4939 IEM_MC_BEGIN(2, 0);
4940 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4941 IEM_MC_ARG(uint32_t *, pEFlags, 1);
4942 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
4943 IEM_MC_REF_EFLAGS(pEFlags);
4944 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
4945 IEM_MC_ADVANCE_RIP();
4946 IEM_MC_END();
4947 return VINF_SUCCESS;
4948 }
4949 return VINF_SUCCESS;
4950}
4951
4952
4953/** Opcode 0x40. */
4954FNIEMOP_DEF(iemOp_inc_eAX)
4955{
4956 /*
4957 * This is a REX prefix in 64-bit mode.
4958 */
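/* 0x40 sets none of the REX.R/X/B/W bits, so only the prefix flag is
   recorded before re-dispatching on the next opcode byte; 0x41..0x4f
   additionally set the bits encoded in their low nibble. */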
4959 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4960 {
4961 pIemCpu->fPrefixes |= IEM_OP_PRF_REX;
4962
4963 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4964 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4965 }
4966
4967 IEMOP_MNEMONIC("inc eAX");
4968 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
4969}
4970
4971
4972/** Opcode 0x41. */
4973FNIEMOP_DEF(iemOp_inc_eCX)
4974{
4975 /*
4976 * This is a REX prefix in 64-bit mode.
4977 */
4978 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4979 {
4980 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
4981 pIemCpu->uRexB = 1 << 3;
4982
4983 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
4984 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
4985 }
4986
4987 IEMOP_MNEMONIC("inc eCX");
4988 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
4989}
4990
4991
4992/** Opcode 0x42. */
4993FNIEMOP_DEF(iemOp_inc_eDX)
4994{
4995 /*
4996 * This is a REX prefix in 64-bit mode.
4997 */
4998 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4999 {
5000 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
5001 pIemCpu->uRexIndex = 1 << 3;
5002
5003 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5004 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5005 }
5006
5007 IEMOP_MNEMONIC("inc eDX");
5008 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
5009}
5010
5011
5012
5013/** Opcode 0x43. */
5014FNIEMOP_DEF(iemOp_inc_eBX)
5015{
5016 /*
5017 * This is a REX prefix in 64-bit mode.
5018 */
5019 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5020 {
5021 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
5022 pIemCpu->uRexB = 1 << 3;
5023 pIemCpu->uRexIndex = 1 << 3;
5024
5025 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5026 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5027 }
5028
5029 IEMOP_MNEMONIC("inc eBX");
5030 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
5031}
5032
5033
5034/** Opcode 0x44. */
5035FNIEMOP_DEF(iemOp_inc_eSP)
5036{
5037 /*
5038 * This is a REX prefix in 64-bit mode.
5039 */
5040 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5041 {
5042 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
5043 pIemCpu->uRexReg = 1 << 3;
5044
5045 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5046 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5047 }
5048
5049 IEMOP_MNEMONIC("inc eSP");
5050 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
5051}
5052
5053
5054/** Opcode 0x45. */
5055FNIEMOP_DEF(iemOp_inc_eBP)
5056{
5057 /*
5058 * This is a REX prefix in 64-bit mode.
5059 */
5060 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5061 {
5062 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
5063 pIemCpu->uRexReg = 1 << 3;
5064 pIemCpu->uRexB = 1 << 3;
5065
5066 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5067 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5068 }
5069
5070 IEMOP_MNEMONIC("inc eBP");
5071 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
5072}
5073
5074
5075/** Opcode 0x46. */
5076FNIEMOP_DEF(iemOp_inc_eSI)
5077{
5078 /*
5079 * This is a REX prefix in 64-bit mode.
5080 */
5081 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5082 {
5083 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
5084 pIemCpu->uRexReg = 1 << 3;
5085 pIemCpu->uRexIndex = 1 << 3;
5086
5087 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5088 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5089 }
5090
5091 IEMOP_MNEMONIC("inc eSI");
5092 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
5093}
5094
5095
5096/** Opcode 0x47. */
5097FNIEMOP_DEF(iemOp_inc_eDI)
5098{
5099 /*
5100 * This is a REX prefix in 64-bit mode.
5101 */
5102 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5103 {
5104 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
5105 pIemCpu->uRexReg = 1 << 3;
5106 pIemCpu->uRexB = 1 << 3;
5107 pIemCpu->uRexIndex = 1 << 3;
5108
5109 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5110 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5111 }
5112
5113 IEMOP_MNEMONIC("inc eDI");
5114 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
5115}
5116
5117
5118/** Opcode 0x48. */
5119FNIEMOP_DEF(iemOp_dec_eAX)
5120{
5121 /*
5122 * This is a REX prefix in 64-bit mode.
5123 */
5124 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5125 {
5126 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
5127 iemRecalEffOpSize(pIemCpu);
5128
5129 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5130 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5131 }
5132
5133 IEMOP_MNEMONIC("dec eAX");
5134 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
5135}
5136
5137
5138/** Opcode 0x49. */
5139FNIEMOP_DEF(iemOp_dec_eCX)
5140{
5141 /*
5142 * This is a REX prefix in 64-bit mode.
5143 */
5144 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5145 {
5146 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
5147 pIemCpu->uRexB = 1 << 3;
5148 iemRecalEffOpSize(pIemCpu);
5149
5150 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5151 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5152 }
5153
5154 IEMOP_MNEMONIC("dec eCX");
5155 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
5156}
5157
5158
5159/** Opcode 0x4a. */
5160FNIEMOP_DEF(iemOp_dec_eDX)
5161{
5162 /*
5163 * This is a REX prefix in 64-bit mode.
5164 */
5165 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5166 {
5167 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5168 pIemCpu->uRexIndex = 1 << 3;
5169 iemRecalEffOpSize(pIemCpu);
5170
5171 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5172 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5173 }
5174
5175 IEMOP_MNEMONIC("dec eDX");
5176 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
5177}
5178
5179
5180/** Opcode 0x4b. */
5181FNIEMOP_DEF(iemOp_dec_eBX)
5182{
5183 /*
5184 * This is a REX prefix in 64-bit mode.
5185 */
5186 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5187 {
5188 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5189 pIemCpu->uRexB = 1 << 3;
5190 pIemCpu->uRexIndex = 1 << 3;
5191 iemRecalEffOpSize(pIemCpu);
5192
5193 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5194 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5195 }
5196
5197 IEMOP_MNEMONIC("dec eBX");
5198 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
5199}
5200
5201
5202/** Opcode 0x4c. */
5203FNIEMOP_DEF(iemOp_dec_eSP)
5204{
5205 /*
5206 * This is a REX prefix in 64-bit mode.
5207 */
5208 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5209 {
5210 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
5211 pIemCpu->uRexReg = 1 << 3;
5212 iemRecalEffOpSize(pIemCpu);
5213
5214 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5215 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5216 }
5217
5218 IEMOP_MNEMONIC("dec eSP");
5219 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
5220}
5221
5222
5223/** Opcode 0x4d. */
5224FNIEMOP_DEF(iemOp_dec_eBP)
5225{
5226 /*
5227 * This is a REX prefix in 64-bit mode.
5228 */
5229 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5230 {
5231 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
5232 pIemCpu->uRexReg = 1 << 3;
5233 pIemCpu->uRexB = 1 << 3;
5234 iemRecalEffOpSize(pIemCpu);
5235
5236 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5237 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5238 }
5239
5240 IEMOP_MNEMONIC("dec eBP");
5241 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
5242}
5243
5244
5245/** Opcode 0x4e. */
5246FNIEMOP_DEF(iemOp_dec_eSI)
5247{
5248 /*
5249 * This is a REX prefix in 64-bit mode.
5250 */
5251 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5252 {
5253 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5254 pIemCpu->uRexReg = 1 << 3;
5255 pIemCpu->uRexIndex = 1 << 3;
5256 iemRecalEffOpSize(pIemCpu);
5257
5258 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5259 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5260 }
5261
5262 IEMOP_MNEMONIC("dec eSI");
5263 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
5264}
5265
5266
5267/** Opcode 0x4f. */
5268FNIEMOP_DEF(iemOp_dec_eDI)
5269{
5270 /*
5271 * This is a REX prefix in 64-bit mode.
5272 */
5273 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5274 {
5275 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
5276 pIemCpu->uRexReg = 1 << 3;
5277 pIemCpu->uRexB = 1 << 3;
5278 pIemCpu->uRexIndex = 1 << 3;
5279 iemRecalEffOpSize(pIemCpu);
5280
5281 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5282 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5283 }
5284
5285 IEMOP_MNEMONIC("dec eDI");
5286 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
5287}
5288
5289
5290/**
5291 * Common 'push register' helper.
5292 */
5293FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
5294{
5295 IEMOP_HLP_NO_LOCK_PREFIX();
5296 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5297 {
5298 iReg |= pIemCpu->uRexB;
5299 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5300 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5301 }
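/* In 64-bit mode a push defaults to a 64-bit operand and the operand
   size prefix selects 16-bit; a 32-bit push is not encodable there. */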
5302
5303 switch (pIemCpu->enmEffOpSize)
5304 {
5305 case IEMMODE_16BIT:
5306 IEM_MC_BEGIN(0, 1);
5307 IEM_MC_LOCAL(uint16_t, u16Value);
5308 IEM_MC_FETCH_GREG_U16(u16Value, iReg);
5309 IEM_MC_PUSH_U16(u16Value);
5310 IEM_MC_ADVANCE_RIP();
5311 IEM_MC_END();
5312 break;
5313
5314 case IEMMODE_32BIT:
5315 IEM_MC_BEGIN(0, 1);
5316 IEM_MC_LOCAL(uint32_t, u32Value);
5317 IEM_MC_FETCH_GREG_U32(u32Value, iReg);
5318 IEM_MC_PUSH_U32(u32Value);
5319 IEM_MC_ADVANCE_RIP();
5320 IEM_MC_END();
5321 break;
5322
5323 case IEMMODE_64BIT:
5324 IEM_MC_BEGIN(0, 1);
5325 IEM_MC_LOCAL(uint64_t, u64Value);
5326 IEM_MC_FETCH_GREG_U64(u64Value, iReg);
5327 IEM_MC_PUSH_U64(u64Value);
5328 IEM_MC_ADVANCE_RIP();
5329 IEM_MC_END();
5330 break;
5331 }
5332
5333 return VINF_SUCCESS;
5334}
5335
5336
5337/** Opcode 0x50. */
5338FNIEMOP_DEF(iemOp_push_eAX)
5339{
5340 IEMOP_MNEMONIC("push rAX");
5341 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
5342}
5343
5344
5345/** Opcode 0x51. */
5346FNIEMOP_DEF(iemOp_push_eCX)
5347{
5348 IEMOP_MNEMONIC("push rCX");
5349 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
5350}
5351
5352
5353/** Opcode 0x52. */
5354FNIEMOP_DEF(iemOp_push_eDX)
5355{
5356 IEMOP_MNEMONIC("push rDX");
5357 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
5358}
5359
5360
5361/** Opcode 0x53. */
5362FNIEMOP_DEF(iemOp_push_eBX)
5363{
5364 IEMOP_MNEMONIC("push rBX");
5365 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
5366}
5367
5368
5369/** Opcode 0x54. */
5370FNIEMOP_DEF(iemOp_push_eSP)
5371{
5372 IEMOP_MNEMONIC("push rSP");
5373 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
5374}
5375
5376
5377/** Opcode 0x55. */
5378FNIEMOP_DEF(iemOp_push_eBP)
5379{
5380 IEMOP_MNEMONIC("push rBP");
5381 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
5382}
5383
5384
5385/** Opcode 0x56. */
5386FNIEMOP_DEF(iemOp_push_eSI)
5387{
5388 IEMOP_MNEMONIC("push rSI");
5389 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
5390}
5391
5392
5393/** Opcode 0x57. */
5394FNIEMOP_DEF(iemOp_push_eDI)
5395{
5396 IEMOP_MNEMONIC("push rDI");
5397 return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
5398}
5399
5400
5401/**
5402 * Common 'pop register' helper.
5403 */
5404FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
5405{
5406 IEMOP_HLP_NO_LOCK_PREFIX();
5407 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5408 {
5409 iReg |= pIemCpu->uRexB;
5410 pIemCpu->enmDefOpSize = IEMMODE_64BIT;
5411 pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
5412 }
5413
5414/** @todo How does this code handle iReg==X86_GREG_xSP. How does a real CPU
5415 * handle it, for that matter (Intel pseudo code hints that the popped
5416 * value is incremented by the stack item size.) Test it, both encodings
5417 * and all three register sizes. */
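/* (The architected behaviour is that the stack pointer is incremented
   before the popped value is written back, so 'pop xSP' simply ends up
   with the value read from the old top of stack.) */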
5418 switch (pIemCpu->enmEffOpSize)
5419 {
5420 case IEMMODE_16BIT:
5421 IEM_MC_BEGIN(0, 1);
5422 IEM_MC_LOCAL(uint16_t *, pu16Dst);
5423 IEM_MC_REF_GREG_U16(pu16Dst, iReg);
5424 IEM_MC_POP_U16(pu16Dst);
5425 IEM_MC_ADVANCE_RIP();
5426 IEM_MC_END();
5427 break;
5428
5429 case IEMMODE_32BIT:
5430 IEM_MC_BEGIN(0, 1);
5431 IEM_MC_LOCAL(uint32_t *, pu32Dst);
5432 IEM_MC_REF_GREG_U32(pu32Dst, iReg);
5433 IEM_MC_POP_U32(pu32Dst);
5434 IEM_MC_ADVANCE_RIP();
5435 IEM_MC_END();
5436 break;
5437
5438 case IEMMODE_64BIT:
5439 IEM_MC_BEGIN(0, 1);
5440 IEM_MC_LOCAL(uint64_t *, pu64Dst);
5441 IEM_MC_REF_GREG_U64(pu64Dst, iReg);
5442 IEM_MC_POP_U64(pu64Dst);
5443 IEM_MC_ADVANCE_RIP();
5444 IEM_MC_END();
5445 break;
5446 }
5447
5448 return VINF_SUCCESS;
5449}
5450
5451
5452/** Opcode 0x58. */
5453FNIEMOP_DEF(iemOp_pop_eAX)
5454{
5455 IEMOP_MNEMONIC("pop rAX");
5456 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
5457}
5458
5459
5460/** Opcode 0x59. */
5461FNIEMOP_DEF(iemOp_pop_eCX)
5462{
5463 IEMOP_MNEMONIC("pop rCX");
5464 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
5465}
5466
5467
5468/** Opcode 0x5a. */
5469FNIEMOP_DEF(iemOp_pop_eDX)
5470{
5471 IEMOP_MNEMONIC("pop rDX");
5472 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
5473}
5474
5475
5476/** Opcode 0x5b. */
5477FNIEMOP_DEF(iemOp_pop_eBX)
5478{
5479 IEMOP_MNEMONIC("pop rBX");
5480 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
5481}
5482
5483
5484/** Opcode 0x5c. */
5485FNIEMOP_DEF(iemOp_pop_eSP)
5486{
5487 IEMOP_MNEMONIC("pop rSP");
5488 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP);
5489}
5490
5491
5492/** Opcode 0x5d. */
5493FNIEMOP_DEF(iemOp_pop_eBP)
5494{
5495 IEMOP_MNEMONIC("pop rBP");
5496 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
5497}
5498
5499
5500/** Opcode 0x5e. */
5501FNIEMOP_DEF(iemOp_pop_eSI)
5502{
5503 IEMOP_MNEMONIC("pop rSI");
5504 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
5505}
5506
5507
5508/** Opcode 0x5f. */
5509FNIEMOP_DEF(iemOp_pop_eDI)
5510{
5511 IEMOP_MNEMONIC("pop rDI");
5512 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
5513}
5514
5515
5516/** Opcode 0x60. */
5517FNIEMOP_DEF(iemOp_pusha)
5518{
5519 IEMOP_MNEMONIC("pusha");
5520 IEMOP_HLP_NO_64BIT();
5521 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5522 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
5523 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5524 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
5525}
5526
5527
5528/** Opcode 0x61. */
5529FNIEMOP_DEF(iemOp_popa)
5530{
5531 IEMOP_MNEMONIC("popa");
5532 IEMOP_HLP_NO_64BIT();
5533 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
5534 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
5535 Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
5536 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
5537}
5538
5539
5540/** Opcode 0x62. */
5541FNIEMOP_STUB(iemOp_bound_Gv_Ma);
5542/** Opcode 0x63. */
5543FNIEMOP_STUB(iemOp_arpl_Ew_Gw);
5544
5545
5546/** Opcode 0x64. */
5547FNIEMOP_DEF(iemOp_seg_FS)
5548{
5549 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
5550 pIemCpu->iEffSeg = X86_SREG_FS;
5551
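    /* A segment prefix never stands on its own; fetch the next opcode byte
       and dispatch it through the one-byte opcode map. */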
5552 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5553 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5554}
5555
5556
5557/** Opcode 0x65. */
5558FNIEMOP_DEF(iemOp_seg_GS)
5559{
5560 pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
5561 pIemCpu->iEffSeg = X86_SREG_GS;
5562
5563 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5564 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5565}
5566
5567
5568/** Opcode 0x66. */
5569FNIEMOP_DEF(iemOp_op_size)
5570{
5571 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
5572 iemRecalEffOpSize(pIemCpu);
5573
5574 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5575 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5576}
5577
5578
5579/** Opcode 0x67. */
5580FNIEMOP_DEF(iemOp_addr_size)
5581{
5582 pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
5583 switch (pIemCpu->enmDefAddrMode)
5584 {
5585 case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5586 case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
5587 case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
5588 default: AssertFailed();
5589 }
5590
5591 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
5592 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
5593}
5594
5595
5596/** Opcode 0x68. */
5597FNIEMOP_DEF(iemOp_push_Iz)
5598{
5599 IEMOP_MNEMONIC("push Iz");
5600 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5601 switch (pIemCpu->enmEffOpSize)
5602 {
5603 case IEMMODE_16BIT:
5604 {
5605 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
5606 IEMOP_HLP_NO_LOCK_PREFIX();
5607 IEM_MC_BEGIN(0,0);
5608 IEM_MC_PUSH_U16(u16Imm);
5609 IEM_MC_ADVANCE_RIP();
5610 IEM_MC_END();
5611 return VINF_SUCCESS;
5612 }
5613
5614 case IEMMODE_32BIT:
5615 {
5616 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
5617 IEMOP_HLP_NO_LOCK_PREFIX();
5618 IEM_MC_BEGIN(0,0);
5619 IEM_MC_PUSH_U32(u32Imm);
5620 IEM_MC_ADVANCE_RIP();
5621 IEM_MC_END();
5622 return VINF_SUCCESS;
5623 }
5624
5625 case IEMMODE_64BIT:
5626 {
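            /* There is no 64-bit push-immediate encoding; the 32-bit
               immediate is sign-extended to 64 bits. */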
5627 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
5628 IEMOP_HLP_NO_LOCK_PREFIX();
5629 IEM_MC_BEGIN(0,0);
5630 IEM_MC_PUSH_U64(u64Imm);
5631 IEM_MC_ADVANCE_RIP();
5632 IEM_MC_END();
5633 return VINF_SUCCESS;
5634 }
5635
5636 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5637 }
5638}
5639
5640
5641/** Opcode 0x69. */
5642FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
5643{
5644 IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
5645 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
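    /* IMUL leaves SF, ZF, AF and PF architecturally undefined; only CF and
       OF are defined. */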
5646 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5647
5648 switch (pIemCpu->enmEffOpSize)
5649 {
5650 case IEMMODE_16BIT:
5651 {
5652 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
5653 IEMOP_HLP_NO_LOCK_PREFIX();
5654 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5655 {
5656 /* register operand */
5657 IEM_MC_BEGIN(3, 1);
5658 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5659 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
5660 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5661 IEM_MC_LOCAL(uint16_t, u16Tmp);
5662
5663 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5664 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5665 IEM_MC_REF_EFLAGS(pEFlags);
5666 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5667 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5668
5669 IEM_MC_ADVANCE_RIP();
5670 IEM_MC_END();
5671 }
5672 else
5673 {
5674 /* memory operand */
5675 IEM_MC_BEGIN(3, 2);
5676 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5677 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
5678 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5679 IEM_MC_LOCAL(uint16_t, u16Tmp);
5680 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5681
5682 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5683 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5684 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5685 IEM_MC_REF_EFLAGS(pEFlags);
5686 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5687 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5688
5689 IEM_MC_ADVANCE_RIP();
5690 IEM_MC_END();
5691 }
5692 return VINF_SUCCESS;
5693 }
5694
5695 case IEMMODE_32BIT:
5696 {
5697 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
5698 IEMOP_HLP_NO_LOCK_PREFIX();
5699 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5700 {
5701 /* register operand */
5702 IEM_MC_BEGIN(3, 1);
5703 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5704 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
5705 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5706 IEM_MC_LOCAL(uint32_t, u32Tmp);
5707
5708 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5709 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5710 IEM_MC_REF_EFLAGS(pEFlags);
5711 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5712 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5713
5714 IEM_MC_ADVANCE_RIP();
5715 IEM_MC_END();
5716 }
5717 else
5718 {
5719 /* memory operand */
5720 IEM_MC_BEGIN(3, 2);
5721 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5722 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
5723 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5724 IEM_MC_LOCAL(uint32_t, u32Tmp);
5725 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5726
5727 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5728 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5729 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5730 IEM_MC_REF_EFLAGS(pEFlags);
5731 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5732 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5733
5734 IEM_MC_ADVANCE_RIP();
5735 IEM_MC_END();
5736 }
5737 return VINF_SUCCESS;
5738 }
5739
5740 case IEMMODE_64BIT:
5741 {
5742 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
5743 IEMOP_HLP_NO_LOCK_PREFIX();
5744 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5745 {
5746 /* register operand */
5747 IEM_MC_BEGIN(3, 1);
5748 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5749 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
5750 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5751 IEM_MC_LOCAL(uint64_t, u64Tmp);
5752
5753 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5754 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5755 IEM_MC_REF_EFLAGS(pEFlags);
5756 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5757 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5758
5759 IEM_MC_ADVANCE_RIP();
5760 IEM_MC_END();
5761 }
5762 else
5763 {
5764 /* memory operand */
5765 IEM_MC_BEGIN(3, 2);
5766 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5767 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
5768 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5769 IEM_MC_LOCAL(uint64_t, u64Tmp);
5770 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5771
5772 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5773 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5774 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5775 IEM_MC_REF_EFLAGS(pEFlags);
5776 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5777 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5778
5779 IEM_MC_ADVANCE_RIP();
5780 IEM_MC_END();
5781 }
5782 return VINF_SUCCESS;
5783 }
5784 }
5785 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5786}
5787
5788
5789/** Opcode 0x6a. */
5790FNIEMOP_DEF(iemOp_push_Ib)
5791{
5792 IEMOP_MNEMONIC("push Ib");
5793 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
5794 IEMOP_HLP_NO_LOCK_PREFIX();
5795 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
5796
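    /* The byte immediate is sign-extended to the effective operand size by
       the implicit int8_t conversion in the push macros below. */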
5797 IEM_MC_BEGIN(0,0);
5798 switch (pIemCpu->enmEffOpSize)
5799 {
5800 case IEMMODE_16BIT:
5801 IEM_MC_PUSH_U16(i8Imm);
5802 break;
5803 case IEMMODE_32BIT:
5804 IEM_MC_PUSH_U32(i8Imm);
5805 break;
5806 case IEMMODE_64BIT:
5807 IEM_MC_PUSH_U64(i8Imm);
5808 break;
5809 }
5810 IEM_MC_ADVANCE_RIP();
5811 IEM_MC_END();
5812 return VINF_SUCCESS;
5813}
5814
5815
5816/** Opcode 0x6b. */
5817FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
5818{
5819    IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Ib; */
5820 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5821 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
5822 IEMOP_HLP_NO_LOCK_PREFIX();
5823 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5824
5825 switch (pIemCpu->enmEffOpSize)
5826 {
5827 case IEMMODE_16BIT:
5828 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5829 {
5830 /* register operand */
5831 IEM_MC_BEGIN(3, 1);
5832 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5833 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
5834 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5835 IEM_MC_LOCAL(uint16_t, u16Tmp);
5836
5837 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5838 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5839 IEM_MC_REF_EFLAGS(pEFlags);
5840 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5841 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5842
5843 IEM_MC_ADVANCE_RIP();
5844 IEM_MC_END();
5845 }
5846 else
5847 {
5848 /* memory operand */
5849 IEM_MC_BEGIN(3, 2);
5850 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5851 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ (int8_t)u8Imm, 1);
5852 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5853 IEM_MC_LOCAL(uint16_t, u16Tmp);
5854 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5855
5856 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5857 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5858 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
5859 IEM_MC_REF_EFLAGS(pEFlags);
5860 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
5861 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
5862
5863 IEM_MC_ADVANCE_RIP();
5864 IEM_MC_END();
5865 }
5866 return VINF_SUCCESS;
5867
5868 case IEMMODE_32BIT:
5869 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5870 {
5871 /* register operand */
5872 IEM_MC_BEGIN(3, 1);
5873 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5874 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
5875 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5876 IEM_MC_LOCAL(uint32_t, u32Tmp);
5877
5878 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5879 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5880 IEM_MC_REF_EFLAGS(pEFlags);
5881 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5882 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5883
5884 IEM_MC_ADVANCE_RIP();
5885 IEM_MC_END();
5886 }
5887 else
5888 {
5889 /* memory operand */
5890 IEM_MC_BEGIN(3, 2);
5891 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5892 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ (int8_t)u8Imm, 1);
5893 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5894 IEM_MC_LOCAL(uint32_t, u32Tmp);
5895 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5896
5897 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5898 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5899 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
5900 IEM_MC_REF_EFLAGS(pEFlags);
5901 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
5902 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
5903
5904 IEM_MC_ADVANCE_RIP();
5905 IEM_MC_END();
5906 }
5907 return VINF_SUCCESS;
5908
5909 case IEMMODE_64BIT:
5910 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5911 {
5912 /* register operand */
5913 IEM_MC_BEGIN(3, 1);
5914 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5915 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
5916 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5917 IEM_MC_LOCAL(uint64_t, u64Tmp);
5918
5919 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5920 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5921 IEM_MC_REF_EFLAGS(pEFlags);
5922 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5923 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5924
5925 IEM_MC_ADVANCE_RIP();
5926 IEM_MC_END();
5927 }
5928 else
5929 {
5930 /* memory operand */
5931 IEM_MC_BEGIN(3, 2);
5932 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5933 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ (int8_t)u8Imm, 1);
5934 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5935 IEM_MC_LOCAL(uint64_t, u64Tmp);
5936 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5937
5938 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
5939 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
5940 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
5941 IEM_MC_REF_EFLAGS(pEFlags);
5942 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
5943 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
5944
5945 IEM_MC_ADVANCE_RIP();
5946 IEM_MC_END();
5947 }
5948 return VINF_SUCCESS;
5949 }
5950 AssertFailedReturn(VERR_INTERNAL_ERROR_3);
5951}
5952
5953
5954/** Opcode 0x6c. */
5955FNIEMOP_DEF(iemOp_insb_Yb_DX)
5956{
5957 IEMOP_HLP_NO_LOCK_PREFIX();
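    /* Note: both the REPZ and REPNZ prefix bits are accepted and treated as
       a plain REP here. */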
5958 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
5959 {
5960 IEMOP_MNEMONIC("rep ins Yb,DX");
5961 switch (pIemCpu->enmEffAddrMode)
5962 {
5963 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr16);
5964 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr32);
5965 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op8_addr64);
5966 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5967 }
5968 }
5969 else
5970 {
5971 IEMOP_MNEMONIC("ins Yb,DX");
5972 switch (pIemCpu->enmEffAddrMode)
5973 {
5974 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr16);
5975 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr32);
5976 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op8_addr64);
5977 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5978 }
5979 }
5980}
5981
5982
5983/** Opcode 0x6d. */
5984FNIEMOP_DEF(iemOp_inswd_Yv_DX)
5985{
5986 IEMOP_HLP_NO_LOCK_PREFIX();
5987 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
5988 {
5989 IEMOP_MNEMONIC("rep ins Yv,DX");
5990 switch (pIemCpu->enmEffOpSize)
5991 {
5992 case IEMMODE_16BIT:
5993 switch (pIemCpu->enmEffAddrMode)
5994 {
5995 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr16);
5996 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr32);
5997 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op16_addr64);
5998 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5999 }
6000 break;
6001 case IEMMODE_64BIT:
6002 case IEMMODE_32BIT:
6003 switch (pIemCpu->enmEffAddrMode)
6004 {
6005 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr16);
6006 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr32);
6007 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rep_ins_op32_addr64);
6008 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6009 }
6010 break;
6011 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6012 }
6013 }
6014 else
6015 {
6016 IEMOP_MNEMONIC("ins Yv,DX");
6017 switch (pIemCpu->enmEffOpSize)
6018 {
6019 case IEMMODE_16BIT:
6020 switch (pIemCpu->enmEffAddrMode)
6021 {
6022 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr16);
6023 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr32);
6024 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op16_addr64);
6025 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6026 }
6027 break;
6028 case IEMMODE_64BIT:
6029 case IEMMODE_32BIT:
6030 switch (pIemCpu->enmEffAddrMode)
6031 {
6032 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr16);
6033 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr32);
6034 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ins_op32_addr64);
6035 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6036 }
6037 break;
6038 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6039 }
6040 }
6041}
6042
6043
6044/** Opcode 0x6e. */
6045FNIEMOP_DEF(iemOp_outsb_Yb_DX)
6046{
6047 IEMOP_HLP_NO_LOCK_PREFIX();
6048 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
6049 {
6050        IEMOP_MNEMONIC("rep outs DX,Yb");
6051 switch (pIemCpu->enmEffAddrMode)
6052 {
6053 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg);
6054 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg);
6055 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg);
6056 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6057 }
6058 }
6059 else
6060 {
6061        IEMOP_MNEMONIC("outs DX,Yb");
6062 switch (pIemCpu->enmEffAddrMode)
6063 {
6064 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg);
6065 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg);
6066 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg);
6067 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6068 }
6069 }
6070}
6071
6072
6073/** Opcode 0x6f. */
6074FNIEMOP_DEF(iemOp_outswd_Yv_DX)
6075{
6076 IEMOP_HLP_NO_LOCK_PREFIX();
6077 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
6078 {
6079 IEMOP_MNEMONIC("rep outs DX,Yv");
6080 switch (pIemCpu->enmEffOpSize)
6081 {
6082 case IEMMODE_16BIT:
6083 switch (pIemCpu->enmEffAddrMode)
6084 {
6085 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
6086 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
6087 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
6088 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6089 }
6090 break;
6091 case IEMMODE_64BIT:
6092 case IEMMODE_32BIT:
6093 switch (pIemCpu->enmEffAddrMode)
6094 {
6095 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
6096 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
6097 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
6098 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6099 }
6100 break;
6101 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6102 }
6103 }
6104 else
6105 {
6106 IEMOP_MNEMONIC("outs DX,Yv");
6107 switch (pIemCpu->enmEffOpSize)
6108 {
6109 case IEMMODE_16BIT:
6110 switch (pIemCpu->enmEffAddrMode)
6111 {
6112 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg);
6113 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg);
6114 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg);
6115 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6116 }
6117 break;
6118 case IEMMODE_64BIT:
6119 case IEMMODE_32BIT:
6120 switch (pIemCpu->enmEffAddrMode)
6121 {
6122 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg);
6123 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg);
6124 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg);
6125 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6126 }
6127 break;
6128 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6129 }
6130 }
6131}
6132
6133
6134/** Opcode 0x70. */
6135FNIEMOP_DEF(iemOp_jo_Jb)
6136{
6137 IEMOP_MNEMONIC("jo Jb");
6138 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6139 IEMOP_HLP_NO_LOCK_PREFIX();
6140 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6141
6142 IEM_MC_BEGIN(0, 0);
6143 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
6144 IEM_MC_REL_JMP_S8(i8Imm);
6145 } IEM_MC_ELSE() {
6146 IEM_MC_ADVANCE_RIP();
6147 } IEM_MC_ENDIF();
6148 IEM_MC_END();
6149 return VINF_SUCCESS;
6150}
6151
6152
6153/** Opcode 0x71. */
6154FNIEMOP_DEF(iemOp_jno_Jb)
6155{
6156 IEMOP_MNEMONIC("jno Jb");
6157 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6158 IEMOP_HLP_NO_LOCK_PREFIX();
6159 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6160
6161 IEM_MC_BEGIN(0, 0);
6162 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
6163 IEM_MC_ADVANCE_RIP();
6164 } IEM_MC_ELSE() {
6165 IEM_MC_REL_JMP_S8(i8Imm);
6166 } IEM_MC_ENDIF();
6167 IEM_MC_END();
6168 return VINF_SUCCESS;
6169}
6170
6171/** Opcode 0x72. */
6172FNIEMOP_DEF(iemOp_jc_Jb)
6173{
6174 IEMOP_MNEMONIC("jc/jnae Jb");
6175 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6176 IEMOP_HLP_NO_LOCK_PREFIX();
6177 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6178
6179 IEM_MC_BEGIN(0, 0);
6180 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
6181 IEM_MC_REL_JMP_S8(i8Imm);
6182 } IEM_MC_ELSE() {
6183 IEM_MC_ADVANCE_RIP();
6184 } IEM_MC_ENDIF();
6185 IEM_MC_END();
6186 return VINF_SUCCESS;
6187}
6188
6189
6190/** Opcode 0x73. */
6191FNIEMOP_DEF(iemOp_jnc_Jb)
6192{
6193 IEMOP_MNEMONIC("jnc/jnb Jb");
6194 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6195 IEMOP_HLP_NO_LOCK_PREFIX();
6196 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6197
6198 IEM_MC_BEGIN(0, 0);
6199 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
6200 IEM_MC_ADVANCE_RIP();
6201 } IEM_MC_ELSE() {
6202 IEM_MC_REL_JMP_S8(i8Imm);
6203 } IEM_MC_ENDIF();
6204 IEM_MC_END();
6205 return VINF_SUCCESS;
6206}
6207
6208
6209/** Opcode 0x74. */
6210FNIEMOP_DEF(iemOp_je_Jb)
6211{
6212 IEMOP_MNEMONIC("je/jz Jb");
6213 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6214 IEMOP_HLP_NO_LOCK_PREFIX();
6215 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6216
6217 IEM_MC_BEGIN(0, 0);
6218 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
6219 IEM_MC_REL_JMP_S8(i8Imm);
6220 } IEM_MC_ELSE() {
6221 IEM_MC_ADVANCE_RIP();
6222 } IEM_MC_ENDIF();
6223 IEM_MC_END();
6224 return VINF_SUCCESS;
6225}
6226
6227
6228/** Opcode 0x75. */
6229FNIEMOP_DEF(iemOp_jne_Jb)
6230{
6231 IEMOP_MNEMONIC("jne/jnz Jb");
6232 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6233 IEMOP_HLP_NO_LOCK_PREFIX();
6234 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6235
6236 IEM_MC_BEGIN(0, 0);
6237 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
6238 IEM_MC_ADVANCE_RIP();
6239 } IEM_MC_ELSE() {
6240 IEM_MC_REL_JMP_S8(i8Imm);
6241 } IEM_MC_ENDIF();
6242 IEM_MC_END();
6243 return VINF_SUCCESS;
6244}
6245
6246
6247/** Opcode 0x76. */
6248FNIEMOP_DEF(iemOp_jbe_Jb)
6249{
6250 IEMOP_MNEMONIC("jbe/jna Jb");
6251 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6252 IEMOP_HLP_NO_LOCK_PREFIX();
6253 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6254
6255 IEM_MC_BEGIN(0, 0);
6256 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
6257 IEM_MC_REL_JMP_S8(i8Imm);
6258 } IEM_MC_ELSE() {
6259 IEM_MC_ADVANCE_RIP();
6260 } IEM_MC_ENDIF();
6261 IEM_MC_END();
6262 return VINF_SUCCESS;
6263}
6264
6265
6266/** Opcode 0x77. */
6267FNIEMOP_DEF(iemOp_jnbe_Jb)
6268{
6269 IEMOP_MNEMONIC("jnbe/ja Jb");
6270 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6271 IEMOP_HLP_NO_LOCK_PREFIX();
6272 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6273
6274 IEM_MC_BEGIN(0, 0);
6275 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
6276 IEM_MC_ADVANCE_RIP();
6277 } IEM_MC_ELSE() {
6278 IEM_MC_REL_JMP_S8(i8Imm);
6279 } IEM_MC_ENDIF();
6280 IEM_MC_END();
6281 return VINF_SUCCESS;
6282}
6283
6284
6285/** Opcode 0x78. */
6286FNIEMOP_DEF(iemOp_js_Jb)
6287{
6288 IEMOP_MNEMONIC("js Jb");
6289 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6290 IEMOP_HLP_NO_LOCK_PREFIX();
6291 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6292
6293 IEM_MC_BEGIN(0, 0);
6294 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6295 IEM_MC_REL_JMP_S8(i8Imm);
6296 } IEM_MC_ELSE() {
6297 IEM_MC_ADVANCE_RIP();
6298 } IEM_MC_ENDIF();
6299 IEM_MC_END();
6300 return VINF_SUCCESS;
6301}
6302
6303
6304/** Opcode 0x79. */
6305FNIEMOP_DEF(iemOp_jns_Jb)
6306{
6307 IEMOP_MNEMONIC("jns Jb");
6308 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6309 IEMOP_HLP_NO_LOCK_PREFIX();
6310 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6311
6312 IEM_MC_BEGIN(0, 0);
6313 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
6314 IEM_MC_ADVANCE_RIP();
6315 } IEM_MC_ELSE() {
6316 IEM_MC_REL_JMP_S8(i8Imm);
6317 } IEM_MC_ENDIF();
6318 IEM_MC_END();
6319 return VINF_SUCCESS;
6320}
6321
6322
6323/** Opcode 0x7a. */
6324FNIEMOP_DEF(iemOp_jp_Jb)
6325{
6326 IEMOP_MNEMONIC("jp Jb");
6327 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6328 IEMOP_HLP_NO_LOCK_PREFIX();
6329 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6330
6331 IEM_MC_BEGIN(0, 0);
6332 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6333 IEM_MC_REL_JMP_S8(i8Imm);
6334 } IEM_MC_ELSE() {
6335 IEM_MC_ADVANCE_RIP();
6336 } IEM_MC_ENDIF();
6337 IEM_MC_END();
6338 return VINF_SUCCESS;
6339}
6340
6341
6342/** Opcode 0x7b. */
6343FNIEMOP_DEF(iemOp_jnp_Jb)
6344{
6345 IEMOP_MNEMONIC("jnp Jb");
6346 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6347 IEMOP_HLP_NO_LOCK_PREFIX();
6348 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6349
6350 IEM_MC_BEGIN(0, 0);
6351 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
6352 IEM_MC_ADVANCE_RIP();
6353 } IEM_MC_ELSE() {
6354 IEM_MC_REL_JMP_S8(i8Imm);
6355 } IEM_MC_ENDIF();
6356 IEM_MC_END();
6357 return VINF_SUCCESS;
6358}
6359
6360
6361/** Opcode 0x7c. */
6362FNIEMOP_DEF(iemOp_jl_Jb)
6363{
6364 IEMOP_MNEMONIC("jl/jnge Jb");
6365 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6366 IEMOP_HLP_NO_LOCK_PREFIX();
6367 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6368
6369 IEM_MC_BEGIN(0, 0);
6370 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6371 IEM_MC_REL_JMP_S8(i8Imm);
6372 } IEM_MC_ELSE() {
6373 IEM_MC_ADVANCE_RIP();
6374 } IEM_MC_ENDIF();
6375 IEM_MC_END();
6376 return VINF_SUCCESS;
6377}
6378
6379
6380/** Opcode 0x7d. */
6381FNIEMOP_DEF(iemOp_jnl_Jb)
6382{
6383 IEMOP_MNEMONIC("jnl/jge Jb");
6384 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6385 IEMOP_HLP_NO_LOCK_PREFIX();
6386 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6387
6388 IEM_MC_BEGIN(0, 0);
6389 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
6390 IEM_MC_ADVANCE_RIP();
6391 } IEM_MC_ELSE() {
6392 IEM_MC_REL_JMP_S8(i8Imm);
6393 } IEM_MC_ENDIF();
6394 IEM_MC_END();
6395 return VINF_SUCCESS;
6396}
6397
6398
6399/** Opcode 0x7e. */
6400FNIEMOP_DEF(iemOp_jle_Jb)
6401{
6402 IEMOP_MNEMONIC("jle/jng Jb");
6403 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6404 IEMOP_HLP_NO_LOCK_PREFIX();
6405 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6406
6407 IEM_MC_BEGIN(0, 0);
6408 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
6409 IEM_MC_REL_JMP_S8(i8Imm);
6410 } IEM_MC_ELSE() {
6411 IEM_MC_ADVANCE_RIP();
6412 } IEM_MC_ENDIF();
6413 IEM_MC_END();
6414 return VINF_SUCCESS;
6415}
6416
6417
6418/** Opcode 0x7f. */
6419FNIEMOP_DEF(iemOp_jnle_Jb)
6420{
6421 IEMOP_MNEMONIC("jnle/jg Jb");
6422 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
6423 IEMOP_HLP_NO_LOCK_PREFIX();
6424 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
6425
6426 IEM_MC_BEGIN(0, 0);
6427 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
6428 IEM_MC_ADVANCE_RIP();
6429 } IEM_MC_ELSE() {
6430 IEM_MC_REL_JMP_S8(i8Imm);
6431 } IEM_MC_ENDIF();
6432 IEM_MC_END();
6433 return VINF_SUCCESS;
6434}
6435
6436
6437/** Opcode 0x80. */
6438FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
6439{
6440 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
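    /* The eight group-1 mnemonics are packed into one string at a 4-byte
       stride ("or" gets two NUL pads), so reg*4 indexes the right name. */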
6441 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
6442 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6443
6444 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6445 {
6446 /* register target */
6447 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6448 IEMOP_HLP_NO_LOCK_PREFIX();
6449 IEM_MC_BEGIN(3, 0);
6450 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
6451 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
6452 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6453
6454 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6455 IEM_MC_REF_EFLAGS(pEFlags);
6456 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
6457
6458 IEM_MC_ADVANCE_RIP();
6459 IEM_MC_END();
6460 }
6461 else
6462 {
6463 /* memory target */
6464 uint32_t fAccess;
6465 if (pImpl->pfnLockedU8)
6466 fAccess = IEM_ACCESS_DATA_RW;
6467 else
6468 { /* CMP */
6469 IEMOP_HLP_NO_LOCK_PREFIX();
6470 fAccess = IEM_ACCESS_DATA_R;
6471 }
6472 IEM_MC_BEGIN(3, 2);
6473 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
6474 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6475 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6476
6477 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6478 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6479 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1);
6480
6481 IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6482 IEM_MC_FETCH_EFLAGS(EFlags);
6483 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6484 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
6485 else
6486 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);
6487
6488 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
6489 IEM_MC_COMMIT_EFLAGS(EFlags);
6490 IEM_MC_ADVANCE_RIP();
6491 IEM_MC_END();
6492 }
6493 return VINF_SUCCESS;
6494}
6495
6496
6497/** Opcode 0x81. */
6498FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
6499{
6500 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6501 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
6502 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6503
6504 switch (pIemCpu->enmEffOpSize)
6505 {
6506 case IEMMODE_16BIT:
6507 {
6508 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6509 {
6510 /* register target */
6511 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6512 IEMOP_HLP_NO_LOCK_PREFIX();
6513 IEM_MC_BEGIN(3, 0);
6514 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6515 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1);
6516 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6517
6518 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6519 IEM_MC_REF_EFLAGS(pEFlags);
6520 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6521
6522 IEM_MC_ADVANCE_RIP();
6523 IEM_MC_END();
6524 }
6525 else
6526 {
6527 /* memory target */
6528 uint32_t fAccess;
6529 if (pImpl->pfnLockedU16)
6530 fAccess = IEM_ACCESS_DATA_RW;
6531 else
6532 { /* CMP, TEST */
6533 IEMOP_HLP_NO_LOCK_PREFIX();
6534 fAccess = IEM_ACCESS_DATA_R;
6535 }
6536 IEM_MC_BEGIN(3, 2);
6537 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6538 IEM_MC_ARG(uint16_t, u16Src, 1);
6539 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6540 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6541
6542 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6543 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
6544 IEM_MC_ASSIGN(u16Src, u16Imm);
6545 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6546 IEM_MC_FETCH_EFLAGS(EFlags);
6547 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6548 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6549 else
6550 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6551
6552 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6553 IEM_MC_COMMIT_EFLAGS(EFlags);
6554 IEM_MC_ADVANCE_RIP();
6555 IEM_MC_END();
6556 }
6557 break;
6558 }
6559
6560 case IEMMODE_32BIT:
6561 {
6562 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6563 {
6564 /* register target */
6565 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6566 IEMOP_HLP_NO_LOCK_PREFIX();
6567 IEM_MC_BEGIN(3, 0);
6568 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6569 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1);
6570 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6571
6572 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6573 IEM_MC_REF_EFLAGS(pEFlags);
6574 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6575
6576 IEM_MC_ADVANCE_RIP();
6577 IEM_MC_END();
6578 }
6579 else
6580 {
6581 /* memory target */
6582 uint32_t fAccess;
6583 if (pImpl->pfnLockedU32)
6584 fAccess = IEM_ACCESS_DATA_RW;
6585 else
6586 { /* CMP, TEST */
6587 IEMOP_HLP_NO_LOCK_PREFIX();
6588 fAccess = IEM_ACCESS_DATA_R;
6589 }
6590 IEM_MC_BEGIN(3, 2);
6591 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6592 IEM_MC_ARG(uint32_t, u32Src, 1);
6593 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6594 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6595
6596 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6597 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
6598 IEM_MC_ASSIGN(u32Src, u32Imm);
6599 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6600 IEM_MC_FETCH_EFLAGS(EFlags);
6601 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6602 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6603 else
6604 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6605
6606 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6607 IEM_MC_COMMIT_EFLAGS(EFlags);
6608 IEM_MC_ADVANCE_RIP();
6609 IEM_MC_END();
6610 }
6611 break;
6612 }
6613
6614 case IEMMODE_64BIT:
6615 {
6616 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6617 {
6618 /* register target */
6619 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6620 IEMOP_HLP_NO_LOCK_PREFIX();
6621 IEM_MC_BEGIN(3, 0);
6622 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6623 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1);
6624 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6625
6626 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6627 IEM_MC_REF_EFLAGS(pEFlags);
6628 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6629
6630 IEM_MC_ADVANCE_RIP();
6631 IEM_MC_END();
6632 }
6633 else
6634 {
6635 /* memory target */
6636 uint32_t fAccess;
6637 if (pImpl->pfnLockedU64)
6638 fAccess = IEM_ACCESS_DATA_RW;
6639 else
6640 { /* CMP */
6641 IEMOP_HLP_NO_LOCK_PREFIX();
6642 fAccess = IEM_ACCESS_DATA_R;
6643 }
6644 IEM_MC_BEGIN(3, 2);
6645 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6646 IEM_MC_ARG(uint64_t, u64Src, 1);
6647 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6648 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6649
6650 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6651 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
6652 IEM_MC_ASSIGN(u64Src, u64Imm);
6653 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6654 IEM_MC_FETCH_EFLAGS(EFlags);
6655 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6656 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6657 else
6658 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6659
6660 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6661 IEM_MC_COMMIT_EFLAGS(EFlags);
6662 IEM_MC_ADVANCE_RIP();
6663 IEM_MC_END();
6664 }
6665 break;
6666 }
6667 }
6668 return VINF_SUCCESS;
6669}
6670
6671
6672/** Opcode 0x82. */
6673FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
6674{
6675 IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
6676 return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
6677}
6678
6679
6680/** Opcode 0x83. */
6681FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
6682{
6683 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6684 IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
6685 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
6686
6687 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6688 {
6689 /*
6690 * Register target
6691 */
6692 IEMOP_HLP_NO_LOCK_PREFIX();
6693 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6694 switch (pIemCpu->enmEffOpSize)
6695 {
6696 case IEMMODE_16BIT:
6697 {
6698 IEM_MC_BEGIN(3, 0);
6699 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6700 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ (int8_t)u8Imm,1);
6701 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6702
6703 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6704 IEM_MC_REF_EFLAGS(pEFlags);
6705 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6706
6707 IEM_MC_ADVANCE_RIP();
6708 IEM_MC_END();
6709 break;
6710 }
6711
6712 case IEMMODE_32BIT:
6713 {
6714 IEM_MC_BEGIN(3, 0);
6715 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6716 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ (int8_t)u8Imm,1);
6717 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6718
6719 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6720 IEM_MC_REF_EFLAGS(pEFlags);
6721 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6722
6723 IEM_MC_ADVANCE_RIP();
6724 IEM_MC_END();
6725 break;
6726 }
6727
6728 case IEMMODE_64BIT:
6729 {
6730 IEM_MC_BEGIN(3, 0);
6731 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6732 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ (int8_t)u8Imm,1);
6733 IEM_MC_ARG(uint32_t *, pEFlags, 2);
6734
6735 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6736 IEM_MC_REF_EFLAGS(pEFlags);
6737 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6738
6739 IEM_MC_ADVANCE_RIP();
6740 IEM_MC_END();
6741 break;
6742 }
6743 }
6744 }
6745 else
6746 {
6747 /*
6748 * Memory target.
6749 */
6750 uint32_t fAccess;
6751 if (pImpl->pfnLockedU16)
6752 fAccess = IEM_ACCESS_DATA_RW;
6753 else
6754 { /* CMP */
6755 IEMOP_HLP_NO_LOCK_PREFIX();
6756 fAccess = IEM_ACCESS_DATA_R;
6757 }
6758
6759 switch (pIemCpu->enmEffOpSize)
6760 {
6761 case IEMMODE_16BIT:
6762 {
6763 IEM_MC_BEGIN(3, 2);
6764 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
6765 IEM_MC_ARG(uint16_t, u16Src, 1);
6766 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6767 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6768
6769 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6770 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6771 IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);
6772 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6773 IEM_MC_FETCH_EFLAGS(EFlags);
6774 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6775 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6776 else
6777 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6778
6779 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6780 IEM_MC_COMMIT_EFLAGS(EFlags);
6781 IEM_MC_ADVANCE_RIP();
6782 IEM_MC_END();
6783 break;
6784 }
6785
6786 case IEMMODE_32BIT:
6787 {
6788 IEM_MC_BEGIN(3, 2);
6789 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6790 IEM_MC_ARG(uint32_t, u32Src, 1);
6791 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6792 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6793
6794 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6795 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6796 IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);
6797 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6798 IEM_MC_FETCH_EFLAGS(EFlags);
6799 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6800 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6801 else
6802 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6803
6804 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6805 IEM_MC_COMMIT_EFLAGS(EFlags);
6806 IEM_MC_ADVANCE_RIP();
6807 IEM_MC_END();
6808 break;
6809 }
6810
6811 case IEMMODE_64BIT:
6812 {
6813 IEM_MC_BEGIN(3, 2);
6814 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6815 IEM_MC_ARG(uint64_t, u64Src, 1);
6816 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6817 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6818
6819 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6820 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
6821 IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);
6822 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6823 IEM_MC_FETCH_EFLAGS(EFlags);
6824 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6825 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6826 else
6827 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6828
6829 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6830 IEM_MC_COMMIT_EFLAGS(EFlags);
6831 IEM_MC_ADVANCE_RIP();
6832 IEM_MC_END();
6833 break;
6834 }
6835 }
6836 }
6837 return VINF_SUCCESS;
6838}
6839
6840
6841/** Opcode 0x84. */
6842FNIEMOP_DEF(iemOp_test_Eb_Gb)
6843{
6844 IEMOP_MNEMONIC("test Eb,Gb");
6845 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
6846 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
6847 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
6848}
6849
6850
6851/** Opcode 0x85. */
6852FNIEMOP_DEF(iemOp_test_Ev_Gv)
6853{
6854 IEMOP_MNEMONIC("test Ev,Gv");
6855 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
6856 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
6857 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
6858}
6859
6860
6861/** Opcode 0x86. */
6862FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
6863{
6864 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6865 IEMOP_MNEMONIC("xchg Eb,Gb");
6866
6867 /*
6868 * If rm is denoting a register, no more instruction bytes.
6869 */
6870 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6871 {
6872 IEMOP_HLP_NO_LOCK_PREFIX();
6873
6874 IEM_MC_BEGIN(0, 2);
6875 IEM_MC_LOCAL(uint8_t, uTmp1);
6876 IEM_MC_LOCAL(uint8_t, uTmp2);
6877
6878 IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6879 IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6880 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6881 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6882
6883 IEM_MC_ADVANCE_RIP();
6884 IEM_MC_END();
6885 }
6886 else
6887 {
6888 /*
6889 * We're accessing memory.
6890 */
6891/** @todo the register must be committed separately! */
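/* Note: real CPUs assert the bus lock for XCHG with a memory operand even
   when no LOCK prefix is present. */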
6892 IEM_MC_BEGIN(2, 2);
6893 IEM_MC_ARG(uint8_t *, pu8Mem, 0);
6894 IEM_MC_ARG(uint8_t *, pu8Reg, 1);
6895 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6896
6897 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6898 IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6899 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6900 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
6901 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);
6902
6903 IEM_MC_ADVANCE_RIP();
6904 IEM_MC_END();
6905 }
6906 return VINF_SUCCESS;
6907}
6908
6909
6910/** Opcode 0x87. */
6911FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
6912{
6913 IEMOP_MNEMONIC("xchg Ev,Gv");
6914 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6915
6916 /*
6917 * If rm is denoting a register, no more instruction bytes.
6918 */
6919 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6920 {
6921 IEMOP_HLP_NO_LOCK_PREFIX();
6922
6923 switch (pIemCpu->enmEffOpSize)
6924 {
6925 case IEMMODE_16BIT:
6926 IEM_MC_BEGIN(0, 2);
6927 IEM_MC_LOCAL(uint16_t, uTmp1);
6928 IEM_MC_LOCAL(uint16_t, uTmp2);
6929
6930 IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6931 IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6932 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6933 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6934
6935 IEM_MC_ADVANCE_RIP();
6936 IEM_MC_END();
6937 return VINF_SUCCESS;
6938
6939 case IEMMODE_32BIT:
6940 IEM_MC_BEGIN(0, 2);
6941 IEM_MC_LOCAL(uint32_t, uTmp1);
6942 IEM_MC_LOCAL(uint32_t, uTmp2);
6943
6944 IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6945 IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6946 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6947 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6948
6949 IEM_MC_ADVANCE_RIP();
6950 IEM_MC_END();
6951 return VINF_SUCCESS;
6952
6953 case IEMMODE_64BIT:
6954 IEM_MC_BEGIN(0, 2);
6955 IEM_MC_LOCAL(uint64_t, uTmp1);
6956 IEM_MC_LOCAL(uint64_t, uTmp2);
6957
6958 IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6959 IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6960 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
6961 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
6962
6963 IEM_MC_ADVANCE_RIP();
6964 IEM_MC_END();
6965 return VINF_SUCCESS;
6966
6967 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6968 }
6969 }
6970 else
6971 {
6972 /*
6973 * We're accessing memory.
6974 */
6975 switch (pIemCpu->enmEffOpSize)
6976 {
6977/** @todo the register must be committed separately! */
6978 case IEMMODE_16BIT:
6979 IEM_MC_BEGIN(2, 2);
6980 IEM_MC_ARG(uint16_t *, pu16Mem, 0);
6981 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
6982 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6983
6984 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
6985 IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
6986 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6987 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
6988 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);
6989
6990 IEM_MC_ADVANCE_RIP();
6991 IEM_MC_END();
6992 return VINF_SUCCESS;
6993
6994 case IEMMODE_32BIT:
6995 IEM_MC_BEGIN(2, 2);
6996 IEM_MC_ARG(uint32_t *, pu32Mem, 0);
6997 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
6998 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6999
7000 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7001 IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7002 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7003 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
7004 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);
7005
7006 IEM_MC_ADVANCE_RIP();
7007 IEM_MC_END();
7008 return VINF_SUCCESS;
7009
7010 case IEMMODE_64BIT:
7011 IEM_MC_BEGIN(2, 2);
7012 IEM_MC_ARG(uint64_t *, pu64Mem, 0);
7013 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
7014 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7015
7016 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7017 IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
7018 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7019 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
7020 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);
7021
7022 IEM_MC_ADVANCE_RIP();
7023 IEM_MC_END();
7024 return VINF_SUCCESS;
7025
7026 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7027 }
7028 }
7029}
7030
7031
7032/** Opcode 0x88. */
7033FNIEMOP_DEF(iemOp_mov_Eb_Gb)
7034{
7035 IEMOP_MNEMONIC("mov Eb,Gb");
7036
7037 uint8_t bRm;
7038 IEM_OPCODE_GET_NEXT_U8(&bRm);
7039 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7040
7041 /*
7042 * If rm is denoting a register, no more instruction bytes.
7043 */
7044 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7045 {
7046 IEM_MC_BEGIN(0, 1);
7047 IEM_MC_LOCAL(uint8_t, u8Value);
7048 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7049 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
7050 IEM_MC_ADVANCE_RIP();
7051 IEM_MC_END();
7052 }
7053 else
7054 {
7055 /*
7056 * We're writing a register to memory.
7057 */
7058 IEM_MC_BEGIN(0, 2);
7059 IEM_MC_LOCAL(uint8_t, u8Value);
7060 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7061 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7062 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7063 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
7064 IEM_MC_ADVANCE_RIP();
7065 IEM_MC_END();
7066 }
7067 return VINF_SUCCESS;
7068
7069}
7070
7071
7072/** Opcode 0x89. */
7073FNIEMOP_DEF(iemOp_mov_Ev_Gv)
7074{
7075 IEMOP_MNEMONIC("mov Ev,Gv");
7076
7077 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7078 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7079
7080 /*
7081 * If rm is denoting a register, no more instruction bytes.
7082 */
7083 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7084 {
7085 switch (pIemCpu->enmEffOpSize)
7086 {
7087 case IEMMODE_16BIT:
7088 IEM_MC_BEGIN(0, 1);
7089 IEM_MC_LOCAL(uint16_t, u16Value);
7090 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7091 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
7092 IEM_MC_ADVANCE_RIP();
7093 IEM_MC_END();
7094 break;
7095
7096 case IEMMODE_32BIT:
7097 IEM_MC_BEGIN(0, 1);
7098 IEM_MC_LOCAL(uint32_t, u32Value);
7099 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7100 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
7101 IEM_MC_ADVANCE_RIP();
7102 IEM_MC_END();
7103 break;
7104
7105 case IEMMODE_64BIT:
7106 IEM_MC_BEGIN(0, 1);
7107 IEM_MC_LOCAL(uint64_t, u64Value);
7108 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7109 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
7110 IEM_MC_ADVANCE_RIP();
7111 IEM_MC_END();
7112 break;
7113 }
7114 }
7115 else
7116 {
7117 /*
7118 * We're writing a register to memory.
7119 */
7120 switch (pIemCpu->enmEffOpSize)
7121 {
7122 case IEMMODE_16BIT:
7123 IEM_MC_BEGIN(0, 2);
7124 IEM_MC_LOCAL(uint16_t, u16Value);
7125 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7126 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7127 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7128 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
7129 IEM_MC_ADVANCE_RIP();
7130 IEM_MC_END();
7131 break;
7132
7133 case IEMMODE_32BIT:
7134 IEM_MC_BEGIN(0, 2);
7135 IEM_MC_LOCAL(uint32_t, u32Value);
7136 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7137 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7138 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7139 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
7140 IEM_MC_ADVANCE_RIP();
7141 IEM_MC_END();
7142 break;
7143
7144 case IEMMODE_64BIT:
7145 IEM_MC_BEGIN(0, 2);
7146 IEM_MC_LOCAL(uint64_t, u64Value);
7147 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7148 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7149 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
7150 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
7151 IEM_MC_ADVANCE_RIP();
7152 IEM_MC_END();
7153 break;
7154 }
7155 }
7156 return VINF_SUCCESS;
7157}
7158
7159
7160/** Opcode 0x8a. */
7161FNIEMOP_DEF(iemOp_mov_Gb_Eb)
7162{
7163 IEMOP_MNEMONIC("mov Gb,Eb");
7164
7165 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7166 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7167
7168 /*
7169 * If rm is denoting a register, no more instruction bytes.
7170 */
7171 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7172 {
7173 IEM_MC_BEGIN(0, 1);
7174 IEM_MC_LOCAL(uint8_t, u8Value);
7175 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7176 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
7177 IEM_MC_ADVANCE_RIP();
7178 IEM_MC_END();
7179 }
7180 else
7181 {
7182 /*
7183 * We're loading a register from memory.
7184 */
7185 IEM_MC_BEGIN(0, 2);
7186 IEM_MC_LOCAL(uint8_t, u8Value);
7187 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7188 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7189 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
7190 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
7191 IEM_MC_ADVANCE_RIP();
7192 IEM_MC_END();
7193 }
7194 return VINF_SUCCESS;
7195}
7196
7197
7198/** Opcode 0x8b. */
7199FNIEMOP_DEF(iemOp_mov_Gv_Ev)
7200{
7201 IEMOP_MNEMONIC("mov Gv,Ev");
7202
7203 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7204 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7205
7206 /*
7207 * If rm is denoting a register, no more instruction bytes.
7208 */
7209 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7210 {
7211 switch (pIemCpu->enmEffOpSize)
7212 {
7213 case IEMMODE_16BIT:
7214 IEM_MC_BEGIN(0, 1);
7215 IEM_MC_LOCAL(uint16_t, u16Value);
7216 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7217 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
7218 IEM_MC_ADVANCE_RIP();
7219 IEM_MC_END();
7220 break;
7221
7222 case IEMMODE_32BIT:
7223 IEM_MC_BEGIN(0, 1);
7224 IEM_MC_LOCAL(uint32_t, u32Value);
7225 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7226 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
7227 IEM_MC_ADVANCE_RIP();
7228 IEM_MC_END();
7229 break;
7230
7231 case IEMMODE_64BIT:
7232 IEM_MC_BEGIN(0, 1);
7233 IEM_MC_LOCAL(uint64_t, u64Value);
7234 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7235 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
7236 IEM_MC_ADVANCE_RIP();
7237 IEM_MC_END();
7238 break;
7239 }
7240 }
7241 else
7242 {
7243 /*
7244 * We're loading a register from memory.
7245 */
7246 switch (pIemCpu->enmEffOpSize)
7247 {
7248 case IEMMODE_16BIT:
7249 IEM_MC_BEGIN(0, 2);
7250 IEM_MC_LOCAL(uint16_t, u16Value);
7251 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7252 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7253 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
7254 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
7255 IEM_MC_ADVANCE_RIP();
7256 IEM_MC_END();
7257 break;
7258
7259 case IEMMODE_32BIT:
7260 IEM_MC_BEGIN(0, 2);
7261 IEM_MC_LOCAL(uint32_t, u32Value);
7262 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7263 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7264 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
7265 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
7266 IEM_MC_ADVANCE_RIP();
7267 IEM_MC_END();
7268 break;
7269
7270 case IEMMODE_64BIT:
7271 IEM_MC_BEGIN(0, 2);
7272 IEM_MC_LOCAL(uint64_t, u64Value);
7273 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7274 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7275 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
7276 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
7277 IEM_MC_ADVANCE_RIP();
7278 IEM_MC_END();
7279 break;
7280 }
7281 }
7282 return VINF_SUCCESS;
7283}
7284
7285
7286/** Opcode 0x8c. */
7287FNIEMOP_DEF(iemOp_mov_Ev_Sw)
7288{
7289 IEMOP_MNEMONIC("mov Ev,Sw");
7290
7291 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7292 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7293
7294 /*
7295 * Check that the destination register exists. The REX.R prefix is ignored.
7296 */
7297 uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
7298 if ( iSegReg > X86_SREG_GS)
7299 return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7300
7301 /*
7302 * If rm is denoting a register, no more instruction bytes.
7303 * In that case, the operand size is respected and the upper bits are
7304 * cleared (starting with some Pentium models).
7305 */
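    /* Illustration (not from the original source): with a 32-bit operand
       size, "mov eax, ds" (8C D8) would on such CPUs write the 16-bit DS
       selector zero-extended into all of EAX, e.g. DS=0x0023 gives
       EAX=0x00000023. */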
7306 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7307 {
7308 switch (pIemCpu->enmEffOpSize)
7309 {
7310 case IEMMODE_16BIT:
7311 IEM_MC_BEGIN(0, 1);
7312 IEM_MC_LOCAL(uint16_t, u16Value);
7313 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
7314 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
7315 IEM_MC_ADVANCE_RIP();
7316 IEM_MC_END();
7317 break;
7318
7319 case IEMMODE_32BIT:
7320 IEM_MC_BEGIN(0, 1);
7321 IEM_MC_LOCAL(uint32_t, u32Value);
7322 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg);
7323 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
7324 IEM_MC_ADVANCE_RIP();
7325 IEM_MC_END();
7326 break;
7327
7328 case IEMMODE_64BIT:
7329 IEM_MC_BEGIN(0, 1);
7330 IEM_MC_LOCAL(uint64_t, u64Value);
7331 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg);
7332 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
7333 IEM_MC_ADVANCE_RIP();
7334 IEM_MC_END();
7335 break;
7336 }
7337 }
7338 else
7339 {
7340 /*
7341 * We're saving the register to memory. The access is word sized
7342 * regardless of operand size prefixes.
7343 */
7344#if 0 /* not necessary */
7345 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
7346#endif
7347 IEM_MC_BEGIN(0, 2);
7348 IEM_MC_LOCAL(uint16_t, u16Value);
7349 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7350 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7351 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
7352 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
7353 IEM_MC_ADVANCE_RIP();
7354 IEM_MC_END();
7355 }
7356 return VINF_SUCCESS;
7357}
7358
7359
7360
7361
7362/** Opcode 0x8d. */
7363FNIEMOP_DEF(iemOp_lea_Gv_M)
7364{
7365 IEMOP_MNEMONIC("lea Gv,M");
7366 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7367 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7368 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7369 return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */
7370
7371 switch (pIemCpu->enmEffOpSize)
7372 {
7373 case IEMMODE_16BIT:
7374 IEM_MC_BEGIN(0, 2);
7375 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7376 IEM_MC_LOCAL(uint16_t, u16Cast);
7377 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7378 IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc);
7379 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
7380 IEM_MC_ADVANCE_RIP();
7381 IEM_MC_END();
7382 return VINF_SUCCESS;
7383
7384 case IEMMODE_32BIT:
7385 IEM_MC_BEGIN(0, 2);
7386 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7387 IEM_MC_LOCAL(uint32_t, u32Cast);
7388 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7389 IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc);
7390 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
7391 IEM_MC_ADVANCE_RIP();
7392 IEM_MC_END();
7393 return VINF_SUCCESS;
7394
7395 case IEMMODE_64BIT:
7396 IEM_MC_BEGIN(0, 1);
7397 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
7398 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
7399 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
7400 IEM_MC_ADVANCE_RIP();
7401 IEM_MC_END();
7402 return VINF_SUCCESS;
7403 }
7404 AssertFailedReturn(VERR_INTERNAL_ERROR_5);
7405}
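/* Worked example (illustrative): with a 32-bit address size but a 16-bit
   operand size, "lea ax, [eax+eax*2]" computes the full 32-bit effective
   address and IEM_MC_ASSIGN_TO_SMALLER keeps only its low 16 bits for AX. */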
7406
7407
7408/** Opcode 0x8e. */
7409FNIEMOP_DEF(iemOp_mov_Sw_Ev)
7410{
7411 IEMOP_MNEMONIC("mov Sw,Ev");
7412
7413 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7414 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7415
7416 /*
7417 * The practical operand size is 16-bit.
7418 */
7419#if 0 /* not necessary */
7420 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
7421#endif
7422
7423 /*
7424 * Check that the destination register exists and can be used with this
7425 * instruction. The REX.R prefix is ignored.
7426 */
7427 uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
7428 if ( iSegReg == X86_SREG_CS
7429 || iSegReg > X86_SREG_GS)
7430 return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7431
7432 /*
7433 * If rm is denoting a register, no more instruction bytes.
7434 */
7435 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7436 {
7437 IEM_MC_BEGIN(2, 0);
7438 IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
7439 IEM_MC_ARG(uint16_t, u16Value, 1);
7440 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7441 IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
7442 IEM_MC_END();
7443 }
7444 else
7445 {
7446 /*
7447 * We're loading the register from memory. The access is word sized
7448 * regardless of operand size prefixes.
7449 */
7450 IEM_MC_BEGIN(2, 1);
7451 IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
7452 IEM_MC_ARG(uint16_t, u16Value, 1);
7453 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
7454 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
7455 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
7456 IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
7457 IEM_MC_END();
7458 }
7459 return VINF_SUCCESS;
7460}
7461
7462
7463/** Opcode 0x8f. */
7464FNIEMOP_DEF(iemOp_pop_Ev)
7465{
7466 /* This bugger is rather annoying as it requires rSP to be updated before
7467 doing the effective address calculations. Will eventually require a
7468 split between the R/M+SIB decoding and the effective address
7469 calculation - which is something that is required for any attempt at
7470 reusing this code for a recompiler. It may also be good to have if we
7471 need to delay #UD exception caused by invalid lock prefixes.
7472
7473 For now, we'll do a mostly safe interpreter-only implementation here. */
7474 /** @todo What's the deal with the 'reg' field and pop Ev? Ignoring it for
7475 * now until tests show it's checked. */
7476 IEMOP_MNEMONIC("pop Ev");
7477 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
7478 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
7479
7480 /* Register access is relatively easy and can share code. */
7481 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
7482 return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
7483
7484 /*
7485 * Memory target.
7486 *
7487 * Intel says that RSP is incremented before it's used in any effective
7488 * address calculations. This means some serious extra annoyance here since
7489 * we decode and calculate the effective address in one step and like to
7490 * delay committing registers till everything is done.
7491 *
7492 * So, we'll decode and calculate the effective address twice. This will
7493 * require some recoding if turned into a recompiler.
7494 */
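    /* Worked example (illustrative): for "pop qword [rsp]" in 64-bit mode,
       the value is read from the old RSP, RSP is incremented by 8, and the
       store then goes to the effective address computed with the new RSP,
       i.e. to old RSP + 8; hence the second calculation below. */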
7495 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */
7496
7497#ifndef TST_IEM_CHECK_MC
7498 /* Calc effective address with modified ESP. */
7499 uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
7500 RTGCPTR GCPtrEff;
7501 VBOXSTRICTRC rcStrict;
7502 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7503 if (rcStrict != VINF_SUCCESS)
7504 return rcStrict;
7505 pIemCpu->offOpcode = offOpcodeSaved;
7506
7507 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
7508 uint64_t const RspSaved = pCtx->rsp;
7509 switch (pIemCpu->enmEffOpSize)
7510 {
7511 case IEMMODE_16BIT: iemRegAddToRsp(pCtx, 2); break;
7512 case IEMMODE_32BIT: iemRegAddToRsp(pCtx, 4); break;
7513 case IEMMODE_64BIT: iemRegAddToRsp(pCtx, 8); break;
7514 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7515 }
7516 rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, &GCPtrEff);
7517 Assert(rcStrict == VINF_SUCCESS);
7518 pCtx->rsp = RspSaved;
7519
7520 /* Perform the operation - this should be CImpl. */
7521 RTUINT64U TmpRsp;
7522 TmpRsp.u = pCtx->rsp;
7523 switch (pIemCpu->enmEffOpSize)
7524 {
7525 case IEMMODE_16BIT:
7526 {
7527 uint16_t u16Value;
7528 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
7529 if (rcStrict == VINF_SUCCESS)
7530 rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
7531 break;
7532 }
7533
7534 case IEMMODE_32BIT:
7535 {
7536 uint32_t u32Value;
7537 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
7538 if (rcStrict == VINF_SUCCESS)
7539 rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
7540 break;
7541 }
7542
7543 case IEMMODE_64BIT:
7544 {
7545 uint64_t u64Value;
7546 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
7547 if (rcStrict == VINF_SUCCESS)
7548 rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
7549 break;
7550 }
7551
7552 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7553 }
7554 if (rcStrict == VINF_SUCCESS)
7555 {
7556 pCtx->rsp = TmpRsp.u;
7557 iemRegUpdateRip(pIemCpu);
7558 }
7559 return rcStrict;
7560
7561#else
7562 return VERR_NOT_IMPLEMENTED;
7563#endif
7564}
7565
7566
7567/**
7568 * Common 'xchg reg,rAX' helper.
7569 */
7570FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
7571{
7572 IEMOP_HLP_NO_LOCK_PREFIX();
7573
7574 iReg |= pIemCpu->uRexB;
7575 switch (pIemCpu->enmEffOpSize)
7576 {
7577 case IEMMODE_16BIT:
7578 IEM_MC_BEGIN(0, 2);
7579 IEM_MC_LOCAL(uint16_t, u16Tmp1);
7580 IEM_MC_LOCAL(uint16_t, u16Tmp2);
7581 IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
7582 IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
7583 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
7584 IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
7585 IEM_MC_ADVANCE_RIP();
7586 IEM_MC_END();
7587 return VINF_SUCCESS;
7588
7589 case IEMMODE_32BIT:
7590 IEM_MC_BEGIN(0, 2);
7591 IEM_MC_LOCAL(uint32_t, u32Tmp1);
7592 IEM_MC_LOCAL(uint32_t, u32Tmp2);
7593 IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
7594 IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
7595 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
7596 IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
7597 IEM_MC_ADVANCE_RIP();
7598 IEM_MC_END();
7599 return VINF_SUCCESS;
7600
7601 case IEMMODE_64BIT:
7602 IEM_MC_BEGIN(0, 2);
7603 IEM_MC_LOCAL(uint64_t, u64Tmp1);
7604 IEM_MC_LOCAL(uint64_t, u64Tmp2);
7605 IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
7606 IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
7607 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
7608 IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
7609 IEM_MC_ADVANCE_RIP();
7610 IEM_MC_END();
7611 return VINF_SUCCESS;
7612
7613 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7614 }
7615}
7616
7617
7618/** Opcode 0x90. */
7619FNIEMOP_DEF(iemOp_nop)
7620{
7621 /* R8/R8D and RAX/EAX can be exchanged. */
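    /* Encoding note (illustrative): a REX.B prefix turns 0x90 into a real
       exchange, e.g. 41 90 decodes as "xchg r8d, eax" and 49 90 as
       "xchg r8, rax", so it cannot be treated as a plain NOP. */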
7622 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
7623 {
7624 IEMOP_MNEMONIC("xchg r8,rAX");
7625 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
7626 }
7627
7628 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
7629 IEMOP_MNEMONIC("pause");
7630 else
7631 IEMOP_MNEMONIC("nop");
7632 IEM_MC_BEGIN(0, 0);
7633 IEM_MC_ADVANCE_RIP();
7634 IEM_MC_END();
7635 return VINF_SUCCESS;
7636}
7637
7638
7639/** Opcode 0x91. */
7640FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
7641{
7642 IEMOP_MNEMONIC("xchg rCX,rAX");
7643 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
7644}
7645
7646
7647/** Opcode 0x92. */
7648FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
7649{
7650 IEMOP_MNEMONIC("xchg rDX,rAX");
7651 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
7652}
7653
7654
7655/** Opcode 0x93. */
7656FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
7657{
7658 IEMOP_MNEMONIC("xchg rBX,rAX");
7659 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
7660}
7661
7662
7663/** Opcode 0x94. */
7664FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
7665{
7666 IEMOP_MNEMONIC("xchg rSX,rAX");
7667 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
7668}
7669
7670
7671/** Opcode 0x95. */
7672FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
7673{
7674 IEMOP_MNEMONIC("xchg rBP,rAX");
7675 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
7676}
7677
7678
7679/** Opcode 0x96. */
7680FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
7681{
7682 IEMOP_MNEMONIC("xchg rSI,rAX");
7683 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
7684}
7685
7686
7687/** Opcode 0x97. */
7688FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
7689{
7690 IEMOP_MNEMONIC("xchg rDI,rAX");
7691 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
7692}
7693
7694
7695/** Opcode 0x98. */
7696FNIEMOP_DEF(iemOp_cbw)
7697{
7698 IEMOP_HLP_NO_LOCK_PREFIX();
7699 switch (pIemCpu->enmEffOpSize)
7700 {
7701 case IEMMODE_16BIT:
7702 IEMOP_MNEMONIC("cbw");
7703 IEM_MC_BEGIN(0, 1);
7704 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 7) {
7705 IEM_MC_OR_GREG_U16(X86_GREG_xAX, UINT16_C(0xff00));
7706 } IEM_MC_ELSE() {
7707 IEM_MC_AND_GREG_U16(X86_GREG_xAX, UINT16_C(0x00ff));
7708 } IEM_MC_ENDIF();
7709 IEM_MC_ADVANCE_RIP();
7710 IEM_MC_END();
7711 return VINF_SUCCESS;
7712
7713 case IEMMODE_32BIT:
7714 IEMOP_MNEMONIC("cwde");
7715 IEM_MC_BEGIN(0, 1);
7716 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
7717 IEM_MC_OR_GREG_U32(X86_GREG_xAX, UINT32_C(0xffff0000));
7718 } IEM_MC_ELSE() {
7719 IEM_MC_AND_GREG_U32(X86_GREG_xAX, UINT32_C(0x0000ffff));
7720 } IEM_MC_ENDIF();
7721 IEM_MC_ADVANCE_RIP();
7722 IEM_MC_END();
7723 return VINF_SUCCESS;
7724
7725 case IEMMODE_64BIT:
7726 IEMOP_MNEMONIC("cdqe");
7727 IEM_MC_BEGIN(0, 1);
7728 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
7729 IEM_MC_OR_GREG_U64(X86_GREG_xAX, UINT64_C(0xffffffff00000000));
7730 } IEM_MC_ELSE() {
7731 IEM_MC_AND_GREG_U64(X86_GREG_xAX, UINT64_C(0x00000000ffffffff));
7732 } IEM_MC_ENDIF();
7733 IEM_MC_ADVANCE_RIP();
7734 IEM_MC_END();
7735 return VINF_SUCCESS;
7736
7737 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7738 }
7739}
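/* Worked example (illustrative): CBW with AL=0x80 has bit 7 set, so the OR
   path above yields AX=0xFF80; with AL=0x7F the AND path yields AX=0x007F.
   The same bit-test-plus-mask trick implements CWDE and CDQE. */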
7740
7741
7742/** Opcode 0x99. */
7743FNIEMOP_DEF(iemOp_cwd)
7744{
7745 IEMOP_HLP_NO_LOCK_PREFIX();
7746 switch (pIemCpu->enmEffOpSize)
7747 {
7748 case IEMMODE_16BIT:
7749 IEMOP_MNEMONIC("cwd");
7750 IEM_MC_BEGIN(0, 1);
7751 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
7752 IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
7753 } IEM_MC_ELSE() {
7754 IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
7755 } IEM_MC_ENDIF();
7756 IEM_MC_ADVANCE_RIP();
7757 IEM_MC_END();
7758 return VINF_SUCCESS;
7759
7760 case IEMMODE_32BIT:
7761 IEMOP_MNEMONIC("cdq");
7762 IEM_MC_BEGIN(0, 1);
7763 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
7764 IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
7765 } IEM_MC_ELSE() {
7766 IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
7767 } IEM_MC_ENDIF();
7768 IEM_MC_ADVANCE_RIP();
7769 IEM_MC_END();
7770 return VINF_SUCCESS;
7771
7772 case IEMMODE_64BIT:
7773 IEMOP_MNEMONIC("cqo");
7774 IEM_MC_BEGIN(0, 1);
7775 IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {
7776 IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
7777 } IEM_MC_ELSE() {
7778 IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
7779 } IEM_MC_ENDIF();
7780 IEM_MC_ADVANCE_RIP();
7781 IEM_MC_END();
7782 return VINF_SUCCESS;
7783
7784 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7785 }
7786}
7787
7788
7789/** Opcode 0x9a. */
7790FNIEMOP_DEF(iemOp_call_Ap)
7791{
7792 IEMOP_MNEMONIC("call Ap");
7793 IEMOP_HLP_NO_64BIT();
7794
7795 /* Decode the far pointer address and pass it on to the far call C implementation. */
7796 uint32_t offSeg;
7797 if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
7798 IEM_OPCODE_GET_NEXT_U32(&offSeg);
7799 else
7800 IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
7801 uint16_t uSel; IEM_OPCODE_GET_NEXT_U16(&uSel);
7802 IEMOP_HLP_NO_LOCK_PREFIX();
7803 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, pIemCpu->enmEffOpSize);
7804}
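/* Decode example (illustrative): with a 32-bit operand size the bytes
   9A 78 56 34 12 23 00 decode as "call far 0023h:12345678h"; the four
   offset bytes are fetched before the two selector bytes, matching the
   IEM_OPCODE_GET_NEXT_U32 / _U16 order above. */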
7805
7806
7807/** Opcode 0x9b. (aka fwait) */
7808FNIEMOP_DEF(iemOp_wait)
7809{
7810 IEMOP_MNEMONIC("wait");
7811 IEMOP_HLP_NO_LOCK_PREFIX();
7812
7813 IEM_MC_BEGIN(0, 0);
7814 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
7815 IEM_MC_MAYBE_RAISE_FPU_XCPT();
7816 IEM_MC_ADVANCE_RIP();
7817 IEM_MC_END();
7818 return VINF_SUCCESS;
7819}
7820
7821
7822/** Opcode 0x9c. */
7823FNIEMOP_DEF(iemOp_pushf_Fv)
7824{
7825 IEMOP_HLP_NO_LOCK_PREFIX();
7826 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
7827 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
7828}
7829
7830
7831/** Opcode 0x9d. */
7832FNIEMOP_DEF(iemOp_popf_Fv)
7833{
7834 IEMOP_HLP_NO_LOCK_PREFIX();
7835 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
7836 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
7837}
7838
7839
7840/** Opcode 0x9e. */
7841FNIEMOP_DEF(iemOp_sahf)
7842{
7843 IEMOP_MNEMONIC("sahf");
7844 IEMOP_HLP_NO_LOCK_PREFIX();
7845 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
7846 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
7847 return IEMOP_RAISE_INVALID_OPCODE();
7848 IEM_MC_BEGIN(0, 2);
7849 IEM_MC_LOCAL(uint32_t, u32Flags);
7850 IEM_MC_LOCAL(uint32_t, EFlags);
7851 IEM_MC_FETCH_EFLAGS(EFlags);
7852 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Flags, X86_GREG_xSP/*=AH*/);
7853 IEM_MC_AND_LOCAL_U32(u32Flags, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
7854 IEM_MC_AND_LOCAL_U32(EFlags, UINT32_C(0xffffff00));
7855 IEM_MC_OR_LOCAL_U32(u32Flags, X86_EFL_1);
7856 IEM_MC_OR_2LOCS_U32(EFlags, u32Flags);
7857 IEM_MC_COMMIT_EFLAGS(EFlags);
7858 IEM_MC_ADVANCE_RIP();
7859 IEM_MC_END();
7860 return VINF_SUCCESS;
7861}
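/* Worked example (illustrative): SAHF with AH=0xFF sets SF, ZF, AF, PF and
   CF; the AND above masks out AH bits 5, 3 and 1, and OR-ing in X86_EFL_1
   keeps the always-one reserved flag bit set. */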
7862
7863
7864/** Opcode 0x9f. */
7865FNIEMOP_DEF(iemOp_lahf)
7866{
7867 IEMOP_MNEMONIC("lahf");
7868 IEMOP_HLP_NO_LOCK_PREFIX();
7869 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
7870 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
7871 return IEMOP_RAISE_INVALID_OPCODE();
7872 IEM_MC_BEGIN(0, 1);
7873 IEM_MC_LOCAL(uint8_t, u8Flags);
7874 IEM_MC_FETCH_EFLAGS_U8(u8Flags);
7875 IEM_MC_STORE_GREG_U8(X86_GREG_xSP/*=AH*/, u8Flags);
7876 IEM_MC_ADVANCE_RIP();
7877 IEM_MC_END();
7878 return VINF_SUCCESS;
7879}
7880
7881
7882/**
7883 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
7884 * iemOp_mov_Ov_rAX to fetch the moffsXX part of the opcode and fend off lock
7885 * prefixes. Will return on failures.
7886 * @param a_GCPtrMemOff The variable to store the offset in.
7887 */
7888#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
7889 do \
7890 { \
7891 switch (pIemCpu->enmEffAddrMode) \
7892 { \
7893 case IEMMODE_16BIT: \
7894 IEM_OPCODE_GET_NEXT_U16_ZX_U64(&(a_GCPtrMemOff)); \
7895 break; \
7896 case IEMMODE_32BIT: \
7897 IEM_OPCODE_GET_NEXT_U32_ZX_U64(&(a_GCPtrMemOff)); \
7898 break; \
7899 case IEMMODE_64BIT: \
7900 IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
7901 break; \
7902 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
7903 } \
7904 IEMOP_HLP_NO_LOCK_PREFIX(); \
7905 } while (0)
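/* Usage sketch (illustrative): for "mov eax, [moffs32]" encoded as
   A1 78 56 34 12 with a 32-bit address size, the macro reads the four
   offset bytes and zero-extends them, so GCPtrMemOff becomes 0x12345678. */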
7906
7907/** Opcode 0xa0. */
7908FNIEMOP_DEF(iemOp_mov_Al_Ob)
7909{
7910 /*
7911 * Get the offset and fend off lock prefixes.
7912 */
7913 RTGCPTR GCPtrMemOff;
7914 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
7915
7916 /*
7917 * Fetch AL.
7918 */
7919 IEM_MC_BEGIN(0,1);
7920 IEM_MC_LOCAL(uint8_t, u8Tmp);
7921 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
7922 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
7923 IEM_MC_ADVANCE_RIP();
7924 IEM_MC_END();
7925 return VINF_SUCCESS;
7926}
7927
7928
7929/** Opcode 0xa1. */
7930FNIEMOP_DEF(iemOp_mov_rAX_Ov)
7931{
7932 /*
7933 * Get the offset and fend off lock prefixes.
7934 */
7935 IEMOP_MNEMONIC("mov rAX,Ov");
7936 RTGCPTR GCPtrMemOff;
7937 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
7938
7939 /*
7940 * Fetch rAX.
7941 */
7942 switch (pIemCpu->enmEffOpSize)
7943 {
7944 case IEMMODE_16BIT:
7945 IEM_MC_BEGIN(0,1);
7946 IEM_MC_LOCAL(uint16_t, u16Tmp);
7947 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
7948 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
7949 IEM_MC_ADVANCE_RIP();
7950 IEM_MC_END();
7951 return VINF_SUCCESS;
7952
7953 case IEMMODE_32BIT:
7954 IEM_MC_BEGIN(0,1);
7955 IEM_MC_LOCAL(uint32_t, u32Tmp);
7956 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
7957 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
7958 IEM_MC_ADVANCE_RIP();
7959 IEM_MC_END();
7960 return VINF_SUCCESS;
7961
7962 case IEMMODE_64BIT:
7963 IEM_MC_BEGIN(0,1);
7964 IEM_MC_LOCAL(uint64_t, u64Tmp);
7965 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
7966 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
7967 IEM_MC_ADVANCE_RIP();
7968 IEM_MC_END();
7969 return VINF_SUCCESS;
7970
7971 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7972 }
7973}
7974
7975
7976/** Opcode 0xa2. */
7977FNIEMOP_DEF(iemOp_mov_Ob_AL)
7978{
7979 /*
7980 * Get the offset and fend off lock prefixes.
7981 */
7982 RTGCPTR GCPtrMemOff;
7983 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
7984
7985 /*
7986 * Store AL.
7987 */
7988 IEM_MC_BEGIN(0,1);
7989 IEM_MC_LOCAL(uint8_t, u8Tmp);
7990 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
7991 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
7992 IEM_MC_ADVANCE_RIP();
7993 IEM_MC_END();
7994 return VINF_SUCCESS;
7995}
7996
7997
7998/** Opcode 0xa3. */
7999FNIEMOP_DEF(iemOp_mov_Ov_rAX)
8000{
8001 /*
8002 * Get the offset and fend off lock prefixes.
8003 */
8004 RTGCPTR GCPtrMemOff;
8005 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
8006
8007 /*
8008 * Store rAX.
8009 */
8010 switch (pIemCpu->enmEffOpSize)
8011 {
8012 case IEMMODE_16BIT:
8013 IEM_MC_BEGIN(0,1);
8014 IEM_MC_LOCAL(uint16_t, u16Tmp);
8015 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
8016 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
8017 IEM_MC_ADVANCE_RIP();
8018 IEM_MC_END();
8019 return VINF_SUCCESS;
8020
8021 case IEMMODE_32BIT:
8022 IEM_MC_BEGIN(0,1);
8023 IEM_MC_LOCAL(uint32_t, u32Tmp);
8024 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
8025 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
8026 IEM_MC_ADVANCE_RIP();
8027 IEM_MC_END();
8028 return VINF_SUCCESS;
8029
8030 case IEMMODE_64BIT:
8031 IEM_MC_BEGIN(0,1);
8032 IEM_MC_LOCAL(uint64_t, u64Tmp);
8033 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
8034 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
8035 IEM_MC_ADVANCE_RIP();
8036 IEM_MC_END();
8037 return VINF_SUCCESS;
8038
8039 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8040 }
8041}
8042
8043/** Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv */
8044#define IEM_MOVS_CASE(ValBits, AddrBits) \
8045 IEM_MC_BEGIN(0, 2); \
8046 IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
8047 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8048 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
8049 IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
8050 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
8051 IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
8052 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8053 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8054 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8055 } IEM_MC_ELSE() { \
8056 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8057 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8058 } IEM_MC_ENDIF(); \
8059 IEM_MC_ADVANCE_RIP(); \
8060 IEM_MC_END();
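#if 0 /* Plain-C sketch of one 8-bit MOVS step (illustration only; readMem,
         writeMem and the context fields are hypothetical stand-ins, not
         real IEM helpers). */
    uint8_t const bVal = readMem(pIemCpu->iEffSeg, pCtx->rsi); /* DS:rSI by default */
    writeMem(X86_SREG_ES, pCtx->rdi, bVal);                    /* always ES:rDI */
    int const cbStep = (pCtx->eflags.u & X86_EFL_DF) ? -1 : +1;
    pCtx->rsi += cbStep;                                       /* DF picks the direction */
    pCtx->rdi += cbStep;
#endif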
8061
8062/** Opcode 0xa4. */
8063FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
8064{
8065 IEMOP_HLP_NO_LOCK_PREFIX();
8066
8067 /*
8068 * Use the C implementation if a repeat prefix is encountered.
8069 */
8070 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8071 {
8072 IEMOP_MNEMONIC("rep movsb Xb,Yb");
8073 switch (pIemCpu->enmEffAddrMode)
8074 {
8075 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
8076 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
8077 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
8078 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8079 }
8080 }
8081 IEMOP_MNEMONIC("movsb Xb,Yb");
8082
8083 /*
8084 * Sharing case implementation with movs[wdq] below.
8085 */
8086 switch (pIemCpu->enmEffAddrMode)
8087 {
8088 case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
8089 case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
8090 case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
8091 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8092 }
8093 return VINF_SUCCESS;
8094}
8095
8096
8097/** Opcode 0xa5. */
8098FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
8099{
8100 IEMOP_HLP_NO_LOCK_PREFIX();
8101
8102 /*
8103 * Use the C implementation if a repeat prefix is encountered.
8104 */
8105 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8106 {
8107 IEMOP_MNEMONIC("rep movs Xv,Yv");
8108 switch (pIemCpu->enmEffOpSize)
8109 {
8110 case IEMMODE_16BIT:
8111 switch (pIemCpu->enmEffAddrMode)
8112 {
8113 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
8114 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
8115 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
8116 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8117 }
8118 break;
8119 case IEMMODE_32BIT:
8120 switch (pIemCpu->enmEffAddrMode)
8121 {
8122 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
8123 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
8124 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
8125 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8126 }
8127 case IEMMODE_64BIT:
8128 switch (pIemCpu->enmEffAddrMode)
8129 {
8130 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8131 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
8132 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
8133 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8134 }
8135 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8136 }
8137 }
8138 IEMOP_MNEMONIC("movs Xv,Yv");
8139
8140 /*
8141 * Annoying double switch here.
8142 * Using ugly macro for implementing the cases, sharing it with movsb.
8143 */
8144 switch (pIemCpu->enmEffOpSize)
8145 {
8146 case IEMMODE_16BIT:
8147 switch (pIemCpu->enmEffAddrMode)
8148 {
8149 case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
8150 case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
8151 case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
8152 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8153 }
8154 break;
8155
8156 case IEMMODE_32BIT:
8157 switch (pIemCpu->enmEffAddrMode)
8158 {
8159 case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
8160 case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
8161 case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
8162 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8163 }
8164 break;
8165
8166 case IEMMODE_64BIT:
8167 switch (pIemCpu->enmEffAddrMode)
8168 {
8169 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8170 case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
8171 case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
8172 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8173 }
8174 break;
8175 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8176 }
8177 return VINF_SUCCESS;
8178}
8179
8180#undef IEM_MOVS_CASE
8181
8182/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv */
8183#define IEM_CMPS_CASE(ValBits, AddrBits) \
8184 IEM_MC_BEGIN(3, 3); \
8185 IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
8186 IEM_MC_ARG(uint##ValBits##_t, uValue2, 1); \
8187 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
8188 IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
8189 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8190 \
8191 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
8192 IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
8193 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
8194 IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
8195 IEM_MC_REF_LOCAL(puValue1, uValue1); \
8196 IEM_MC_REF_EFLAGS(pEFlags); \
8197 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
8198 \
8199 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8200 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8201 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8202 } IEM_MC_ELSE() { \
8203 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8204 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8205 } IEM_MC_ENDIF(); \
8206 IEM_MC_ADVANCE_RIP(); \
8207 IEM_MC_END();
8208
8209/** Opcode 0xa6. */
8210FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
8211{
8212 IEMOP_HLP_NO_LOCK_PREFIX();
8213
8214 /*
8215 * Use the C implementation if a repeat prefix is encountered.
8216 */
8217 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8218 {
8219 IEMOP_MNEMONIC("repe cmps Xb,Yb");
8220 switch (pIemCpu->enmEffAddrMode)
8221 {
8222 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
8223 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
8224 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
8225 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8226 }
8227 }
8228 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8229 {
8230 IEMOP_MNEMONIC("repe cmps Xb,Yb");
8231 switch (pIemCpu->enmEffAddrMode)
8232 {
8233 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
8234 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
8235 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
8236 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8237 }
8238 }
8239 IEMOP_MNEMONIC("cmps Xb,Yb");
8240
8241 /*
8242 * Sharing case implementation with cmps[wdq] below.
8243 */
8244 switch (pIemCpu->enmEffAddrMode)
8245 {
8246 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
8247 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
8248 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
8249 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8250 }
8251 return VINF_SUCCESS;
8252
8253}
8254
8255
8256/** Opcode 0xa7. */
8257FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
8258{
8259 IEMOP_HLP_NO_LOCK_PREFIX();
8260
8261 /*
8262 * Use the C implementation if a repeat prefix is encountered.
8263 */
8264 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8265 {
8266 IEMOP_MNEMONIC("repe cmps Xv,Yv");
8267 switch (pIemCpu->enmEffOpSize)
8268 {
8269 case IEMMODE_16BIT:
8270 switch (pIemCpu->enmEffAddrMode)
8271 {
8272 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
8273 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
8274 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
8275 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8276 }
8277 break;
8278 case IEMMODE_32BIT:
8279 switch (pIemCpu->enmEffAddrMode)
8280 {
8281 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
8282 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
8283 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
8284 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8285 }
8286 case IEMMODE_64BIT:
8287 switch (pIemCpu->enmEffAddrMode)
8288 {
8289 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8290 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
8291 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
8292 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8293 }
8294 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8295 }
8296 }
8297
8298 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8299 {
8300 IEMOP_MNEMONIC("repne cmps Xv,Yv");
8301 switch (pIemCpu->enmEffOpSize)
8302 {
8303 case IEMMODE_16BIT:
8304 switch (pIemCpu->enmEffAddrMode)
8305 {
8306 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
8307 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
8308 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
8309 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8310 }
8311 break;
8312 case IEMMODE_32BIT:
8313 switch (pIemCpu->enmEffAddrMode)
8314 {
8315 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
8316 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
8317 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
8318 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8319 }
8320 case IEMMODE_64BIT:
8321 switch (pIemCpu->enmEffAddrMode)
8322 {
8323 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8324 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
8325 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
8326 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8327 }
8328 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8329 }
8330 }
8331
8332 IEMOP_MNEMONIC("cmps Xv,Yv");
8333
8334 /*
8335 * Annoying double switch here.
8336 * Using ugly macro for implementing the cases, sharing it with cmpsb.
8337 */
8338 switch (pIemCpu->enmEffOpSize)
8339 {
8340 case IEMMODE_16BIT:
8341 switch (pIemCpu->enmEffAddrMode)
8342 {
8343 case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
8344 case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
8345 case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
8346 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8347 }
8348 break;
8349
8350 case IEMMODE_32BIT:
8351 switch (pIemCpu->enmEffAddrMode)
8352 {
8353 case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
8354 case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
8355 case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
8356 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8357 }
8358 break;
8359
8360 case IEMMODE_64BIT:
8361 switch (pIemCpu->enmEffAddrMode)
8362 {
8363 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8364 case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
8365 case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
8366 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8367 }
8368 break;
8369 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8370 }
8371 return VINF_SUCCESS;
8372
8373}
8374
8375#undef IEM_CMPS_CASE
8376
8377/** Opcode 0xa8. */
8378FNIEMOP_DEF(iemOp_test_AL_Ib)
8379{
8380 IEMOP_MNEMONIC("test al,Ib");
8381 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
8382 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
8383}
8384
8385
8386/** Opcode 0xa9. */
8387FNIEMOP_DEF(iemOp_test_eAX_Iz)
8388{
8389 IEMOP_MNEMONIC("test rAX,Iz");
8390 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
8391 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
8392}
8393
8394
8395/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX */
8396#define IEM_STOS_CASE(ValBits, AddrBits) \
8397 IEM_MC_BEGIN(0, 2); \
8398 IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
8399 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8400 IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
8401 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
8402 IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
8403 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8404 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8405 } IEM_MC_ELSE() { \
8406 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8407 } IEM_MC_ENDIF(); \
8408 IEM_MC_ADVANCE_RIP(); \
8409 IEM_MC_END();
8410
8411/** Opcode 0xaa. */
8412FNIEMOP_DEF(iemOp_stosb_Yb_AL)
8413{
8414 IEMOP_HLP_NO_LOCK_PREFIX();
8415
8416 /*
8417 * Use the C implementation if a repeat prefix is encountered.
8418 */
8419 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8420 {
8421 IEMOP_MNEMONIC("rep stos Yb,al");
8422 switch (pIemCpu->enmEffAddrMode)
8423 {
8424 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
8425 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
8426 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
8427 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8428 }
8429 }
8430 IEMOP_MNEMONIC("stos Yb,al");
8431
8432 /*
8433 * Sharing case implementation with stos[wdq] below.
8434 */
8435 switch (pIemCpu->enmEffAddrMode)
8436 {
8437 case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
8438 case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
8439 case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
8440 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8441 }
8442 return VINF_SUCCESS;
8443}
8444
8445
8446/** Opcode 0xab. */
8447FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
8448{
8449 IEMOP_HLP_NO_LOCK_PREFIX();
8450
8451 /*
8452 * Use the C implementation if a repeat prefix is encountered.
8453 */
8454 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8455 {
8456 IEMOP_MNEMONIC("rep stos Yv,rAX");
8457 switch (pIemCpu->enmEffOpSize)
8458 {
8459 case IEMMODE_16BIT:
8460 switch (pIemCpu->enmEffAddrMode)
8461 {
8462 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
8463 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
8464 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
8465 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8466 }
8467 break;
8468 case IEMMODE_32BIT:
8469 switch (pIemCpu->enmEffAddrMode)
8470 {
8471 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
8472 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
8473 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
8474 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8475 }
8476 case IEMMODE_64BIT:
8477 switch (pIemCpu->enmEffAddrMode)
8478 {
8479 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8480 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
8481 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
8482 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8483 }
8484 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8485 }
8486 }
8487 IEMOP_MNEMONIC("stos Yv,rAX");
8488
8489 /*
8490 * Annoying double switch here.
8491 * Using ugly macro for implementing the cases, sharing it with stosb.
8492 */
8493 switch (pIemCpu->enmEffOpSize)
8494 {
8495 case IEMMODE_16BIT:
8496 switch (pIemCpu->enmEffAddrMode)
8497 {
8498 case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
8499 case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
8500 case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
8501 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8502 }
8503 break;
8504
8505 case IEMMODE_32BIT:
8506 switch (pIemCpu->enmEffAddrMode)
8507 {
8508 case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
8509 case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
8510 case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
8511 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8512 }
8513 break;
8514
8515 case IEMMODE_64BIT:
8516 switch (pIemCpu->enmEffAddrMode)
8517 {
8518 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8519 case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
8520 case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
8521 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8522 }
8523 break;
8524 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8525 }
8526 return VINF_SUCCESS;
8527}
8528
8529#undef IEM_STOS_CASE
8530
8531/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv */
8532#define IEM_LODS_CASE(ValBits, AddrBits) \
8533 IEM_MC_BEGIN(0, 2); \
8534 IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
8535 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8536 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
8537 IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
8538 IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
8539 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8540 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8541 } IEM_MC_ELSE() { \
8542 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
8543 } IEM_MC_ENDIF(); \
8544 IEM_MC_ADVANCE_RIP(); \
8545 IEM_MC_END();
8546
8547/** Opcode 0xac. */
8548FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
8549{
8550 IEMOP_HLP_NO_LOCK_PREFIX();
8551
8552 /*
8553 * Use the C implementation if a repeat prefix is encountered.
8554 */
8555 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8556 {
8557 IEMOP_MNEMONIC("rep lodsb al,Xb");
8558 switch (pIemCpu->enmEffAddrMode)
8559 {
8560 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
8561 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
8562 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
8563 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8564 }
8565 }
8566 IEMOP_MNEMONIC("lodsb al,Xb");
8567
8568 /*
8569 * Sharing case implementation with lods[wdq] below.
8570 */
8571 switch (pIemCpu->enmEffAddrMode)
8572 {
8573 case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
8574 case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
8575 case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
8576 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8577 }
8578 return VINF_SUCCESS;
8579}
8580
8581
8582/** Opcode 0xad. */
8583FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
8584{
8585 IEMOP_HLP_NO_LOCK_PREFIX();
8586
8587 /*
8588 * Use the C implementation if a repeat prefix is encountered.
8589 */
8590 if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
8591 {
8592 IEMOP_MNEMONIC("rep lods rAX,Xv");
8593 switch (pIemCpu->enmEffOpSize)
8594 {
8595 case IEMMODE_16BIT:
8596 switch (pIemCpu->enmEffAddrMode)
8597 {
8598 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
8599 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
8600 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
8601 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8602 }
8603 break;
8604 case IEMMODE_32BIT:
8605 switch (pIemCpu->enmEffAddrMode)
8606 {
8607 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
8608 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
8609 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
8610 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8611 }
8612 case IEMMODE_64BIT:
8613 switch (pIemCpu->enmEffAddrMode)
8614 {
8615 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8616 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
8617 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
8618 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8619 }
8620 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8621 }
8622 }
8623 IEMOP_MNEMONIC("lods rAX,Xv");
8624
8625 /*
8626 * Annoying double switch here.
8627 * Using ugly macro for implementing the cases, sharing it with lodsb.
8628 */
8629 switch (pIemCpu->enmEffOpSize)
8630 {
8631 case IEMMODE_16BIT:
8632 switch (pIemCpu->enmEffAddrMode)
8633 {
8634 case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
8635 case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
8636 case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
8637 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8638 }
8639 break;
8640
8641 case IEMMODE_32BIT:
8642 switch (pIemCpu->enmEffAddrMode)
8643 {
8644 case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
8645 case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
8646 case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
8647 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8648 }
8649 break;
8650
8651 case IEMMODE_64BIT:
8652 switch (pIemCpu->enmEffAddrMode)
8653 {
8654 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8655 case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
8656 case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
8657 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8658 }
8659 break;
8660 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8661 }
8662 return VINF_SUCCESS;
8663}
8664
8665#undef IEM_LODS_CASE
8666
8667/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv */
8668#define IEM_SCAS_CASE(ValBits, AddrBits) \
8669 IEM_MC_BEGIN(3, 2); \
8670 IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
8671 IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
8672 IEM_MC_ARG(uint32_t *, pEFlags, 2); \
8673 IEM_MC_LOCAL(RTGCPTR, uAddr); \
8674 \
8675 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
8676 IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
8677 IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
8678 IEM_MC_REF_EFLAGS(pEFlags); \
8679 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
8680 \
8681 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
8682 IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8683 } IEM_MC_ELSE() { \
8684 IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
8685 } IEM_MC_ENDIF(); \
8686 IEM_MC_ADVANCE_RIP(); \
8687 IEM_MC_END();
8688
8689/** Opcode 0xae. */
8690FNIEMOP_DEF(iemOp_scasb_AL_Xb)
8691{
8692 IEMOP_HLP_NO_LOCK_PREFIX();
8693
8694 /*
8695 * Use the C implementation if a repeat prefix is encountered.
8696 */
8697 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8698 {
8699 IEMOP_MNEMONIC("repe scasb al,Xb");
8700 switch (pIemCpu->enmEffAddrMode)
8701 {
8702 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
8703 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
8704 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
8705 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8706 }
8707 }
8708 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8709 {
8710 IEMOP_MNEMONIC("repne scasb al,Xb");
8711 switch (pIemCpu->enmEffAddrMode)
8712 {
8713 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
8714 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
8715 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
8716 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8717 }
8718 }
8719 IEMOP_MNEMONIC("scasb al,Xb");
8720
8721 /*
8722 * Sharing case implementation with scas[wdq] below.
8723 */
8724 switch (pIemCpu->enmEffAddrMode)
8725 {
8726 case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
8727 case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
8728 case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
8729 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8730 }
8731 return VINF_SUCCESS;
8732}
8733
8734
8735/** Opcode 0xaf. */
8736FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
8737{
8738 IEMOP_HLP_NO_LOCK_PREFIX();
8739
8740 /*
8741 * Use the C implementation if a repeat prefix is encountered.
8742 */
8743 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
8744 {
8745 IEMOP_MNEMONIC("repe scas rAX,Xv");
8746 switch (pIemCpu->enmEffOpSize)
8747 {
8748 case IEMMODE_16BIT:
8749 switch (pIemCpu->enmEffAddrMode)
8750 {
8751 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
8752 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
8753 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
8754 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8755 }
8756 break;
8757 case IEMMODE_32BIT:
8758 switch (pIemCpu->enmEffAddrMode)
8759 {
8760 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
8761 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
8762 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
8763 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8764 }
8765 case IEMMODE_64BIT:
8766 switch (pIemCpu->enmEffAddrMode)
8767 {
8768 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo Is this wrong? 64-bit mode allows 32-bit addressing (via the 0x67 prefix) but not 16-bit, right? */
8769 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
8770 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
8771 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8772 }
8773 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8774 }
8775 }
8776 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
8777 {
8778 IEMOP_MNEMONIC("repne scas rAX,Xv");
8779 switch (pIemCpu->enmEffOpSize)
8780 {
8781 case IEMMODE_16BIT:
8782 switch (pIemCpu->enmEffAddrMode)
8783 {
8784 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16);
8785 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m32);
8786 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m64);
8787 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8788 }
8789 break;
8790 case IEMMODE_32BIT:
8791 switch (pIemCpu->enmEffAddrMode)
8792 {
8793 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16);
8794 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m32);
8795 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m64);
8796 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8797 }
8798 case IEMMODE_64BIT:
8799 switch (pIemCpu->enmEffAddrMode)
8800 {
8801 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
8802 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m32);
8803 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m64);
8804 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8805 }
8806 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8807 }
8808 }
8809 IEMOP_MNEMONIC("scas rAX,Xv");
8810
8811 /*
8812 * Annoying double switch here.
8813 * Using ugly macro for implementing the cases, sharing it with scasb.
8814 */
8815 switch (pIemCpu->enmEffOpSize)
8816 {
8817 case IEMMODE_16BIT:
8818 switch (pIemCpu->enmEffAddrMode)
8819 {
8820 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
8821 case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
8822 case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
8823 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8824 }
8825 break;
8826
8827 case IEMMODE_32BIT:
8828 switch (pIemCpu->enmEffAddrMode)
8829 {
8830 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
8831 case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
8832 case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
8833 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8834 }
8835 break;
8836
8837 case IEMMODE_64BIT:
8838 switch (pIemCpu->enmEffAddrMode)
8839 {
8840 case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_4); /* cannot be encoded */ break;
8841 case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
8842 case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
8843 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8844 }
8845 break;
8846 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8847 }
8848 return VINF_SUCCESS;
8849}
8850
8851#undef IEM_SCAS_CASE
8852
8853/**
8854 * Common 'mov r8, imm8' helper.
8855 */
8856FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
8857{
8858 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
8859 IEMOP_HLP_NO_LOCK_PREFIX();
8860
8861 IEM_MC_BEGIN(0, 1);
8862 IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
8863 IEM_MC_STORE_GREG_U8(iReg, u8Value);
8864 IEM_MC_ADVANCE_RIP();
8865 IEM_MC_END();
8866
8867 return VINF_SUCCESS;
8868}
8869
8870
8871/** Opcode 0xb0. */
8872FNIEMOP_DEF(iemOp_mov_AL_Ib)
8873{
8874 IEMOP_MNEMONIC("mov AL,Ib");
8875 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX);
8876}
8877
8878
8879/** Opcode 0xb1. */
8880FNIEMOP_DEF(iemOp_CL_Ib)
8881{
8882 IEMOP_MNEMONIC("mov CL,Ib");
8883 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX);
8884}
8885
8886
8887/** Opcode 0xb2. */
8888FNIEMOP_DEF(iemOp_DL_Ib)
8889{
8890 IEMOP_MNEMONIC("mov DL,Ib");
8891 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX);
8892}
8893
8894
8895/** Opcode 0xb3. */
8896FNIEMOP_DEF(iemOp_BL_Ib)
8897{
8898 IEMOP_MNEMONIC("mov BL,Ib");
8899 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX);
8900}
8901
8902
8903/** Opcode 0xb4. */
8904FNIEMOP_DEF(iemOp_mov_AH_Ib)
8905{
8906 IEMOP_MNEMONIC("mov AH,Ib");
8907 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP);
8908}
8909
8910
8911/** Opcode 0xb5. */
8912FNIEMOP_DEF(iemOp_CH_Ib)
8913{
8914 IEMOP_MNEMONIC("mov CH,Ib");
8915 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP);
8916}
8917
8918
8919/** Opcode 0xb6. */
8920FNIEMOP_DEF(iemOp_DH_Ib)
8921{
8922 IEMOP_MNEMONIC("mov DH,Ib");
8923 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI);
8924}
8925
8926
8927/** Opcode 0xb7. */
8928FNIEMOP_DEF(iemOp_BH_Ib)
8929{
8930 IEMOP_MNEMONIC("mov BH,Ib");
8931 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI);
8932}
8933
8934
8935/**
8936 * Common 'mov regX,immX' helper.
8937 */
8938FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
8939{
8940 switch (pIemCpu->enmEffOpSize)
8941 {
8942 case IEMMODE_16BIT:
8943 {
8944 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
8945 IEMOP_HLP_NO_LOCK_PREFIX();
8946
8947 IEM_MC_BEGIN(0, 1);
8948 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
8949 IEM_MC_STORE_GREG_U16(iReg, u16Value);
8950 IEM_MC_ADVANCE_RIP();
8951 IEM_MC_END();
8952 break;
8953 }
8954
8955 case IEMMODE_32BIT:
8956 {
8957 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
8958 IEMOP_HLP_NO_LOCK_PREFIX();
8959
8960 IEM_MC_BEGIN(0, 1);
8961 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
8962 IEM_MC_STORE_GREG_U32(iReg, u32Value);
8963 IEM_MC_ADVANCE_RIP();
8964 IEM_MC_END();
8965 break;
8966 }
8967 case IEMMODE_64BIT:
8968 {
8969 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm);
8970 IEMOP_HLP_NO_LOCK_PREFIX();
8971
8972 IEM_MC_BEGIN(0, 1);
8973 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
8974 IEM_MC_STORE_GREG_U64(iReg, u64Value);
8975 IEM_MC_ADVANCE_RIP();
8976 IEM_MC_END();
8977 break;
8978 }
8979 }
8980
8981 return VINF_SUCCESS;
8982}
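/* Encoding note (illustrative): with REX.W this is the only instruction
   family taking a full 64-bit immediate, e.g. 48 B8 EF CD AB 89 67 45 23 01
   decodes as "mov rax, 0123456789ABCDEFh"; hence IEM_OPCODE_GET_NEXT_U64
   in the 64-bit case above. */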
8983
8984
8985/** Opcode 0xb8. */
8986FNIEMOP_DEF(iemOp_eAX_Iv)
8987{
8988 IEMOP_MNEMONIC("mov rAX,Iv");
8989 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX);
8990}
8991
8992
8993/** Opcode 0xb9. */
8994FNIEMOP_DEF(iemOp_eCX_Iv)
8995{
8996 IEMOP_MNEMONIC("mov rCX,Iv");
8997 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX);
8998}
8999
9000
9001/** Opcode 0xba. */
9002FNIEMOP_DEF(iemOp_eDX_Iv)
9003{
9004 IEMOP_MNEMONIC("mov rDX,Iv");
9005 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX);
9006}
9007
9008
9009/** Opcode 0xbb. */
9010FNIEMOP_DEF(iemOp_eBX_Iv)
9011{
9012 IEMOP_MNEMONIC("mov rBX,Iv");
9013 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX);
9014}
9015
9016
9017/** Opcode 0xbc. */
9018FNIEMOP_DEF(iemOp_eSP_Iv)
9019{
9020 IEMOP_MNEMONIC("mov rSP,Iv");
9021 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP);
9022}
9023
9024
9025/** Opcode 0xbd. */
9026FNIEMOP_DEF(iemOp_eBP_Iv)
9027{
9028 IEMOP_MNEMONIC("mov rBP,Iv");
9029 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP);
9030}
9031
9032
9033/** Opcode 0xbe. */
9034FNIEMOP_DEF(iemOp_eSI_Iv)
9035{
9036 IEMOP_MNEMONIC("mov rSI,Iv");
9037 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI);
9038}
9039
9040
9041/** Opcode 0xbf. */
9042FNIEMOP_DEF(iemOp_eDI_Iv)
9043{
9044 IEMOP_MNEMONIC("mov rDI,Iv");
9045 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI);
9046}
9047
9048
9049/** Opcode 0xc0. */
9050FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
9051{
9052 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9053 PCIEMOPSHIFTSIZES pImpl;
9054 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9055 {
9056 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
9057 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
9058 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
9059 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
9060 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
9061 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
9062 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
9063 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9064 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9065 }
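    /* Decoding example (illustrative): C0 E0 04 has reg=4 in the ModR/M
       byte, i.e. "shl al, 4"; mod=3 selects the register path below and
       the shift count 4 comes from the trailing immediate byte. */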
9066 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9067
9068 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9069 {
9070 /* register */
9071 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9072 IEMOP_HLP_NO_LOCK_PREFIX();
9073 IEM_MC_BEGIN(3, 0);
9074 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9075 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9076 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9077 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9078 IEM_MC_REF_EFLAGS(pEFlags);
9079 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9080 IEM_MC_ADVANCE_RIP();
9081 IEM_MC_END();
9082 }
9083 else
9084 {
9085 /* memory */
9086 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9087 IEM_MC_BEGIN(3, 2);
9088 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9089 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9090 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9091 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9092
9093 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9094 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9095 IEM_MC_ASSIGN(cShiftArg, cShift);
9096 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9097 IEM_MC_FETCH_EFLAGS(EFlags);
9098 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9099
9100 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9101 IEM_MC_COMMIT_EFLAGS(EFlags);
9102 IEM_MC_ADVANCE_RIP();
9103 IEM_MC_END();
9104 }
9105 return VINF_SUCCESS;
9106}
9107
9108
9109/** Opcode 0xc1. */
9110FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
9111{
9112 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9113 PCIEMOPSHIFTSIZES pImpl;
9114 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9115 {
9116 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
9117 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
9118 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
9119 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
9120 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
9121 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
9122 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
9123 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9124 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9125 }
9126 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9127
9128 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9129 {
9130 /* register */
9131 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9132 IEMOP_HLP_NO_LOCK_PREFIX();
9133 switch (pIemCpu->enmEffOpSize)
9134 {
9135 case IEMMODE_16BIT:
9136 IEM_MC_BEGIN(3, 0);
9137 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9138 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9139 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9140 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9141 IEM_MC_REF_EFLAGS(pEFlags);
9142 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9143 IEM_MC_ADVANCE_RIP();
9144 IEM_MC_END();
9145 return VINF_SUCCESS;
9146
9147 case IEMMODE_32BIT:
9148 IEM_MC_BEGIN(3, 0);
9149 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9150 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9151 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9152 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9153 IEM_MC_REF_EFLAGS(pEFlags);
9154 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9155 IEM_MC_ADVANCE_RIP();
9156 IEM_MC_END();
9157 return VINF_SUCCESS;
9158
9159 case IEMMODE_64BIT:
9160 IEM_MC_BEGIN(3, 0);
9161 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9162 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
9163 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9164 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9165 IEM_MC_REF_EFLAGS(pEFlags);
9166 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9167 IEM_MC_ADVANCE_RIP();
9168 IEM_MC_END();
9169 return VINF_SUCCESS;
9170
9171 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9172 }
9173 }
9174 else
9175 {
9176 /* memory */
9177 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9178 switch (pIemCpu->enmEffOpSize)
9179 {
9180 case IEMMODE_16BIT:
9181 IEM_MC_BEGIN(3, 2);
9182 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9183 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9184 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9185 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9186
9187 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9188 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9189 IEM_MC_ASSIGN(cShiftArg, cShift);
9190 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9191 IEM_MC_FETCH_EFLAGS(EFlags);
9192 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9193
9194 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9195 IEM_MC_COMMIT_EFLAGS(EFlags);
9196 IEM_MC_ADVANCE_RIP();
9197 IEM_MC_END();
9198 return VINF_SUCCESS;
9199
9200 case IEMMODE_32BIT:
9201 IEM_MC_BEGIN(3, 2);
9202 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9203 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9204 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9205 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9206
9207 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9208 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9209 IEM_MC_ASSIGN(cShiftArg, cShift);
9210 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9211 IEM_MC_FETCH_EFLAGS(EFlags);
9212 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9213
9214 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9215 IEM_MC_COMMIT_EFLAGS(EFlags);
9216 IEM_MC_ADVANCE_RIP();
9217 IEM_MC_END();
9218 return VINF_SUCCESS;
9219
9220 case IEMMODE_64BIT:
9221 IEM_MC_BEGIN(3, 2);
9222 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9223 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9224 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9225 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9226
9227 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9228 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
9229 IEM_MC_ASSIGN(cShiftArg, cShift);
9230 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9231 IEM_MC_FETCH_EFLAGS(EFlags);
9232 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9233
9234 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9235 IEM_MC_COMMIT_EFLAGS(EFlags);
9236 IEM_MC_ADVANCE_RIP();
9237 IEM_MC_END();
9238 return VINF_SUCCESS;
9239
9240 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9241 }
9242 }
9243}
9244
9245
9246/** Opcode 0xc2. */
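/* retn Iw pops the return address and then releases a further Iw bytes of
   stack; like the other near branches it defaults to 64-bit operand size
   in long mode, hence IEMOP_HLP_DEFAULT_64BIT_OP_SIZE before deferring. */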
9247FNIEMOP_DEF(iemOp_retn_Iw)
9248{
9249 IEMOP_MNEMONIC("retn Iw");
9250 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9251 IEMOP_HLP_NO_LOCK_PREFIX();
9252 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9253 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
9254}
9255
9256
9257/** Opcode 0xc3. */
9258FNIEMOP_DEF(iemOp_retn)
9259{
9260 IEMOP_MNEMONIC("retn");
9261 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9262 IEMOP_HLP_NO_LOCK_PREFIX();
9263 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
9264}
9265
9266
9267/** Opcode 0xc4. */
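/* les/lds load a far pointer (offset + selector) from memory into the
   destination register and ES/DS respectively; the shared worker is
   presumed to reject register operands, since Mp is a memory-only
   operand form. */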
9268FNIEMOP_DEF(iemOp_les_Gv_Mp)
9269{
9270 IEMOP_MNEMONIC("les Gv,Mp");
9271 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_ES);
9272}
9273
9274
9275/** Opcode 0xc5. */
9276FNIEMOP_DEF(iemOp_lds_Gv_Mp)
9277{
9278 IEMOP_MNEMONIC("lds Gv,Mp");
9279 return FNIEMOP_CALL_1(iemOpCommonLoadSRegAndGreg, X86_SREG_DS);
9280}
9281
9282
9283/** Opcode 0xc6. */
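/* Group 11 currently defines only /0 (mov); any other reg encoding is
   rejected up front with \#UD, before the immediate byte is fetched. */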
9284FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
9285{
9286 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9287 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9288 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
9289 return IEMOP_RAISE_INVALID_OPCODE();
9290 IEMOP_MNEMONIC("mov Eb,Ib");
9291
9292 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9293 {
9294 /* register access */
9295 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9296 IEM_MC_BEGIN(0, 0);
9297 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
9298 IEM_MC_ADVANCE_RIP();
9299 IEM_MC_END();
9300 }
9301 else
9302 {
9303 /* memory access. */
9304 IEM_MC_BEGIN(0, 1);
9305 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9306 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9307 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
9308 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
9309 IEM_MC_ADVANCE_RIP();
9310 IEM_MC_END();
9311 }
9312 return VINF_SUCCESS;
9313}
9314
9315
9316/** Opcode 0xc7. */
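/* Iz is a word or dword immediate, never a qword: with a 64-bit operand
   size only a dword is fetched and sign-extended, which is why the 64-bit
   cases below use IEM_OPCODE_GET_NEXT_S32_SX_U64. */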
9317FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
9318{
9319 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9320 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9321 if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Ev,Iz in this group. */
9322 return IEMOP_RAISE_INVALID_OPCODE();
9323 IEMOP_MNEMONIC("mov Ev,Iz");
9324
9325 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9326 {
9327 /* register access */
9328 switch (pIemCpu->enmEffOpSize)
9329 {
9330 case IEMMODE_16BIT:
9331 IEM_MC_BEGIN(0, 0);
9332 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9333 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
9334 IEM_MC_ADVANCE_RIP();
9335 IEM_MC_END();
9336 return VINF_SUCCESS;
9337
9338 case IEMMODE_32BIT:
9339 IEM_MC_BEGIN(0, 0);
9340 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9341 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
9342 IEM_MC_ADVANCE_RIP();
9343 IEM_MC_END();
9344 return VINF_SUCCESS;
9345
9346 case IEMMODE_64BIT:
9347 IEM_MC_BEGIN(0, 0);
9348 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
9349 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
9350 IEM_MC_ADVANCE_RIP();
9351 IEM_MC_END();
9352 return VINF_SUCCESS;
9353
9354 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9355 }
9356 }
9357 else
9358 {
9359 /* memory access. */
9360 switch (pIemCpu->enmEffOpSize)
9361 {
9362 case IEMMODE_16BIT:
9363 IEM_MC_BEGIN(0, 1);
9364 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9365 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9366 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9367 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
9368 IEM_MC_ADVANCE_RIP();
9369 IEM_MC_END();
9370 return VINF_SUCCESS;
9371
9372 case IEMMODE_32BIT:
9373 IEM_MC_BEGIN(0, 1);
9374 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9375 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9376 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
9377 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
9378 IEM_MC_ADVANCE_RIP();
9379 IEM_MC_END();
9380 return VINF_SUCCESS;
9381
9382 case IEMMODE_64BIT:
9383 IEM_MC_BEGIN(0, 1);
9384 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9385 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9386 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
9387 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
9388 IEM_MC_ADVANCE_RIP();
9389 IEM_MC_END();
9390 return VINF_SUCCESS;
9391
9392 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9393 }
9394 }
9395}
9396
9397
9398
9399
9400/** Opcode 0xc8. */
9401FNIEMOP_STUB(iemOp_enter_Iw_Ib);
9402
9403
9404/** Opcode 0xc9. */
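/* leave is effectively 'mov rSP,rBP; pop rBP' plus the usual stack and
   segment checks, so like the returns it is deferred to a C worker. */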
9405FNIEMOP_DEF(iemOp_leave)
9406{
9407 IEMOP_MNEMONIC("leave");
9408 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9409 IEMOP_HLP_NO_LOCK_PREFIX();
9410 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
9411}
9412
9413
9414/** Opcode 0xca. */
9415FNIEMOP_DEF(iemOp_retf_Iw)
9416{
9417 IEMOP_MNEMONIC("retf Iw");
9418 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
9419 IEMOP_HLP_NO_LOCK_PREFIX();
9420 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9421 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
9422}
9423
9424
9425/** Opcode 0xcb. */
9426FNIEMOP_DEF(iemOp_retf)
9427{
9428 IEMOP_MNEMONIC("retf");
9429 IEMOP_HLP_NO_LOCK_PREFIX();
9430 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
9431 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
9432}
9433
9434
9435/** Opcode 0xcc. */
9436FNIEMOP_DEF(iemOp_int_3)
9437{
9438 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
9439}
9440
9441
9442/** Opcode 0xcd. */
9443FNIEMOP_DEF(iemOp_int_Ib)
9444{
9445 uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
9446 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
9447}
9448
9449
9450/** Opcode 0xce. */
9451FNIEMOP_DEF(iemOp_into)
9452{
IEMOP_MNEMONIC("into");
IEMOP_HLP_NO_64BIT();
9453 IEM_MC_BEGIN(2, 0);
9454 IEM_MC_ARG_CONST(uint8_t, u8Int, /*=*/ X86_XCPT_OF, 0);
9455 IEM_MC_ARG_CONST(bool, fIsBpInstr, /*=*/ false, 1);
9456 IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
9457 IEM_MC_END();
9458 return VINF_SUCCESS;
9459}
9460
9461
9462/** Opcode 0xcf. */
9463FNIEMOP_DEF(iemOp_iret)
9464{
9465 IEMOP_MNEMONIC("iret");
9466 IEMOP_HLP_NO_LOCK_PREFIX();
9467 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
9468}
9469
9470
9471/** Opcode 0xd0. */
9472FNIEMOP_DEF(iemOp_Grp2_Eb_1)
9473{
9474 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9475 PCIEMOPSHIFTSIZES pImpl;
9476 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9477 {
9478 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
9479 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
9480 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
9481 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
9482 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
9483 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
9484 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
9485 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9486 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9487 }
9488 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9489
9490 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9491 {
9492 /* register */
9493 IEMOP_HLP_NO_LOCK_PREFIX();
9494 IEM_MC_BEGIN(3, 0);
9495 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9496 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9497 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9498 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9499 IEM_MC_REF_EFLAGS(pEFlags);
9500 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9501 IEM_MC_ADVANCE_RIP();
9502 IEM_MC_END();
9503 }
9504 else
9505 {
9506 /* memory */
9507 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9508 IEM_MC_BEGIN(3, 2);
9509 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9510 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1);
9511 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9512 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9513
9514 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9515 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9516 IEM_MC_FETCH_EFLAGS(EFlags);
9517 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9518
9519 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9520 IEM_MC_COMMIT_EFLAGS(EFlags);
9521 IEM_MC_ADVANCE_RIP();
9522 IEM_MC_END();
9523 }
9524 return VINF_SUCCESS;
9525}
9526
9527
9528
9529/** Opcode 0xd1. */
9530FNIEMOP_DEF(iemOp_Grp2_Ev_1)
9531{
9532 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9533 PCIEMOPSHIFTSIZES pImpl;
9534 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9535 {
9536 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
9537 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
9538 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
9539 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
9540 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
9541 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
9542 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
9543 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9544 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
9545 }
9546 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9547
9548 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9549 {
9550 /* register */
9551 IEMOP_HLP_NO_LOCK_PREFIX();
9552 switch (pIemCpu->enmEffOpSize)
9553 {
9554 case IEMMODE_16BIT:
9555 IEM_MC_BEGIN(3, 0);
9556 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9557 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9558 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9559 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9560 IEM_MC_REF_EFLAGS(pEFlags);
9561 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9562 IEM_MC_ADVANCE_RIP();
9563 IEM_MC_END();
9564 return VINF_SUCCESS;
9565
9566 case IEMMODE_32BIT:
9567 IEM_MC_BEGIN(3, 0);
9568 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9569 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9570 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9571 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9572 IEM_MC_REF_EFLAGS(pEFlags);
9573 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9574 IEM_MC_ADVANCE_RIP();
9575 IEM_MC_END();
9576 return VINF_SUCCESS;
9577
9578 case IEMMODE_64BIT:
9579 IEM_MC_BEGIN(3, 0);
9580 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9581 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9582 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9583 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9584 IEM_MC_REF_EFLAGS(pEFlags);
9585 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9586 IEM_MC_ADVANCE_RIP();
9587 IEM_MC_END();
9588 return VINF_SUCCESS;
9589
9590 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9591 }
9592 }
9593 else
9594 {
9595 /* memory */
9596 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9597 switch (pIemCpu->enmEffOpSize)
9598 {
9599 case IEMMODE_16BIT:
9600 IEM_MC_BEGIN(3, 2);
9601 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9602 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9603 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9604 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9605
9606 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9607 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9608 IEM_MC_FETCH_EFLAGS(EFlags);
9609 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9610
9611 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9612 IEM_MC_COMMIT_EFLAGS(EFlags);
9613 IEM_MC_ADVANCE_RIP();
9614 IEM_MC_END();
9615 return VINF_SUCCESS;
9616
9617 case IEMMODE_32BIT:
9618 IEM_MC_BEGIN(3, 2);
9619 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9620 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9621 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9622 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9623
9624 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9625 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9626 IEM_MC_FETCH_EFLAGS(EFlags);
9627 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9628
9629 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9630 IEM_MC_COMMIT_EFLAGS(EFlags);
9631 IEM_MC_ADVANCE_RIP();
9632 IEM_MC_END();
9633 return VINF_SUCCESS;
9634
9635 case IEMMODE_64BIT:
9636 IEM_MC_BEGIN(3, 2);
9637 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9638 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1);
9639 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9640 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9641
9642 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9643 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9644 IEM_MC_FETCH_EFLAGS(EFlags);
9645 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9646
9647 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9648 IEM_MC_COMMIT_EFLAGS(EFlags);
9649 IEM_MC_ADVANCE_RIP();
9650 IEM_MC_END();
9651 return VINF_SUCCESS;
9652
9653 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9654 }
9655 }
9656}
9657
9658
9659/** Opcode 0xd2. */
9660FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
9661{
9662 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9663 PCIEMOPSHIFTSIZES pImpl;
9664 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9665 {
9666 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
9667 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
9668 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
9669 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
9670 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
9671 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
9672 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
9673 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9674 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
9675 }
9676 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9677
9678 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9679 {
9680 /* register */
9681 IEMOP_HLP_NO_LOCK_PREFIX();
9682 IEM_MC_BEGIN(3, 0);
9683 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9684 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9685 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9686 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9687 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9688 IEM_MC_REF_EFLAGS(pEFlags);
9689 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9690 IEM_MC_ADVANCE_RIP();
9691 IEM_MC_END();
9692 }
9693 else
9694 {
9695 /* memory */
9696 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9697 IEM_MC_BEGIN(3, 2);
9698 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
9699 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9700 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9701 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9702
9703 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9704 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9705 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9706 IEM_MC_FETCH_EFLAGS(EFlags);
9707 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
9708
9709 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
9710 IEM_MC_COMMIT_EFLAGS(EFlags);
9711 IEM_MC_ADVANCE_RIP();
9712 IEM_MC_END();
9713 }
9714 return VINF_SUCCESS;
9715}
9716
9717
9718/** Opcode 0xd3. */
9719FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
9720{
9721 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
9722 PCIEMOPSHIFTSIZES pImpl;
9723 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
9724 {
9725 case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
9726 case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
9727 case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
9728 case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
9729 case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
9730 case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
9731 case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
9732 case 6: return IEMOP_RAISE_INVALID_OPCODE();
9733 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
9734 }
9735 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);
9736
9737 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
9738 {
9739 /* register */
9740 IEMOP_HLP_NO_LOCK_PREFIX();
9741 switch (pIemCpu->enmEffOpSize)
9742 {
9743 case IEMMODE_16BIT:
9744 IEM_MC_BEGIN(3, 0);
9745 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9746 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9747 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9748 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9749 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9750 IEM_MC_REF_EFLAGS(pEFlags);
9751 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9752 IEM_MC_ADVANCE_RIP();
9753 IEM_MC_END();
9754 return VINF_SUCCESS;
9755
9756 case IEMMODE_32BIT:
9757 IEM_MC_BEGIN(3, 0);
9758 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9759 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9760 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9761 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9762 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9763 IEM_MC_REF_EFLAGS(pEFlags);
9764 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9765 IEM_MC_ADVANCE_RIP();
9766 IEM_MC_END();
9767 return VINF_SUCCESS;
9768
9769 case IEMMODE_64BIT:
9770 IEM_MC_BEGIN(3, 0);
9771 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9772 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9773 IEM_MC_ARG(uint32_t *, pEFlags, 2);
9774 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
9775 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9776 IEM_MC_REF_EFLAGS(pEFlags);
9777 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9778 IEM_MC_ADVANCE_RIP();
9779 IEM_MC_END();
9780 return VINF_SUCCESS;
9781
9782 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9783 }
9784 }
9785 else
9786 {
9787 /* memory */
9788 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
9789 switch (pIemCpu->enmEffOpSize)
9790 {
9791 case IEMMODE_16BIT:
9792 IEM_MC_BEGIN(3, 2);
9793 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
9794 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9795 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9796 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9797
9798 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9799 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9800 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9801 IEM_MC_FETCH_EFLAGS(EFlags);
9802 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
9803
9804 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
9805 IEM_MC_COMMIT_EFLAGS(EFlags);
9806 IEM_MC_ADVANCE_RIP();
9807 IEM_MC_END();
9808 return VINF_SUCCESS;
9809
9810 case IEMMODE_32BIT:
9811 IEM_MC_BEGIN(3, 2);
9812 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
9813 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9814 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9815 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9816
9817 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9818 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9819 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9820 IEM_MC_FETCH_EFLAGS(EFlags);
9821 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
9822
9823 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
9824 IEM_MC_COMMIT_EFLAGS(EFlags);
9825 IEM_MC_ADVANCE_RIP();
9826 IEM_MC_END();
9827 return VINF_SUCCESS;
9828
9829 case IEMMODE_64BIT:
9830 IEM_MC_BEGIN(3, 2);
9831 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
9832 IEM_MC_ARG(uint8_t, cShiftArg, 1);
9833 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
9834 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
9835
9836 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
9837 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
9838 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
9839 IEM_MC_FETCH_EFLAGS(EFlags);
9840 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
9841
9842 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
9843 IEM_MC_COMMIT_EFLAGS(EFlags);
9844 IEM_MC_ADVANCE_RIP();
9845 IEM_MC_END();
9846 return VINF_SUCCESS;
9847
9848 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9849 }
9850 }
9851}
9852
9853/** Opcode 0xd4. */
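/* aam divides AL by the immediate base (0x0A in the classic encoding),
   putting the quotient in AH and the remainder in AL; a zero base is a
   divide error, checked here before deferring to the C implementation. */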
9854FNIEMOP_DEF(iemOp_aam_Ib)
9855{
9856 IEMOP_MNEMONIC("aam Ib");
9857 uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
9858 IEMOP_HLP_NO_LOCK_PREFIX();
9859 IEMOP_HLP_NO_64BIT();
9860 if (!bImm)
9861 return IEMOP_RAISE_DIVIDE_ERROR();
9862 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aam, bImm);
9863}
9864
9865
9866/** Opcode 0xd5. */
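/* aad folds AH into AL as AL = (AL + AH * imm) & 0xff and clears AH;
   unlike aam it cannot fault on the immediate, so no zero check. */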
9867FNIEMOP_DEF(iemOp_aad_Ib)
9868{
9869 IEMOP_MNEMONIC("aad Ib");
9870 uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
9871 IEMOP_HLP_NO_LOCK_PREFIX();
9872 IEMOP_HLP_NO_64BIT();
9873 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aad, bImm);
9874}
9875
9876
9877/** Opcode 0xd7. */
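/* xlat replaces AL with the byte at seg:rBX + zero-extended AL; only the
   address width varies, hence the switch on enmEffAddrMode below. */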
9878FNIEMOP_DEF(iemOp_xlat)
9879{
9880 IEMOP_MNEMONIC("xlat");
9881 IEMOP_HLP_NO_LOCK_PREFIX();
9882 switch (pIemCpu->enmEffAddrMode)
9883 {
9884 case IEMMODE_16BIT:
9885 IEM_MC_BEGIN(2, 0);
9886 IEM_MC_LOCAL(uint8_t, u8Tmp);
9887 IEM_MC_LOCAL(uint16_t, u16Addr);
9888 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX);
9889 IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX);
9890 IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
9891 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
9892 IEM_MC_ADVANCE_RIP();
9893 IEM_MC_END();
9894 return VINF_SUCCESS;
9895
9896 case IEMMODE_32BIT:
9897 IEM_MC_BEGIN(2, 0);
9898 IEM_MC_LOCAL(uint8_t, u8Tmp);
9899 IEM_MC_LOCAL(uint32_t, u32Addr);
9900 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
9901 IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
9902 IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
9903 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
9904 IEM_MC_ADVANCE_RIP();
9905 IEM_MC_END();
9906 return VINF_SUCCESS;
9907
9908 case IEMMODE_64BIT:
9909 IEM_MC_BEGIN(2, 0);
9910 IEM_MC_LOCAL(uint8_t, u8Tmp);
9911 IEM_MC_LOCAL(uint64_t, u64Addr);
9912 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
9913 IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
9914 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
9915 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
9916 IEM_MC_ADVANCE_RIP();
9917 IEM_MC_END();
9918 return VINF_SUCCESS;
9919
9920 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9921 }
9922}
9923
9924
9925/** Opcode 0xd8. */
9926FNIEMOP_STUB(iemOp_EscF0);
9927/** Opcode 0xd9. */
9928FNIEMOP_STUB(iemOp_EscF1);
9929/** Opcode 0xda. */
9930FNIEMOP_STUB(iemOp_EscF2);
9931
9932
9933/** Opcode 0xdb /0. */
9934FNIEMOP_STUB_1(iemOp_fild_dw, uint8_t, bRm);
9935/** Opcode 0xdb /1. */
9936FNIEMOP_STUB_1(iemOp_fisttp_dw, uint8_t, bRm);
9937/** Opcode 0xdb /2. */
9938FNIEMOP_STUB_1(iemOp_fist_dw, uint8_t, bRm);
9939/** Opcode 0xdb /3. */
9940FNIEMOP_STUB_1(iemOp_fistp_dw, uint8_t, bRm);
9941/** Opcode 0xdb /5. */
9942FNIEMOP_STUB_1(iemOp_fld_xr, uint8_t, bRm);
9943/** Opcode 0xdb /7. */
9944FNIEMOP_STUB_1(iemOp_fstp_xr, uint8_t, bRm);
9945
9946
9947/** Opcode 0xdb 0xe0. */
9948FNIEMOP_DEF(iemOp_fneni)
9949{
9950 IEMOP_MNEMONIC("fneni (8087/ign)");
9951 IEM_MC_BEGIN(0,0);
9952 IEM_MC_ADVANCE_RIP();
9953 IEM_MC_END();
9954 return VINF_SUCCESS;
9955}
9956
9957
9958/** Opcode 0xdb 0xe1. */
9959FNIEMOP_DEF(iemOp_fndisi)
9960{
9961 IEMOP_MNEMONIC("fndisi (8087/ign)");
9962 IEM_MC_BEGIN(0,0);
9963 IEM_MC_ADVANCE_RIP();
9964 IEM_MC_END();
9965 return VINF_SUCCESS;
9966}
9967
9968
9969/** Opcode 0xdb 0xe2. */
9970FNIEMOP_STUB(iemOp_fnclex);
9971
9972
9973/** Opcode 0xdb 0xe3. */
9974FNIEMOP_DEF(iemOp_fninit)
9975{
9976 IEMOP_MNEMONIC("fninit");
9977 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
9978}
9979
9980
9981/** Opcode 0xdb 0xe4. */
9982FNIEMOP_DEF(iemOp_fnsetpm)
9983{
9984 IEMOP_MNEMONIC("fnsetpm (80287/ign)"); /* set protected mode on fpu. */
9985 IEM_MC_BEGIN(0,0);
9986 IEM_MC_ADVANCE_RIP();
9987 IEM_MC_END();
9988 return VINF_SUCCESS;
9989}
9990
9991
9992/** Opcode 0xdb 0xe5. */
9993FNIEMOP_DEF(iemOp_frstpm)
9994{
9995 IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
9996 IEM_MC_BEGIN(0,0);
9997 IEM_MC_ADVANCE_RIP();
9998 IEM_MC_END();
9999 return VINF_SUCCESS;
10000}
10001
10002
10003/** Opcode 0xdb. */
10004FNIEMOP_DEF(iemOp_EscF3)
10005{
10006 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10007 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10008 {
10009 switch (bRm & 0xf8)
10010 {
10011 case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnb
10012 case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovne
10013 case 0xd0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnbe
10014 case 0xd8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcmovnu
10015 case 0xe0:
10016 IEMOP_HLP_NO_LOCK_PREFIX();
10017 switch (bRm)
10018 {
10019 case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
10020 case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
10021 case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
10022 case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
10023 case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
10024 case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
10025 default: return IEMOP_RAISE_INVALID_OPCODE();
10026 }
10027 break;
10028 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomi
10029 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomi
10030 case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
10031 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10032 }
10033 }
10034 else
10035 {
10036 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10037 {
10038 case 0: return FNIEMOP_CALL_1(iemOp_fild_dw, bRm);
10039 case 1: return FNIEMOP_CALL_1(iemOp_fisttp_dw,bRm);
10040 case 2: return FNIEMOP_CALL_1(iemOp_fist_dw, bRm);
10041 case 3: return FNIEMOP_CALL_1(iemOp_fistp_dw, bRm);
10042 case 4: return IEMOP_RAISE_INVALID_OPCODE();
10043 case 5: return FNIEMOP_CALL_1(iemOp_fld_xr, bRm);
10044 case 6: return IEMOP_RAISE_INVALID_OPCODE();
10045 case 7: return FNIEMOP_CALL_1(iemOp_fstp_xr, bRm);
10046 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10047 }
10048 }
10049}
10050
10051/** Opcode 0xdc. */
10052FNIEMOP_STUB(iemOp_EscF4);
10053/** Opcode 0xdd. */
10054FNIEMOP_STUB(iemOp_EscF5);
10055
10056/** Opcode 0xde 0xd9. */
10057FNIEMOP_STUB(iemOp_fcompp);
10058
10059/** Opcode 0xde. */
10060FNIEMOP_DEF(iemOp_EscF6)
10061{
10062 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10063 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10064 {
10065 switch (bRm & 0xf8)
10066 {
10067 case 0xc0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fiaddp
10068 case 0xc8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fimulp
10069 case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
10070 case 0xd8:
10071 switch (bRm)
10072 {
10073 case 0xd9: return FNIEMOP_CALL(iemOp_fcompp);
10074 default: return IEMOP_RAISE_INVALID_OPCODE();
10075 }
10076 case 0xe0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubrp
10077 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fsubp
10078 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivrp
10079 case 0xf8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fdivp
10080 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10081 }
10082 }
10083 else
10084 {
10085#if 0
10086 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
10087 {
10088 case 0: return FNIEMOP_CALL_1(iemOp_fiadd_w, bRm);
10089 case 1: return FNIEMOP_CALL_1(iemOp_fimul_w, bRm);
10090 case 2: return FNIEMOP_CALL_1(iemOp_ficom_w, bRm);
10091 case 3: return FNIEMOP_CALL_1(iemOp_ficomp_w, bRm);
10092 case 4: return FNIEMOP_CALL_1(iemOp_fisub_w, bRm);
10093 case 5: return FNIEMOP_CALL_1(iemOp_fisubr_w, bRm);
10094 case 6: return FNIEMOP_CALL_1(iemOp_fidiv_w, bRm);
10095 case 7: return FNIEMOP_CALL_1(iemOp_fidivr_w, bRm);
10096 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10097 }
10098#endif
10099 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
10100 }
10101}
10102
10103
10104/** Opcode 0xdf 0xe0. */
10105FNIEMOP_DEF(iemOp_fnstsw_ax)
10106{
10107 IEMOP_MNEMONIC("fnstsw ax");
10108 IEMOP_HLP_NO_LOCK_PREFIX();
10109
10110 IEM_MC_BEGIN(0, 1);
10111 IEM_MC_LOCAL(uint16_t, u16Tmp);
10112 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
10113 IEM_MC_FETCH_FSW(u16Tmp);
10114 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
10115 IEM_MC_ADVANCE_RIP();
10116 IEM_MC_END();
10117 return VINF_SUCCESS;
10118}
10119
10120
10121/** Opcode 0xdf. */
10122FNIEMOP_DEF(iemOp_EscF7)
10123{
10124 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10125 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10126 {
10127 switch (bRm & 0xf8)
10128 {
10129 case 0xc0: return IEMOP_RAISE_INVALID_OPCODE();
10130 case 0xc8: return IEMOP_RAISE_INVALID_OPCODE();
10131 case 0xd0: return IEMOP_RAISE_INVALID_OPCODE();
10132 case 0xd8: return IEMOP_RAISE_INVALID_OPCODE();
10133 case 0xe0:
10134 switch (bRm)
10135 {
10136 case 0xe0: return FNIEMOP_CALL(iemOp_fnstsw_ax);
10137 default: return IEMOP_RAISE_INVALID_OPCODE();
10138 }
10139 case 0xe8: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fucomip
10140 case 0xf0: AssertFailedReturn(VERR_NOT_IMPLEMENTED); // fcomip
10141 case 0xf8: return IEMOP_RAISE_INVALID_OPCODE();
10142 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10143 }
10144 }
10145 else
10146 {
10147 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
10148 }
10149}
10150
10151
10152/** Opcode 0xe0. */
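/* The loop instructions decrement the count register selected by the
   effective address size and branch on the result; loopne additionally
   requires ZF to be clear. */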
10153FNIEMOP_DEF(iemOp_loopne_Jb)
10154{
10155 IEMOP_MNEMONIC("loopne Jb");
10156 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10157 IEMOP_HLP_NO_LOCK_PREFIX();
10158 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10159
10160 switch (pIemCpu->enmEffAddrMode)
10161 {
10162 case IEMMODE_16BIT:
10163 IEM_MC_BEGIN(0,0);
10164 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10165 IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10166 IEM_MC_REL_JMP_S8(i8Imm);
10167 } IEM_MC_ELSE() {
10168 IEM_MC_ADVANCE_RIP();
10169 } IEM_MC_ENDIF();
10170 IEM_MC_END();
10171 return VINF_SUCCESS;
10172
10173 case IEMMODE_32BIT:
10174 IEM_MC_BEGIN(0,0);
10175 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10176 IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10177 IEM_MC_REL_JMP_S8(i8Imm);
10178 } IEM_MC_ELSE() {
10179 IEM_MC_ADVANCE_RIP();
10180 } IEM_MC_ENDIF();
10181 IEM_MC_END();
10182 return VINF_SUCCESS;
10183
10184 case IEMMODE_64BIT:
10185 IEM_MC_BEGIN(0,0);
10186 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10187 IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
10188 IEM_MC_REL_JMP_S8(i8Imm);
10189 } IEM_MC_ELSE() {
10190 IEM_MC_ADVANCE_RIP();
10191 } IEM_MC_ENDIF();
10192 IEM_MC_END();
10193 return VINF_SUCCESS;
10194
10195 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10196 }
10197}
10198
10199
10200/** Opcode 0xe1. */
10201FNIEMOP_DEF(iemOp_loope_Jb)
10202{
10203 IEMOP_MNEMONIC("loope Jb");
10204 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10205 IEMOP_HLP_NO_LOCK_PREFIX();
10206 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10207
10208 switch (pIemCpu->enmEffAddrMode)
10209 {
10210 case IEMMODE_16BIT:
10211 IEM_MC_BEGIN(0,0);
10212 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10213 IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
10214 IEM_MC_REL_JMP_S8(i8Imm);
10215 } IEM_MC_ELSE() {
10216 IEM_MC_ADVANCE_RIP();
10217 } IEM_MC_ENDIF();
10218 IEM_MC_END();
10219 return VINF_SUCCESS;
10220
10221 case IEMMODE_32BIT:
10222 IEM_MC_BEGIN(0,0);
10223 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10224 IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
10225 IEM_MC_REL_JMP_S8(i8Imm);
10226 } IEM_MC_ELSE() {
10227 IEM_MC_ADVANCE_RIP();
10228 } IEM_MC_ENDIF();
10229 IEM_MC_END();
10230 return VINF_SUCCESS;
10231
10232 case IEMMODE_64BIT:
10233 IEM_MC_BEGIN(0,0);
10234 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10235 IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
10236 IEM_MC_REL_JMP_S8(i8Imm);
10237 } IEM_MC_ELSE() {
10238 IEM_MC_ADVANCE_RIP();
10239 } IEM_MC_ENDIF();
10240 IEM_MC_END();
10241 return VINF_SUCCESS;
10242
10243 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10244 }
10245}
10246
10247
10248/** Opcode 0xe2. */
10249FNIEMOP_DEF(iemOp_loop_Jb)
10250{
10251 IEMOP_MNEMONIC("loop Jb");
10252 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10253 IEMOP_HLP_NO_LOCK_PREFIX();
10254 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10255
10256 /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
10257 * using the 32-bit operand size override. How can that be restarted? See
10258 * weird pseudo code in the Intel manual. */
10259 switch (pIemCpu->enmEffAddrMode)
10260 {
10261 case IEMMODE_16BIT:
10262 IEM_MC_BEGIN(0,0);
10263 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
10264 IEM_MC_IF_CX_IS_NZ() {
10265 IEM_MC_REL_JMP_S8(i8Imm);
10266 } IEM_MC_ELSE() {
10267 IEM_MC_ADVANCE_RIP();
10268 } IEM_MC_ENDIF();
10269 IEM_MC_END();
10270 return VINF_SUCCESS;
10271
10272 case IEMMODE_32BIT:
10273 IEM_MC_BEGIN(0,0);
10274 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
10275 IEM_MC_IF_ECX_IS_NZ() {
10276 IEM_MC_REL_JMP_S8(i8Imm);
10277 } IEM_MC_ELSE() {
10278 IEM_MC_ADVANCE_RIP();
10279 } IEM_MC_ENDIF();
10280 IEM_MC_END();
10281 return VINF_SUCCESS;
10282
10283 case IEMMODE_64BIT:
10284 IEM_MC_BEGIN(0,0);
10285 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
10286 IEM_MC_IF_RCX_IS_NZ() {
10287 IEM_MC_REL_JMP_S8(i8Imm);
10288 } IEM_MC_ELSE() {
10289 IEM_MC_ADVANCE_RIP();
10290 } IEM_MC_ENDIF();
10291 IEM_MC_END();
10292 return VINF_SUCCESS;
10293
10294 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10295 }
10296}
10297
10298
10299/** Opcode 0xe3. */
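/* jecxz branches when the count register is zero; note that it only tests
   CX/ECX/RCX and never decrements, unlike the loop family above. */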
10300FNIEMOP_DEF(iemOp_jecxz_Jb)
10301{
10302 IEMOP_MNEMONIC("jecxz Jb");
10303 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10304 IEMOP_HLP_NO_LOCK_PREFIX();
10305 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10306
10307 switch (pIemCpu->enmEffAddrMode)
10308 {
10309 case IEMMODE_16BIT:
10310 IEM_MC_BEGIN(0,0);
10311 IEM_MC_IF_CX_IS_NZ() {
10312 IEM_MC_ADVANCE_RIP();
10313 } IEM_MC_ELSE() {
10314 IEM_MC_REL_JMP_S8(i8Imm);
10315 } IEM_MC_ENDIF();
10316 IEM_MC_END();
10317 return VINF_SUCCESS;
10318
10319 case IEMMODE_32BIT:
10320 IEM_MC_BEGIN(0,0);
10321 IEM_MC_IF_ECX_IS_NZ() {
10322 IEM_MC_ADVANCE_RIP();
10323 } IEM_MC_ELSE() {
10324 IEM_MC_REL_JMP_S8(i8Imm);
10325 } IEM_MC_ENDIF();
10326 IEM_MC_END();
10327 return VINF_SUCCESS;
10328
10329 case IEMMODE_64BIT:
10330 IEM_MC_BEGIN(0,0);
10331 IEM_MC_IF_RCX_IS_NZ() {
10332 IEM_MC_ADVANCE_RIP();
10333 } IEM_MC_ELSE() {
10334 IEM_MC_REL_JMP_S8(i8Imm);
10335 } IEM_MC_ENDIF();
10336 IEM_MC_END();
10337 return VINF_SUCCESS;
10338
10339 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10340 }
10341}
10342
10343
10344/** Opcode 0xe4 */
10345FNIEMOP_DEF(iemOp_in_AL_Ib)
10346{
10347 IEMOP_MNEMONIC("in AL,Ib");
10348 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10349 IEMOP_HLP_NO_LOCK_PREFIX();
10350 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
10351}
10352
10353
10354/** Opcode 0xe5 */
10355FNIEMOP_DEF(iemOp_in_eAX_Ib)
10356{
10357 IEMOP_MNEMONIC("in eAX,Ib");
10358 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10359 IEMOP_HLP_NO_LOCK_PREFIX();
10360 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10361}
10362
10363
10364/** Opcode 0xe6 */
10365FNIEMOP_DEF(iemOp_out_Ib_AL)
10366{
10367 IEMOP_MNEMONIC("out Ib,AL");
10368 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10369 IEMOP_HLP_NO_LOCK_PREFIX();
10370 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
10371}
10372
10373
10374/** Opcode 0xe7 */
10375FNIEMOP_DEF(iemOp_out_Ib_eAX)
10376{
10377 IEMOP_MNEMONIC("out Ib,eAX");
10378 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10379 IEMOP_HLP_NO_LOCK_PREFIX();
10380 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10381}
10382
10383
10384/** Opcode 0xe8. */
10385FNIEMOP_DEF(iemOp_call_Jv)
10386{
10387 IEMOP_MNEMONIC("call Jv");
10388 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10389 switch (pIemCpu->enmEffOpSize)
10390 {
10391 case IEMMODE_16BIT:
10392 {
10393 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10394 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int16_t)u16Imm);
10395 }
10396
10397 case IEMMODE_32BIT:
10398 {
10399 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10400 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
10401 }
10402
10403 case IEMMODE_64BIT:
10404 {
10405 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10406 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
10407 }
10408
10409 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10410 }
10411}
10412
10413
10414/** Opcode 0xe9. */
10415FNIEMOP_DEF(iemOp_jmp_Jv)
10416{
10417 IEMOP_MNEMONIC("jmp Jv");
10418 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10419 switch (pIemCpu->enmEffOpSize)
10420 {
10421 case IEMMODE_16BIT:
10422 {
10423 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
10424 IEM_MC_BEGIN(0, 0);
10425 IEM_MC_REL_JMP_S16(i16Imm);
10426 IEM_MC_END();
10427 return VINF_SUCCESS;
10428 }
10429
10430 case IEMMODE_64BIT:
10431 case IEMMODE_32BIT:
10432 {
10433 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
10434 IEM_MC_BEGIN(0, 0);
10435 IEM_MC_REL_JMP_S32(i32Imm);
10436 IEM_MC_END();
10437 return VINF_SUCCESS;
10438 }
10439
10440 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10441 }
10442}
10443
10444
10445/** Opcode 0xea. */
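/* The far pointer operand is encoded offset-first, selector-last, which is
   the order it is fetched in below; the direct far jmp form is invalid in
   64-bit mode, hence IEMOP_HLP_NO_64BIT. */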
10446FNIEMOP_DEF(iemOp_jmp_Ap)
10447{
10448 IEMOP_MNEMONIC("jmp Ap");
10449 IEMOP_HLP_NO_64BIT();
10450
10451 /* Decode the far pointer address and pass it on to the far call C implementation. */
10452 uint32_t offSeg;
10453 if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
10454 IEM_OPCODE_GET_NEXT_U32(&offSeg);
10455 else
10456 IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
10457 uint16_t uSel; IEM_OPCODE_GET_NEXT_U16(&uSel);
10458 IEMOP_HLP_NO_LOCK_PREFIX();
10459 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
10460}
10461
10462
10463/** Opcode 0xeb. */
10464FNIEMOP_DEF(iemOp_jmp_Jb)
10465{
10466 IEMOP_MNEMONIC("jmp Jb");
10467 int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
10468 IEMOP_HLP_NO_LOCK_PREFIX();
10469 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
10470
10471 IEM_MC_BEGIN(0, 0);
10472 IEM_MC_REL_JMP_S8(i8Imm);
10473 IEM_MC_END();
10474 return VINF_SUCCESS;
10475}
10476
10477
10478/** Opcode 0xec */
10479FNIEMOP_DEF(iemOp_in_AL_DX)
10480{
10481 IEMOP_MNEMONIC("in AL,DX");
10482 IEMOP_HLP_NO_LOCK_PREFIX();
10483 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
10484}
10485
10486
10487/** Opcode 0xed */
10488FNIEMOP_DEF(iemOp_eAX_DX)
10489{
10490 IEMOP_MNEMONIC("in eAX,DX");
10491 IEMOP_HLP_NO_LOCK_PREFIX();
10492 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10493}
10494
10495
10496/** Opcode 0xee */
10497FNIEMOP_DEF(iemOp_out_DX_AL)
10498{
10499 IEMOP_MNEMONIC("out DX,AL");
10500 IEMOP_HLP_NO_LOCK_PREFIX();
10501 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
10502}
10503
10504
10505/** Opcode 0xef */
10506FNIEMOP_DEF(iemOp_out_DX_eAX)
10507{
10508 IEMOP_MNEMONIC("out DX,eAX");
10509 IEMOP_HLP_NO_LOCK_PREFIX();
10510 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
10511}
10512
10513
10514/** Opcode 0xf0. */
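/* The prefix bytes (lock, repne, repe) just accumulate flags and then
   restart decoding via the forward-declared one-byte opcode map. */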
10515FNIEMOP_DEF(iemOp_lock)
10516{
10517 pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;
10518
10519 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10520 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10521}
10522
10523
10524/** Opcode 0xf2. */
10525FNIEMOP_DEF(iemOp_repne)
10526{
10527 /* This overrides any previous REPE prefix. */
10528 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
10529 pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;
10530
10531 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10532 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10533}
10534
10535
10536/** Opcode 0xf3. */
10537FNIEMOP_DEF(iemOp_repe)
10538{
10539 /* This overrides any previous REPNE prefix. */
10540 pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
10541 pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;
10542
10543 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10544 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
10545}
10546
10547
10548/** Opcode 0xf4. */
10549FNIEMOP_DEF(iemOp_hlt)
10550{
10551 IEMOP_HLP_NO_LOCK_PREFIX();
10552 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
10553}
10554
10555
10556/** Opcode 0xf5. */
10557FNIEMOP_DEF(iemOp_cmc)
10558{
10559 IEMOP_MNEMONIC("cmc");
10560 IEMOP_HLP_NO_LOCK_PREFIX();
10561 IEM_MC_BEGIN(0, 0);
10562 IEM_MC_FLIP_EFL_BIT(X86_EFL_CF);
10563 IEM_MC_ADVANCE_RIP();
10564 IEM_MC_END();
10565 return VINF_SUCCESS;
10566}
10567
10568
10569/**
10570 * Common implementation of 'inc/dec/not/neg Eb'.
10571 *
10572 * @param bRm The RM byte.
10573 * @param pImpl The instruction implementation.
10574 */
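/* For memory destinations the locked or normal assembly helper is chosen
   from the LOCK prefix flag; the register path always uses the normal
   helper. */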
10575FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10576{
10577 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10578 {
10579 /* register access */
10580 IEM_MC_BEGIN(2, 0);
10581 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10582 IEM_MC_ARG(uint32_t *, pEFlags, 1);
10583 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10584 IEM_MC_REF_EFLAGS(pEFlags);
10585 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10586 IEM_MC_ADVANCE_RIP();
10587 IEM_MC_END();
10588 }
10589 else
10590 {
10591 /* memory access. */
10592 IEM_MC_BEGIN(2, 2);
10593 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10594 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10595 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10596
10597 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10598 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10599 IEM_MC_FETCH_EFLAGS(EFlags);
10600 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10601 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
10602 else
10603 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);
10604
10605 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
10606 IEM_MC_COMMIT_EFLAGS(EFlags);
10607 IEM_MC_ADVANCE_RIP();
10608 IEM_MC_END();
10609 }
10610 return VINF_SUCCESS;
10611}
10612
10613
10614/**
10615 * Common implementation of 'inc/dec/not/neg Ev'.
10616 *
10617 * @param bRm The RM byte.
10618 * @param pImpl The instruction implementation.
10619 */
10620FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
10621{
10622 /* Registers are handled by a common worker. */
10623 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10624 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10625
10626 /* Memory we do here. */
10627 switch (pIemCpu->enmEffOpSize)
10628 {
10629 case IEMMODE_16BIT:
10630 IEM_MC_BEGIN(2, 2);
10631 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10632 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10633 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10634
10635 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10636 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10637 IEM_MC_FETCH_EFLAGS(EFlags);
10638 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10639 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
10640 else
10641 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);
10642
10643 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
10644 IEM_MC_COMMIT_EFLAGS(EFlags);
10645 IEM_MC_ADVANCE_RIP();
10646 IEM_MC_END();
10647 return VINF_SUCCESS;
10648
10649 case IEMMODE_32BIT:
10650 IEM_MC_BEGIN(2, 2);
10651 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10652 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10653 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10654
10655 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10656 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10657 IEM_MC_FETCH_EFLAGS(EFlags);
10658 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10659 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
10660 else
10661 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);
10662
10663 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
10664 IEM_MC_COMMIT_EFLAGS(EFlags);
10665 IEM_MC_ADVANCE_RIP();
10666 IEM_MC_END();
10667 return VINF_SUCCESS;
10668
10669 case IEMMODE_64BIT:
10670 IEM_MC_BEGIN(2, 2);
10671 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10672 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
10673 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10674
10675 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10676 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10677 IEM_MC_FETCH_EFLAGS(EFlags);
10678 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
10679 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
10680 else
10681 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);
10682
10683 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
10684 IEM_MC_COMMIT_EFLAGS(EFlags);
10685 IEM_MC_ADVANCE_RIP();
10686 IEM_MC_END();
10687 return VINF_SUCCESS;
10688
10689 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10690 }
10691}
10692
10693
10694/** Opcode 0xf6 /0. */
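/* test only reads its destination, so the memory operand is mapped
   IEM_ACCESS_DATA_R rather than RW and the commit/unmap writes nothing
   back. */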
10695FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
10696{
10697 IEMOP_MNEMONIC("test Eb,Ib");
10698 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
10699
10700 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10701 {
10702 /* register access */
10703 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10704 IEMOP_HLP_NO_LOCK_PREFIX();
10705
10706 IEM_MC_BEGIN(3, 0);
10707 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10708 IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/u8Imm, 1);
10709 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10710 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10711 IEM_MC_REF_EFLAGS(pEFlags);
10712 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
10713 IEM_MC_ADVANCE_RIP();
10714 IEM_MC_END();
10715 }
10716 else
10717 {
10718 /* memory access. */
10719 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10720
10721 IEM_MC_BEGIN(3, 2);
10722 IEM_MC_ARG(uint8_t *, pu8Dst, 0);
10723 IEM_MC_ARG(uint8_t, u8Src, 1);
10724 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10725 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10726
10727 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10728 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
10729 IEM_MC_ASSIGN(u8Src, u8Imm);
10730 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10731 IEM_MC_FETCH_EFLAGS(EFlags);
10732 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
10733
10734 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
10735 IEM_MC_COMMIT_EFLAGS(EFlags);
10736 IEM_MC_ADVANCE_RIP();
10737 IEM_MC_END();
10738 }
10739 return VINF_SUCCESS;
10740}
10741
10742
10743/** Opcode 0xf7 /0. */
10744FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
10745{
10746 IEMOP_MNEMONIC("test Ev,Iv");
10747 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10748 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
10749
10750 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10751 {
10752 /* register access */
10753 switch (pIemCpu->enmEffOpSize)
10754 {
10755 case IEMMODE_16BIT:
10756 {
10757 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10758 IEM_MC_BEGIN(3, 0);
10759 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10760 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/u16Imm, 1);
10761 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10762 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10763 IEM_MC_REF_EFLAGS(pEFlags);
10764 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
10765 IEM_MC_ADVANCE_RIP();
10766 IEM_MC_END();
10767 return VINF_SUCCESS;
10768 }
10769
10770 case IEMMODE_32BIT:
10771 {
10772 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10773 IEM_MC_BEGIN(3, 0);
10774 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10775 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/u32Imm, 1);
10776 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10777 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10778 IEM_MC_REF_EFLAGS(pEFlags);
10779 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
10780 IEM_MC_ADVANCE_RIP();
10781 IEM_MC_END();
10782 return VINF_SUCCESS;
10783 }
10784
10785 case IEMMODE_64BIT:
10786 {
10787 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10788 IEM_MC_BEGIN(3, 0);
10789 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10790 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/u64Imm, 1);
10791 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10792 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10793 IEM_MC_REF_EFLAGS(pEFlags);
10794 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
10795 IEM_MC_ADVANCE_RIP();
10796 IEM_MC_END();
10797 return VINF_SUCCESS;
10798 }
10799
10800 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10801 }
10802 }
10803 else
10804 {
10805 /* memory access. */
10806 switch (pIemCpu->enmEffOpSize)
10807 {
10808 case IEMMODE_16BIT:
10809 {
10810 IEM_MC_BEGIN(3, 2);
10811 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
10812 IEM_MC_ARG(uint16_t, u16Src, 1);
10813 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10814 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10815
10816 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10817 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
10818 IEM_MC_ASSIGN(u16Src, u16Imm);
10819 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10820 IEM_MC_FETCH_EFLAGS(EFlags);
10821 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
10822
10823 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
10824 IEM_MC_COMMIT_EFLAGS(EFlags);
10825 IEM_MC_ADVANCE_RIP();
10826 IEM_MC_END();
10827 return VINF_SUCCESS;
10828 }
10829
10830 case IEMMODE_32BIT:
10831 {
10832 IEM_MC_BEGIN(3, 2);
10833 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
10834 IEM_MC_ARG(uint32_t, u32Src, 1);
10835 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10836 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10837
10838 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10839 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
10840 IEM_MC_ASSIGN(u32Src, u32Imm);
10841 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10842 IEM_MC_FETCH_EFLAGS(EFlags);
10843 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
10844
10845 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
10846 IEM_MC_COMMIT_EFLAGS(EFlags);
10847 IEM_MC_ADVANCE_RIP();
10848 IEM_MC_END();
10849 return VINF_SUCCESS;
10850 }
10851
10852 case IEMMODE_64BIT:
10853 {
10854 IEM_MC_BEGIN(3, 2);
10855 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
10856 IEM_MC_ARG(uint64_t, u64Src, 1);
10857 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
10858 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10859
10860 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10861 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
10862 IEM_MC_ASSIGN(u64Src, u64Imm);
10863 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10864 IEM_MC_FETCH_EFLAGS(EFlags);
10865 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
10866
10867 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
10868 IEM_MC_COMMIT_EFLAGS(EFlags);
10869 IEM_MC_ADVANCE_RIP();
10870 IEM_MC_END();
10871 return VINF_SUCCESS;
10872 }
10873
10874 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10875 }
10876 }
10877}
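/*
 * Worked example for the immediate handling above (informal note): there is
 * no 64-bit immediate form of TEST, so in 64-bit mode 'test rax, imm32' is
 * encoded as F7 /0 id and IEM_OPCODE_GET_NEXT_S32_SX_U64 sign-extends the
 * 32-bit immediate, e.g. 0x80000000 becomes the mask 0xffffffff80000000
 * before the flags-only AND is performed.
 */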
10878
10879
10880/** Opcode 0xf6 /4, /5, /6 and /7. */
10881FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
10882{
10883 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10884
10885 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10886 {
10887 /* register access */
10888 IEMOP_HLP_NO_LOCK_PREFIX();
10889 IEM_MC_BEGIN(3, 0);
10890 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10891 IEM_MC_ARG(uint8_t, u8Value, 1);
10892 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10893 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10894 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10895 IEM_MC_REF_EFLAGS(pEFlags);
10896 IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
10897 IEM_MC_ADVANCE_RIP();
10898 IEM_MC_END();
10899 }
10900 else
10901 {
10902 /* memory access. */
10903 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10904
10905 IEM_MC_BEGIN(3, 1);
10906 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10907 IEM_MC_ARG(uint8_t, u8Value, 1);
10908 IEM_MC_ARG(uint32_t *, pEFlags, 2);
10909 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10910
10911 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
10912 IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
10913 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10914 IEM_MC_REF_EFLAGS(pEFlags);
10915 IEM_MC_CALL_VOID_AIMPL_3(pfnU8, pu16AX, u8Value, pEFlags);
10916
10917 IEM_MC_ADVANCE_RIP();
10918 IEM_MC_END();
10919 }
10920 return VINF_SUCCESS;
10921}
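/*
 * Informal note: the byte-sized multiply/divide helpers take a single
 * uint16_t* because the 8-bit forms implicitly widen into AX, e.g.
 * 'mul bl' computes AX = AL * BL, while 'div bl' divides AX by BL,
 * leaving the quotient in AL and the remainder in AH.
 */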
10922
10923
10924/** Opcode 0xf7 /4, /5, /6 and /7. */
10925FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
10926{
10927 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
10928 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
10929
10930 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10931 {
10932 /* register access */
10933 switch (pIemCpu->enmEffOpSize)
10934 {
10935 case IEMMODE_16BIT:
10936 {
10937 IEMOP_HLP_NO_LOCK_PREFIX();
10938 IEM_MC_BEGIN(4, 1);
10939 IEM_MC_ARG(uint16_t *, pu16AX, 0);
10940 IEM_MC_ARG(uint16_t *, pu16DX, 1);
10941 IEM_MC_ARG(uint16_t, u16Value, 2);
10942 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10943 IEM_MC_LOCAL(int32_t, rc);
10944
10945 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10946 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
10947 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
10948 IEM_MC_REF_EFLAGS(pEFlags);
10949 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
10950 IEM_MC_IF_LOCAL_IS_Z(rc) {
10951 IEM_MC_ADVANCE_RIP();
10952 } IEM_MC_ELSE() {
10953 IEM_MC_RAISE_DIVIDE_ERROR();
10954 } IEM_MC_ENDIF();
10955
10956 IEM_MC_END();
10957 return VINF_SUCCESS;
10958 }
10959
10960 case IEMMODE_32BIT:
10961 {
10962 IEMOP_HLP_NO_LOCK_PREFIX();
10963 IEM_MC_BEGIN(4, 1);
10964 IEM_MC_ARG(uint32_t *, pu32AX, 0);
10965 IEM_MC_ARG(uint32_t *, pu32DX, 1);
10966 IEM_MC_ARG(uint32_t, u32Value, 2);
10967 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10968 IEM_MC_LOCAL(int32_t, rc);
10969
10970 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10971 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
10972 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
10973 IEM_MC_REF_EFLAGS(pEFlags);
10974 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
10975 IEM_MC_IF_LOCAL_IS_Z(rc) {
10976 IEM_MC_ADVANCE_RIP();
10977 } IEM_MC_ELSE() {
10978 IEM_MC_RAISE_DIVIDE_ERROR();
10979 } IEM_MC_ENDIF();
10980
10981 IEM_MC_END();
10982 return VINF_SUCCESS;
10983 }
10984
10985 case IEMMODE_64BIT:
10986 {
10987 IEMOP_HLP_NO_LOCK_PREFIX();
10988 IEM_MC_BEGIN(4, 1);
10989 IEM_MC_ARG(uint64_t *, pu64AX, 0);
10990 IEM_MC_ARG(uint64_t *, pu64DX, 1);
10991 IEM_MC_ARG(uint64_t, u64Value, 2);
10992 IEM_MC_ARG(uint32_t *, pEFlags, 3);
10993 IEM_MC_LOCAL(int32_t, rc);
10994
10995 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10996 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
10997 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
10998 IEM_MC_REF_EFLAGS(pEFlags);
10999 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
11000 IEM_MC_IF_LOCAL_IS_Z(rc) {
11001 IEM_MC_ADVANCE_RIP();
11002 } IEM_MC_ELSE() {
11003 IEM_MC_RAISE_DIVIDE_ERROR();
11004 } IEM_MC_ENDIF();
11005
11006 IEM_MC_END();
11007 return VINF_SUCCESS;
11008 }
11009
11010 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11011 }
11012 }
11013 else
11014 {
11015 /* memory access. */
11016 switch (pIemCpu->enmEffOpSize)
11017 {
11018 case IEMMODE_16BIT:
11019 {
11020 IEMOP_HLP_NO_LOCK_PREFIX();
11021 IEM_MC_BEGIN(4, 2);
11022 IEM_MC_ARG(uint16_t *, pu16AX, 0);
11023 IEM_MC_ARG(uint16_t *, pu16DX, 1);
11024 IEM_MC_ARG(uint16_t, u16Value, 2);
11025 IEM_MC_ARG(uint32_t *, pEFlags, 3);
11026 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11027 IEM_MC_LOCAL(int32_t, rc);
11028
11029 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11030 IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
11031 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
11032 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
11033 IEM_MC_REF_EFLAGS(pEFlags);
11034 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
11035 IEM_MC_IF_LOCAL_IS_Z(rc) {
11036 IEM_MC_ADVANCE_RIP();
11037 } IEM_MC_ELSE() {
11038 IEM_MC_RAISE_DIVIDE_ERROR();
11039 } IEM_MC_ENDIF();
11040
11041 IEM_MC_END();
11042 return VINF_SUCCESS;
11043 }
11044
11045 case IEMMODE_32BIT:
11046 {
11047 IEMOP_HLP_NO_LOCK_PREFIX();
11048 IEM_MC_BEGIN(4, 2);
11049 IEM_MC_ARG(uint32_t *, pu32AX, 0);
11050 IEM_MC_ARG(uint32_t *, pu32DX, 1);
11051 IEM_MC_ARG(uint32_t, u32Value, 2);
11052 IEM_MC_ARG(uint32_t *, pEFlags, 3);
11053 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11054 IEM_MC_LOCAL(int32_t, rc);
11055
11056 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11057 IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
11058 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
11059 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
11060 IEM_MC_REF_EFLAGS(pEFlags);
11061 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
11062 IEM_MC_IF_LOCAL_IS_Z(rc) {
11063 IEM_MC_ADVANCE_RIP();
11064 } IEM_MC_ELSE() {
11065 IEM_MC_RAISE_DIVIDE_ERROR();
11066 } IEM_MC_ENDIF();
11067
11068 IEM_MC_END();
11069 return VINF_SUCCESS;
11070 }
11071
11072 case IEMMODE_64BIT:
11073 {
11074 IEMOP_HLP_NO_LOCK_PREFIX();
11075 IEM_MC_BEGIN(4, 2);
11076 IEM_MC_ARG(uint64_t *, pu64AX, 0);
11077 IEM_MC_ARG(uint64_t *, pu64DX, 1);
11078 IEM_MC_ARG(uint64_t, u64Value, 2);
11079 IEM_MC_ARG(uint32_t *, pEFlags, 3);
11080 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
11081 IEM_MC_LOCAL(int32_t, rc);
11082
11083 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
11084 IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
11085 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
11086 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
11087 IEM_MC_REF_EFLAGS(pEFlags);
11088 IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
11089 IEM_MC_IF_LOCAL_IS_Z(rc) {
11090 IEM_MC_ADVANCE_RIP();
11091 } IEM_MC_ELSE() {
11092 IEM_MC_RAISE_DIVIDE_ERROR();
11093 } IEM_MC_ENDIF();
11094
11095 IEM_MC_END();
11096 return VINF_SUCCESS;
11097 }
11098
11099 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11100 }
11101 }
11102}
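/*
 * Informal note on the rc/divide-error pattern above: the assembly helpers
 * return non-zero when the divisor is zero or the result doesn't fit, and
 * that is translated into \#DE here. E.g. 'div ecx' with EDX:EAX = 0:100
 * and ECX = 7 succeeds with EAX = 14, EDX = 2, whereas ECX = 0 (or a
 * quotient wider than 32 bits, say EDX:EAX = 1:0 divided by 1) makes the
 * helper report failure and IEM_MC_RAISE_DIVIDE_ERROR() fires.
 */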
11103
11104/** Opcode 0xf6. */
11105FNIEMOP_DEF(iemOp_Grp3_Eb)
11106{
11107 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11108 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11109 {
11110 case 0:
11111 return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
11112        case 1:
11113            return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /** @todo undefined encoding; real CPUs reportedly execute F6 /1 as an alias of /0 (TEST). */
11114 case 2:
11115 IEMOP_MNEMONIC("not Eb");
11116 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
11117 case 3:
11118 IEMOP_MNEMONIC("neg Eb");
11119 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
11120 case 4:
11121 IEMOP_MNEMONIC("mul Eb");
11122 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11123 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
11124 case 5:
11125 IEMOP_MNEMONIC("imul Eb");
11126 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11127 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
11128 case 6:
11129 IEMOP_MNEMONIC("div Eb");
11130 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11131 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
11132 case 7:
11133 IEMOP_MNEMONIC("idiv Eb");
11134 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11135 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
11136 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11137 }
11138}
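/*
 * Decoding example for the reg-field dispatch above (informal): for the
 * instruction bytes F6 D8, bRm = 0xD8 = mod 11, reg 011, rm 000, so the
 * switch lands in case 3 and emulates 'neg al'. The reg field selects the
 * operation while mod/rm select the operand, as usual for group opcodes.
 */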
11139
11140
11141/** Opcode 0xf7. */
11142FNIEMOP_DEF(iemOp_Grp3_Ev)
11143{
11144 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11145 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11146 {
11147 case 0:
11148 return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
11149        case 1:
11150            return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /** @todo undefined encoding; real CPUs reportedly execute F7 /1 as an alias of /0 (TEST). */
11151 case 2:
11152 IEMOP_MNEMONIC("not Ev");
11153 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
11154 case 3:
11155 IEMOP_MNEMONIC("neg Ev");
11156 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
11157 case 4:
11158 IEMOP_MNEMONIC("mul Ev");
11159 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11160 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
11161 case 5:
11162 IEMOP_MNEMONIC("imul Ev");
11163 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
11164 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
11165 case 6:
11166 IEMOP_MNEMONIC("div Ev");
11167 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11168 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
11169 case 7:
11170 IEMOP_MNEMONIC("idiv Ev");
11171 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
11172 return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
11173 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11174 }
11175}
11176
11177
11178/** Opcode 0xf8. */
11179FNIEMOP_DEF(iemOp_clc)
11180{
11181 IEMOP_MNEMONIC("clc");
11182 IEMOP_HLP_NO_LOCK_PREFIX();
11183 IEM_MC_BEGIN(0, 0);
11184 IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
11185 IEM_MC_ADVANCE_RIP();
11186 IEM_MC_END();
11187 return VINF_SUCCESS;
11188}
11189
11190
11191/** Opcode 0xf9. */
11192FNIEMOP_DEF(iemOp_stc)
11193{
11194 IEMOP_MNEMONIC("stc");
11195 IEMOP_HLP_NO_LOCK_PREFIX();
11196 IEM_MC_BEGIN(0, 0);
11197 IEM_MC_SET_EFL_BIT(X86_EFL_CF);
11198 IEM_MC_ADVANCE_RIP();
11199 IEM_MC_END();
11200 return VINF_SUCCESS;
11201}
11202
11203
11204/** Opcode 0xfa. */
11205FNIEMOP_DEF(iemOp_cli)
11206{
11207 IEMOP_MNEMONIC("cli");
11208 IEMOP_HLP_NO_LOCK_PREFIX();
11209 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
11210}
11211
11212
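/** Opcode 0xfb. */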
11213FNIEMOP_DEF(iemOp_sti)
11214{
11215 IEMOP_MNEMONIC("sti");
11216 IEMOP_HLP_NO_LOCK_PREFIX();
11217 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
11218}
11219
11220
11221/** Opcode 0xfc. */
11222FNIEMOP_DEF(iemOp_cld)
11223{
11224 IEMOP_MNEMONIC("cld");
11225 IEMOP_HLP_NO_LOCK_PREFIX();
11226 IEM_MC_BEGIN(0, 0);
11227 IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);
11228 IEM_MC_ADVANCE_RIP();
11229 IEM_MC_END();
11230 return VINF_SUCCESS;
11231}
11232
11233
11234/** Opcode 0xfd. */
11235FNIEMOP_DEF(iemOp_std)
11236{
11237 IEMOP_MNEMONIC("std");
11238 IEMOP_HLP_NO_LOCK_PREFIX();
11239 IEM_MC_BEGIN(0, 0);
11240 IEM_MC_SET_EFL_BIT(X86_EFL_DF);
11241 IEM_MC_ADVANCE_RIP();
11242 IEM_MC_END();
11243 return VINF_SUCCESS;
11244}
11245
11246
11247/** Opcode 0xfe. */
11248FNIEMOP_DEF(iemOp_Grp4)
11249{
11250 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11251 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11252 {
11253 case 0:
11254            IEMOP_MNEMONIC("inc Eb");
11255 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
11256 case 1:
11257            IEMOP_MNEMONIC("dec Eb");
11258 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
11259 default:
11260 IEMOP_MNEMONIC("grp4-ud");
11261 return IEMOP_RAISE_INVALID_OPCODE();
11262 }
11263}
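/*
 * Informal note: group 4 only defines /0 and /1 on byte operands; e.g.
 * FE C0 decodes as 'inc al' (mod 11, reg 000, rm 000), and everything
 * with reg >= 2 is an undefined encoding, hence the \#UD above.
 */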
11264
11265
11266/**
11267 * Opcode 0xff /2.
11268 * @param bRm The RM byte.
11269 */
11270FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
11271{
11272 IEMOP_MNEMONIC("calln Ev");
11273 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11274 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11275
11276 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11277 {
11278 /* The new RIP is taken from a register. */
11279 switch (pIemCpu->enmEffOpSize)
11280 {
11281 case IEMMODE_16BIT:
11282 IEM_MC_BEGIN(1, 0);
11283 IEM_MC_ARG(uint16_t, u16Target, 0);
11284 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11285 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
11286                IEM_MC_END();
11287 return VINF_SUCCESS;
11288
11289 case IEMMODE_32BIT:
11290 IEM_MC_BEGIN(1, 0);
11291 IEM_MC_ARG(uint32_t, u32Target, 0);
11292 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11293 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
11294                IEM_MC_END();
11295 return VINF_SUCCESS;
11296
11297 case IEMMODE_64BIT:
11298 IEM_MC_BEGIN(1, 0);
11299 IEM_MC_ARG(uint64_t, u64Target, 0);
11300 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11301 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
11302                IEM_MC_END();
11303 return VINF_SUCCESS;
11304
11305 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11306 }
11307 }
11308 else
11309 {
11310        /* The new RIP is read from memory. */
11311 switch (pIemCpu->enmEffOpSize)
11312 {
11313 case IEMMODE_16BIT:
11314 IEM_MC_BEGIN(1, 1);
11315 IEM_MC_ARG(uint16_t, u16Target, 0);
11316 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11317 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11318 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11319 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
11320                IEM_MC_END();
11321 return VINF_SUCCESS;
11322
11323 case IEMMODE_32BIT:
11324 IEM_MC_BEGIN(1, 1);
11325 IEM_MC_ARG(uint32_t, u32Target, 0);
11326 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11327 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11328 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11329 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
11330                IEM_MC_END();
11331 return VINF_SUCCESS;
11332
11333 case IEMMODE_64BIT:
11334 IEM_MC_BEGIN(1, 1);
11335 IEM_MC_ARG(uint64_t, u64Target, 0);
11336 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11337 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11338 IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11339 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
11340                IEM_MC_END();
11341 return VINF_SUCCESS;
11342
11343 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11344 }
11345 }
11346}
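/*
 * Decoding example (informal): in 64-bit mode FF /2 defaults to a 64-bit
 * operand and no 32-bit form is encodable there, hence the
 * IEMOP_HLP_DEFAULT_64BIT_OP_SIZE above. So FF D0 is 'call rax':
 * mod 11, reg 010, rm 000, taking the 64-bit register path.
 */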
11347
11348
11349/**
11350 * Opcode 0xff /3.
11351 * @param bRm The RM byte.
11352 */
11353FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
11354{
11355 IEMOP_MNEMONIC("callf Ep");
11356 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11357
11358    /* Register operand? Ep is a memory-only far pointer operand, so
11359       mod=3 is not a valid encoding. */
11360    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11361    {
11362        /* There is no 'callf eax'; real CPUs raise #UD for this
11363         * encoding, so do the same here. */
11364        return IEMOP_RAISE_INVALID_OPCODE();
11365    }
11365
11366 /* Far pointer loaded from memory. */
11367 switch (pIemCpu->enmEffOpSize)
11368 {
11369 case IEMMODE_16BIT:
11370 IEM_MC_BEGIN(3, 1);
11371 IEM_MC_ARG(uint16_t, u16Sel, 0);
11372 IEM_MC_ARG(uint16_t, offSeg, 1);
11373 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11374 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11375 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11376 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11377 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11378 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11379 IEM_MC_END();
11380 return VINF_SUCCESS;
11381
11382 case IEMMODE_32BIT:
11383 IEM_MC_BEGIN(3, 1);
11384 IEM_MC_ARG(uint16_t, u16Sel, 0);
11385 IEM_MC_ARG(uint32_t, offSeg, 1);
11386 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11387 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11388 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11389 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11390 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11391 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11392 IEM_MC_END();
11393 return VINF_SUCCESS;
11394
11395 case IEMMODE_64BIT:
11396 IEM_MC_BEGIN(3, 1);
11397 IEM_MC_ARG(uint16_t, u16Sel, 0);
11398 IEM_MC_ARG(uint64_t, offSeg, 1);
11399            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_64BIT, 2);
11400 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11401 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11402 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11403 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11404 IEM_MC_CALL_CIMPL_3(iemCImpl_callf, u16Sel, offSeg, enmEffOpSize);
11405 IEM_MC_END();
11406 return VINF_SUCCESS;
11407
11408 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11409 }
11410}
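/*
 * Informal note on the memory layout decoded above: a far pointer is
 * stored little endian with the offset first and the 16-bit selector
 * after it, so for m16:32 the offset occupies bytes 0-3 and the selector
 * bytes 4-5 - hence the IEM_MC_FETCH_MEM_U16_DISP calls with
 * displacements 2, 4 and 8 for the 16, 32 and 64-bit offset sizes.
 */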
11411
11412
11413/**
11414 * Opcode 0xff /4.
11415 * @param bRm The RM byte.
11416 */
11417FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
11418{
11419 IEMOP_MNEMONIC("jmpn Ev");
11420 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11421 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11422
11423 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11424 {
11425 /* The new RIP is taken from a register. */
11426 switch (pIemCpu->enmEffOpSize)
11427 {
11428 case IEMMODE_16BIT:
11429 IEM_MC_BEGIN(0, 1);
11430 IEM_MC_LOCAL(uint16_t, u16Target);
11431 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11432 IEM_MC_SET_RIP_U16(u16Target);
11433                IEM_MC_END();
11434 return VINF_SUCCESS;
11435
11436 case IEMMODE_32BIT:
11437 IEM_MC_BEGIN(0, 1);
11438 IEM_MC_LOCAL(uint32_t, u32Target);
11439 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11440 IEM_MC_SET_RIP_U32(u32Target);
11441                IEM_MC_END();
11442 return VINF_SUCCESS;
11443
11444 case IEMMODE_64BIT:
11445 IEM_MC_BEGIN(0, 1);
11446 IEM_MC_LOCAL(uint64_t, u64Target);
11447 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11448 IEM_MC_SET_RIP_U64(u64Target);
11449                IEM_MC_END();
11450 return VINF_SUCCESS;
11451
11452 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11453 }
11454 }
11455 else
11456 {
11457        /* The new RIP is read from memory. */
11458 switch (pIemCpu->enmEffOpSize)
11459 {
11460 case IEMMODE_16BIT:
11461 IEM_MC_BEGIN(0, 2);
11462 IEM_MC_LOCAL(uint16_t, u16Target);
11463 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11464 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11465 IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11466 IEM_MC_SET_RIP_U16(u16Target);
11467                IEM_MC_END();
11468 return VINF_SUCCESS;
11469
11470 case IEMMODE_32BIT:
11471 IEM_MC_BEGIN(0, 2);
11472 IEM_MC_LOCAL(uint32_t, u32Target);
11473 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11474 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11475 IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11476 IEM_MC_SET_RIP_U32(u32Target);
11477                IEM_MC_END();
11478 return VINF_SUCCESS;
11479
11480 case IEMMODE_64BIT:
11481 IEM_MC_BEGIN(0, 2);
11482                IEM_MC_LOCAL(uint64_t, u64Target);
11483                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11484                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11485                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
11486                IEM_MC_SET_RIP_U64(u64Target);
11487                IEM_MC_END();
11488 return VINF_SUCCESS;
11489
11490 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11491 }
11492 }
11493}
11494
11495
11496/**
11497 * Opcode 0xff /5.
11498 * @param bRm The RM byte.
11499 */
11500FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
11501{
11502    IEMOP_MNEMONIC("jmpf Ep");
11503    IEMOP_HLP_NO_64BIT(); /** @todo FF /5 is a valid encoding in 64-bit mode; this makes the IEMMODE_64BIT case below unreachable. */
11504 /** @todo could share all the decoding with iemOp_Grp5_callf_Ep. */
11505
11506    /* Decode the far pointer address and pass it on to the far jump C
11507       implementation. */
11508    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11509    {
11510        /* There is no 'jmpf eax'; Ep is a memory-only operand and real
11511         * CPUs raise #UD for mod=3, so do the same here. */
11512        return IEMOP_RAISE_INVALID_OPCODE();
11513    }
11514
11515 /* Far pointer loaded from memory. */
11516 switch (pIemCpu->enmEffOpSize)
11517 {
11518 case IEMMODE_16BIT:
11519 IEM_MC_BEGIN(3, 1);
11520 IEM_MC_ARG(uint16_t, u16Sel, 0);
11521 IEM_MC_ARG(uint16_t, offSeg, 1);
11522 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
11523 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11524 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11525 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11526 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
11527 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11528 IEM_MC_END();
11529 return VINF_SUCCESS;
11530
11531 case IEMMODE_32BIT:
11532 IEM_MC_BEGIN(3, 1);
11533 IEM_MC_ARG(uint16_t, u16Sel, 0);
11534 IEM_MC_ARG(uint32_t, offSeg, 1);
11535 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
11536 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11537 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11538 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11539 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
11540 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11541 IEM_MC_END();
11542 return VINF_SUCCESS;
11543
11544 case IEMMODE_64BIT:
11545 IEM_MC_BEGIN(3, 1);
11546 IEM_MC_ARG(uint16_t, u16Sel, 0);
11547 IEM_MC_ARG(uint64_t, offSeg, 1);
11548            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_64BIT, 2);
11549 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11550 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11551 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
11552 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
11553 IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, enmEffOpSize);
11554 IEM_MC_END();
11555 return VINF_SUCCESS;
11556
11557 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11558 }
11559}
11560
11561
11562/**
11563 * Opcode 0xff /6.
11564 * @param bRm The RM byte.
11565 */
11566FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
11567{
11568 IEMOP_MNEMONIC("push Ev");
11569 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
11570
11571 /* Registers are handled by a common worker. */
11572 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
11573 return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
11574
11575 /* Memory we do here. */
11576 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
11577 switch (pIemCpu->enmEffOpSize)
11578 {
11579 case IEMMODE_16BIT:
11580 IEM_MC_BEGIN(0, 2);
11581 IEM_MC_LOCAL(uint16_t, u16Src);
11582 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11583 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11584 IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
11585 IEM_MC_PUSH_U16(u16Src);
11586 IEM_MC_ADVANCE_RIP();
11587 IEM_MC_END();
11588 return VINF_SUCCESS;
11589
11590 case IEMMODE_32BIT:
11591 IEM_MC_BEGIN(0, 2);
11592 IEM_MC_LOCAL(uint32_t, u32Src);
11593 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11594 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11595 IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
11596 IEM_MC_PUSH_U32(u32Src);
11597 IEM_MC_ADVANCE_RIP();
11598 IEM_MC_END();
11599 return VINF_SUCCESS;
11600
11601 case IEMMODE_64BIT:
11602 IEM_MC_BEGIN(0, 2);
11603 IEM_MC_LOCAL(uint64_t, u64Src);
11604 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
11605 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm);
11606 IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
11607 IEM_MC_PUSH_U64(u64Src);
11608 IEM_MC_ADVANCE_RIP();
11609 IEM_MC_END();
11610 return VINF_SUCCESS;
11611 }
11612    AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* unreachable, the switch above covers all operand sizes */
11613}
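/*
 * Informal note: like the near call above, push defaults to a 64-bit
 * operand in 64-bit mode and a 32-bit operand size is not encodable there
 * (an operand size prefix selects 16-bit instead), which is why
 * IEMOP_HLP_DEFAULT_64BIT_OP_SIZE is invoked for the memory forms.
 */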
11614
11615
11616/** Opcode 0xff. */
11617FNIEMOP_DEF(iemOp_Grp5)
11618{
11619 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
11620 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
11621 {
11622 case 0:
11623 IEMOP_MNEMONIC("inc Ev");
11624 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
11625 case 1:
11626 IEMOP_MNEMONIC("dec Ev");
11627 return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
11628 case 2:
11629 return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
11630 case 3:
11631 return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
11632 case 4:
11633 return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
11634 case 5:
11635 return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
11636 case 6:
11637 return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
11638 case 7:
11639 IEMOP_MNEMONIC("grp5-ud");
11640 return IEMOP_RAISE_INVALID_OPCODE();
11641 }
11642 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
11643}
11644
11645
11646
11647const PFNIEMOP g_apfnOneByteMap[256] =
11648{
11649 /* 0x00 */ iemOp_add_Eb_Gb, iemOp_add_Ev_Gv, iemOp_add_Gb_Eb, iemOp_add_Gv_Ev,
11650 /* 0x04 */ iemOp_add_Al_Ib, iemOp_add_eAX_Iz, iemOp_push_ES, iemOp_pop_ES,
11651 /* 0x08 */ iemOp_or_Eb_Gb, iemOp_or_Ev_Gv, iemOp_or_Gb_Eb, iemOp_or_Gv_Ev,
11652 /* 0x0c */ iemOp_or_Al_Ib, iemOp_or_eAX_Iz, iemOp_push_CS, iemOp_2byteEscape,
11653 /* 0x10 */ iemOp_adc_Eb_Gb, iemOp_adc_Ev_Gv, iemOp_adc_Gb_Eb, iemOp_adc_Gv_Ev,
11654 /* 0x14 */ iemOp_adc_Al_Ib, iemOp_adc_eAX_Iz, iemOp_push_SS, iemOp_pop_SS,
11655 /* 0x18 */ iemOp_sbb_Eb_Gb, iemOp_sbb_Ev_Gv, iemOp_sbb_Gb_Eb, iemOp_sbb_Gv_Ev,
11656 /* 0x1c */ iemOp_sbb_Al_Ib, iemOp_sbb_eAX_Iz, iemOp_push_DS, iemOp_pop_DS,
11657 /* 0x20 */ iemOp_and_Eb_Gb, iemOp_and_Ev_Gv, iemOp_and_Gb_Eb, iemOp_and_Gv_Ev,
11658 /* 0x24 */ iemOp_and_Al_Ib, iemOp_and_eAX_Iz, iemOp_seg_ES, iemOp_daa,
11659 /* 0x28 */ iemOp_sub_Eb_Gb, iemOp_sub_Ev_Gv, iemOp_sub_Gb_Eb, iemOp_sub_Gv_Ev,
11660 /* 0x2c */ iemOp_sub_Al_Ib, iemOp_sub_eAX_Iz, iemOp_seg_CS, iemOp_das,
11661 /* 0x30 */ iemOp_xor_Eb_Gb, iemOp_xor_Ev_Gv, iemOp_xor_Gb_Eb, iemOp_xor_Gv_Ev,
11662 /* 0x34 */ iemOp_xor_Al_Ib, iemOp_xor_eAX_Iz, iemOp_seg_SS, iemOp_aaa,
11663 /* 0x38 */ iemOp_cmp_Eb_Gb, iemOp_cmp_Ev_Gv, iemOp_cmp_Gb_Eb, iemOp_cmp_Gv_Ev,
11664 /* 0x3c */ iemOp_cmp_Al_Ib, iemOp_cmp_eAX_Iz, iemOp_seg_DS, iemOp_aas,
11665 /* 0x40 */ iemOp_inc_eAX, iemOp_inc_eCX, iemOp_inc_eDX, iemOp_inc_eBX,
11666 /* 0x44 */ iemOp_inc_eSP, iemOp_inc_eBP, iemOp_inc_eSI, iemOp_inc_eDI,
11667 /* 0x48 */ iemOp_dec_eAX, iemOp_dec_eCX, iemOp_dec_eDX, iemOp_dec_eBX,
11668 /* 0x4c */ iemOp_dec_eSP, iemOp_dec_eBP, iemOp_dec_eSI, iemOp_dec_eDI,
11669 /* 0x50 */ iemOp_push_eAX, iemOp_push_eCX, iemOp_push_eDX, iemOp_push_eBX,
11670 /* 0x54 */ iemOp_push_eSP, iemOp_push_eBP, iemOp_push_eSI, iemOp_push_eDI,
11671 /* 0x58 */ iemOp_pop_eAX, iemOp_pop_eCX, iemOp_pop_eDX, iemOp_pop_eBX,
11672 /* 0x5c */ iemOp_pop_eSP, iemOp_pop_eBP, iemOp_pop_eSI, iemOp_pop_eDI,
11673 /* 0x60 */ iemOp_pusha, iemOp_popa, iemOp_bound_Gv_Ma, iemOp_arpl_Ew_Gw,
11674 /* 0x64 */ iemOp_seg_FS, iemOp_seg_GS, iemOp_op_size, iemOp_addr_size,
11675 /* 0x68 */ iemOp_push_Iz, iemOp_imul_Gv_Ev_Iz, iemOp_push_Ib, iemOp_imul_Gv_Ev_Ib,
11676 /* 0x6c */ iemOp_insb_Yb_DX, iemOp_inswd_Yv_DX, iemOp_outsb_Yb_DX, iemOp_outswd_Yv_DX,
11677 /* 0x70 */ iemOp_jo_Jb, iemOp_jno_Jb, iemOp_jc_Jb, iemOp_jnc_Jb,
11678 /* 0x74 */ iemOp_je_Jb, iemOp_jne_Jb, iemOp_jbe_Jb, iemOp_jnbe_Jb,
11679 /* 0x78 */ iemOp_js_Jb, iemOp_jns_Jb, iemOp_jp_Jb, iemOp_jnp_Jb,
11680 /* 0x7c */ iemOp_jl_Jb, iemOp_jnl_Jb, iemOp_jle_Jb, iemOp_jnle_Jb,
11681 /* 0x80 */ iemOp_Grp1_Eb_Ib_80, iemOp_Grp1_Ev_Iz, iemOp_Grp1_Eb_Ib_82, iemOp_Grp1_Ev_Ib,
11682 /* 0x84 */ iemOp_test_Eb_Gb, iemOp_test_Ev_Gv, iemOp_xchg_Eb_Gb, iemOp_xchg_Ev_Gv,
11683 /* 0x88 */ iemOp_mov_Eb_Gb, iemOp_mov_Ev_Gv, iemOp_mov_Gb_Eb, iemOp_mov_Gv_Ev,
11684 /* 0x8c */ iemOp_mov_Ev_Sw, iemOp_lea_Gv_M, iemOp_mov_Sw_Ev, iemOp_pop_Ev,
11685 /* 0x90 */ iemOp_nop, iemOp_xchg_eCX_eAX, iemOp_xchg_eDX_eAX, iemOp_xchg_eBX_eAX,
11686 /* 0x94 */ iemOp_xchg_eSP_eAX, iemOp_xchg_eBP_eAX, iemOp_xchg_eSI_eAX, iemOp_xchg_eDI_eAX,
11687 /* 0x98 */ iemOp_cbw, iemOp_cwd, iemOp_call_Ap, iemOp_wait,
11688 /* 0x9c */ iemOp_pushf_Fv, iemOp_popf_Fv, iemOp_sahf, iemOp_lahf,
11689 /* 0xa0 */ iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL, iemOp_mov_Ov_rAX,
11690 /* 0xa4 */ iemOp_movsb_Xb_Yb, iemOp_movswd_Xv_Yv, iemOp_cmpsb_Xb_Yb, iemOp_cmpswd_Xv_Yv,
11691 /* 0xa8 */ iemOp_test_AL_Ib, iemOp_test_eAX_Iz, iemOp_stosb_Yb_AL, iemOp_stoswd_Yv_eAX,
11692 /* 0xac */ iemOp_lodsb_AL_Xb, iemOp_lodswd_eAX_Xv, iemOp_scasb_AL_Xb, iemOp_scaswd_eAX_Xv,
11693 /* 0xb0 */ iemOp_mov_AL_Ib, iemOp_CL_Ib, iemOp_DL_Ib, iemOp_BL_Ib,
11694 /* 0xb4 */ iemOp_mov_AH_Ib, iemOp_CH_Ib, iemOp_DH_Ib, iemOp_BH_Ib,
11695 /* 0xb8 */ iemOp_eAX_Iv, iemOp_eCX_Iv, iemOp_eDX_Iv, iemOp_eBX_Iv,
11696 /* 0xbc */ iemOp_eSP_Iv, iemOp_eBP_Iv, iemOp_eSI_Iv, iemOp_eDI_Iv,
11697 /* 0xc0 */ iemOp_Grp2_Eb_Ib, iemOp_Grp2_Ev_Ib, iemOp_retn_Iw, iemOp_retn,
11698 /* 0xc4 */ iemOp_les_Gv_Mp, iemOp_lds_Gv_Mp, iemOp_Grp11_Eb_Ib, iemOp_Grp11_Ev_Iz,
11699 /* 0xc8 */ iemOp_enter_Iw_Ib, iemOp_leave, iemOp_retf_Iw, iemOp_retf,
11700 /* 0xcc */ iemOp_int_3, iemOp_int_Ib, iemOp_into, iemOp_iret,
11701 /* 0xd0 */ iemOp_Grp2_Eb_1, iemOp_Grp2_Ev_1, iemOp_Grp2_Eb_CL, iemOp_Grp2_Ev_CL,
11702 /* 0xd4 */ iemOp_aam_Ib, iemOp_aad_Ib, iemOp_Invalid, iemOp_xlat,
11703 /* 0xd8 */ iemOp_EscF0, iemOp_EscF1, iemOp_EscF2, iemOp_EscF3,
11704 /* 0xdc */ iemOp_EscF4, iemOp_EscF5, iemOp_EscF6, iemOp_EscF7,
11705 /* 0xe0 */ iemOp_loopne_Jb, iemOp_loope_Jb, iemOp_loop_Jb, iemOp_jecxz_Jb,
11706 /* 0xe4 */ iemOp_in_AL_Ib, iemOp_in_eAX_Ib, iemOp_out_Ib_AL, iemOp_out_Ib_eAX,
11707 /* 0xe8 */ iemOp_call_Jv, iemOp_jmp_Jv, iemOp_jmp_Ap, iemOp_jmp_Jb,
11708 /* 0xec */ iemOp_in_AL_DX, iemOp_eAX_DX, iemOp_out_DX_AL, iemOp_out_DX_eAX,
11709 /* 0xf0 */ iemOp_lock, iemOp_Invalid, iemOp_repne, iemOp_repe, /** @todo 0xf1 is INT1 / ICEBP. */
11710 /* 0xf4 */ iemOp_hlt, iemOp_cmc, iemOp_Grp3_Eb, iemOp_Grp3_Ev,
11711 /* 0xf8 */ iemOp_clc, iemOp_stc, iemOp_cli, iemOp_sti,
11712 /* 0xfc */ iemOp_cld, iemOp_std, iemOp_Grp4, iemOp_Grp5,
11713};
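/*
 * Informal usage sketch (the actual fetch-and-dispatch lives elsewhere in
 * IEM): the decoder fetches the first opcode byte and calls through this
 * table, roughly:
 *
 *     uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
 *     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
 *
 * Prefix bytes (segment overrides, 0x66/0x67, lock, rep) are table entries
 * like any other and recurse back into the map after updating the decoder
 * state.
 */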
11714
11715
11716/** @} */
11717